Diffstat (limited to 'include')
-rw-r--r--include/acpi/acconfig.h2
-rw-r--r--include/acpi/acexcep.h6
-rw-r--r--include/acpi/acpixf.h2
-rw-r--r--include/acpi/ghes.h4
-rw-r--r--include/acpi/processor.h3
-rw-r--r--include/asm-generic/atomic-instrumented.h197
-rw-r--r--include/asm-generic/atomic.h33
-rw-r--r--include/asm-generic/atomic64.h15
-rw-r--r--include/asm-generic/bitops/atomic.h188
-rw-r--r--include/asm-generic/bitops/lock.h68
-rw-r--r--include/asm-generic/bug.h16
-rw-r--r--include/asm-generic/export.h12
-rw-r--r--include/asm-generic/pgtable.h38
-rw-r--r--include/asm-generic/qspinlock_types.h2
-rw-r--r--include/asm-generic/tlb.h25
-rw-r--r--include/asm-generic/vmlinux.lds.h2
-rw-r--r--include/crypto/dh.h4
-rw-r--r--include/crypto/drbg.h3
-rw-r--r--include/crypto/if_alg.h3
-rw-r--r--include/crypto/scatterwalk.h15
-rw-r--r--include/crypto/sha.h4
-rw-r--r--include/crypto/vmac.h63
-rw-r--r--include/drm/drmP.h18
-rw-r--r--include/drm/drm_atomic.h14
-rw-r--r--include/drm/drm_atomic_helper.h1
-rw-r--r--include/drm/drm_audio_component.h118
-rw-r--r--include/drm/drm_bridge.h48
-rw-r--r--include/drm/drm_client.h139
-rw-r--r--include/drm/drm_connector.h279
-rw-r--r--include/drm/drm_crtc.h276
-rw-r--r--include/drm/drm_debugfs_crc.h3
-rw-r--r--include/drm/drm_device.h21
-rw-r--r--include/drm/drm_dp_helper.h56
-rw-r--r--include/drm/drm_drv.h29
-rw-r--r--include/drm/drm_encoder.h16
-rw-r--r--include/drm/drm_fb_cma_helper.h6
-rw-r--r--include/drm/drm_fb_helper.h38
-rw-r--r--include/drm/drm_file.h7
-rw-r--r--include/drm/drm_fourcc.h2
-rw-r--r--include/drm/drm_mm.h34
-rw-r--r--include/drm/drm_mode_config.h36
-rw-r--r--include/drm/drm_modes.h2
-rw-r--r--include/drm/drm_modeset_helper_vtables.h17
-rw-r--r--include/drm/drm_of.h8
-rw-r--r--include/drm/drm_panel.h3
-rw-r--r--include/drm/drm_pci.h7
-rw-r--r--include/drm/drm_plane.h197
-rw-r--r--include/drm/drm_plane_helper.h6
-rw-r--r--include/drm/drm_prime.h6
-rw-r--r--include/drm/drm_print.h77
-rw-r--r--include/drm/drm_property.h4
-rw-r--r--include/drm/drm_vma_manager.h1
-rw-r--r--include/drm/drm_writeback.h136
-rw-r--r--include/drm/gpu_scheduler.h174
-rw-r--r--include/drm/i915_component.h85
-rw-r--r--include/drm/i915_drm.h4
-rw-r--r--include/drm/i915_pciids.h37
-rw-r--r--include/drm/tinydrm/tinydrm.h23
-rw-r--r--include/drm/ttm/ttm_bo_api.h25
-rw-r--r--include/drm/ttm/ttm_set_memory.h150
-rw-r--r--include/dt-bindings/bus/ti-sysc.h2
-rw-r--r--include/dt-bindings/clock/actions,s700-cmu.h118
-rw-r--r--include/dt-bindings/clock/aspeed-clock.h2
-rw-r--r--include/dt-bindings/clock/axg-audio-clkc.h94
-rw-r--r--include/dt-bindings/clock/axg-clkc.h4
-rw-r--r--include/dt-bindings/clock/dra7.h1
-rw-r--r--include/dt-bindings/clock/exynos5440.h44
-rw-r--r--include/dt-bindings/clock/gxbb-clkc.h1
-rw-r--r--include/dt-bindings/clock/imx6sll-clock.h9
-rw-r--r--include/dt-bindings/clock/imx6ul-clock.h46
-rw-r--r--include/dt-bindings/clock/maxim,max9485.h18
-rw-r--r--include/dt-bindings/clock/px30-cru.h389
-rw-r--r--include/dt-bindings/clock/pxa-clock.h3
-rw-r--r--include/dt-bindings/clock/qcom,dispcc-sdm845.h45
-rw-r--r--include/dt-bindings/clock/qcom,gcc-sdm845.h2
-rw-r--r--include/dt-bindings/clock/r9a06g032-sysctrl.h148
-rw-r--r--include/dt-bindings/clock/rk3399-ddr.h56
-rw-r--r--include/dt-bindings/clock/sun8i-r40-ccu.h4
-rw-r--r--include/dt-bindings/clock/sun8i-tcon-top.h11
-rw-r--r--include/dt-bindings/gce/mt8173-gce.h44
-rw-r--r--include/dt-bindings/iio/adc/at91-sama5d2_adc.h16
-rw-r--r--include/dt-bindings/memory/mt2712-larb-port.h95
-rw-r--r--include/dt-bindings/pinctrl/at91.h4
-rw-r--r--include/dt-bindings/pinctrl/samsung.h7
-rw-r--r--include/dt-bindings/regulator/maxim,max77802.h5
-rw-r--r--include/dt-bindings/regulator/qcom,rpmh-regulator.h36
-rw-r--r--include/dt-bindings/reset/qcom,sdm845-aoss.h17
-rw-r--r--include/dt-bindings/soc/qcom,rpmh-rsc.h14
-rw-r--r--include/dt-bindings/usb/pd.h62
-rw-r--r--include/kvm/arm_vgic.h9
-rw-r--r--include/linux/acpi.h28
-rw-r--r--include/linux/ascii85.h38
-rw-r--r--include/linux/atmdev.h15
-rw-r--r--include/linux/atomic.h453
-rw-r--r--include/linux/audit.h5
-rw-r--r--include/linux/backing-dev-defs.h5
-rw-r--r--include/linux/backing-dev.h4
-rw-r--r--include/linux/backlight.h1
-rw-r--r--include/linux/bio.h19
-rw-r--r--include/linux/bitfield.h9
-rw-r--r--include/linux/bitmap.h8
-rw-r--r--include/linux/bitops.h25
-rw-r--r--include/linux/bits.h26
-rw-r--r--include/linux/blk-cgroup.h146
-rw-r--r--include/linux/blk-mq.h18
-rw-r--r--include/linux/blk_types.h27
-rw-r--r--include/linux/blkdev.h70
-rw-r--r--include/linux/bootmem.h17
-rw-r--r--include/linux/bpf-cgroup.h81
-rw-r--r--include/linux/bpf.h99
-rw-r--r--include/linux/bpf_lirc.h5
-rw-r--r--include/linux/bpf_types.h9
-rw-r--r--include/linux/bpfilter.h6
-rw-r--r--include/linux/brcmphy.h1
-rw-r--r--include/linux/build-salt.h20
-rw-r--r--include/linux/can/dev.h7
-rw-r--r--include/linux/cdrom.h3
-rw-r--r--include/linux/ceph/auth.h8
-rw-r--r--include/linux/ceph/ceph_features.h7
-rw-r--r--include/linux/ceph/decode.h18
-rw-r--r--include/linux/ceph/messenger.h8
-rw-r--r--include/linux/ceph/msgr.h2
-rw-r--r--include/linux/ceph/osd_client.h10
-rw-r--r--include/linux/ceph/pagelist.h2
-rw-r--r--include/linux/cgroup-defs.h3
-rw-r--r--include/linux/cgroup.h30
-rw-r--r--include/linux/clk-provider.h26
-rw-r--r--include/linux/clk.h33
-rw-r--r--include/linux/clk/at91_pmc.h15
-rw-r--r--include/linux/clocksource.h3
-rw-r--r--include/linux/cma.h2
-rw-r--r--include/linux/compat.h28
-rw-r--r--include/linux/compat_time.h9
-rw-r--r--include/linux/compiler-clang.h20
-rw-r--r--include/linux/compiler-gcc.h167
-rw-r--r--include/linux/compiler-intel.h13
-rw-r--r--include/linux/compiler.h21
-rw-r--r--include/linux/compiler_types.h267
-rw-r--r--include/linux/console.h19
-rw-r--r--include/linux/console_struct.h5
-rw-r--r--include/linux/coresight.h68
-rw-r--r--include/linux/cpu.h25
-rw-r--r--include/linux/cpuhotplug.h3
-rw-r--r--include/linux/cpumask.h18
-rw-r--r--include/linux/crash_core.h2
-rw-r--r--include/linux/crc32poly.h20
-rw-r--r--include/linux/crc64.h11
-rw-r--r--include/linux/cred.h15
-rw-r--r--include/linux/crypto.h5
-rw-r--r--include/linux/dax.h2
-rw-r--r--include/linux/dcache.h18
-rw-r--r--include/linux/delayacct.h2
-rw-r--r--include/linux/device.h135
-rw-r--r--include/linux/dm-kcopyd.h12
-rw-r--r--include/linux/dma-buf.h21
-rw-r--r--include/linux/dma-contiguous.h6
-rw-r--r--include/linux/dma-direction.h6
-rw-r--r--include/linux/dma-fence.h32
-rw-r--r--include/linux/dma-mapping.h9
-rw-r--r--include/linux/dma-noncoherent.h8
-rw-r--r--include/linux/dma/pxa-dma.h9
-rw-r--r--include/linux/dma/xilinx_dma.h2
-rw-r--r--include/linux/dmaengine.h6
-rw-r--r--include/linux/dmar.h5
-rw-r--r--include/linux/efi.h15
-rw-r--r--include/linux/etherdevice.h3
-rw-r--r--include/linux/eventfd.h1
-rw-r--r--include/linux/export.h57
-rw-r--r--include/linux/f2fs_fs.h5
-rw-r--r--include/linux/fb.h10
-rw-r--r--include/linux/file.h8
-rw-r--r--include/linux/filter.h113
-rw-r--r--include/linux/fpga/fpga-mgr.h24
-rw-r--r--include/linux/fpga/fpga-region.h2
-rw-r--r--include/linux/fs.h78
-rw-r--r--include/linux/fsi-sbefifo.h33
-rw-r--r--include/linux/fsi.h12
-rw-r--r--include/linux/fsl/guts.h1
-rw-r--r--include/linux/fsl/ptp_qoriq.h44
-rw-r--r--include/linux/fsnotify.h14
-rw-r--r--include/linux/fsnotify_backend.h52
-rw-r--r--include/linux/ftrace.h19
-rw-r--r--include/linux/fwnode.h2
-rw-r--r--include/linux/genhd.h14
-rw-r--r--include/linux/gfp.h291
-rw-r--r--include/linux/gnss.h75
-rw-r--r--include/linux/goldfish.h14
-rw-r--r--include/linux/gpio.h2
-rw-r--r--include/linux/gpio/aspeed.h15
-rw-r--r--include/linux/gpio/consumer.h14
-rw-r--r--include/linux/gpio/driver.h3
-rw-r--r--include/linux/gpio_keys.h2
-rw-r--r--include/linux/hid.h20
-rw-r--r--include/linux/huge_mm.h9
-rw-r--r--include/linux/hugetlb.h5
-rw-r--r--include/linux/hwmon.h32
-rw-r--r--include/linux/hwspinlock.h37
-rw-r--r--include/linux/hyperv.h33
-rw-r--r--include/linux/i2c.h50
-rw-r--r--include/linux/idle_inject.h29
-rw-r--r--include/linux/idr.h11
-rw-r--r--include/linux/ieee80211.h437
-rw-r--r--include/linux/if_bridge.h4
-rw-r--r--include/linux/if_team.h18
-rw-r--r--include/linux/igmp.h2
-rw-r--r--include/linux/iio/buffer-dma.h2
-rw-r--r--include/linux/ima.h11
-rw-r--r--include/linux/inetdevice.h1
-rw-r--r--include/linux/init.h44
-rw-r--r--include/linux/init_task.h9
-rw-r--r--include/linux/inotify.h2
-rw-r--r--include/linux/input/mt.h2
-rw-r--r--include/linux/integrity.h13
-rw-r--r--include/linux/intel-iommu.h81
-rw-r--r--include/linux/iomap.h47
-rw-r--r--include/linux/iommu.h23
-rw-r--r--include/linux/ipc.h2
-rw-r--r--include/linux/ipc_namespace.h5
-rw-r--r--include/linux/irq.h1
-rw-r--r--include/linux/irqchip/arm-gic-v3.h13
-rw-r--r--include/linux/irqchip/arm-gic.h11
-rw-r--r--include/linux/irqdesc.h5
-rw-r--r--include/linux/irqflags.h15
-rw-r--r--include/linux/jiffies.h5
-rw-r--r--include/linux/joystick.h4
-rw-r--r--include/linux/jump_label.h6
-rw-r--r--include/linux/kasan.h13
-rw-r--r--include/linux/kcore.h2
-rw-r--r--include/linux/kernel.h37
-rw-r--r--include/linux/kernfs.h28
-rw-r--r--include/linux/kobject.h21
-rw-r--r--include/linux/kprobes.h53
-rw-r--r--include/linux/kthread.h1
-rw-r--r--include/linux/ktime.h7
-rw-r--r--include/linux/kvm_host.h28
-rw-r--r--include/linux/leds.h36
-rw-r--r--include/linux/libata.h26
-rw-r--r--include/linux/list.h30
-rw-r--r--include/linux/list_lru.h43
-rw-r--r--include/linux/lockd/lockd.h4
-rw-r--r--include/linux/lockdep.h6
-rw-r--r--include/linux/lsm_hooks.h8
-rw-r--r--include/linux/mailbox/mtk-cmdq-mailbox.h77
-rw-r--r--include/linux/marvell_phy.h2
-rw-r--r--include/linux/memblock.h76
-rw-r--r--include/linux/memcontrol.h105
-rw-r--r--include/linux/memory.h1
-rw-r--r--include/linux/memory_hotplug.h1
-rw-r--r--include/linux/mfd/as3722.h3
-rw-r--r--include/linux/mfd/cros_ec.h2
-rw-r--r--include/linux/mfd/cros_ec_commands.h229
-rw-r--r--include/linux/mfd/da9063/core.h15
-rw-r--r--include/linux/mfd/madera/core.h187
-rw-r--r--include/linux/mfd/madera/pdata.h59
-rw-r--r--include/linux/mfd/madera/registers.h3968
-rw-r--r--include/linux/mfd/rave-sp.h1
-rw-r--r--include/linux/mfd/rohm-bd718x7.h332
-rw-r--r--include/linux/mfd/syscon/imx6q-iomuxc-gpr.h3
-rw-r--r--include/linux/mfd/tmio.h3
-rw-r--r--include/linux/mfd/wm8994/pdata.h6
-rw-r--r--include/linux/mlx4/device.h8
-rw-r--r--include/linux/mlx5/device.h24
-rw-r--r--include/linux/mlx5/driver.h33
-rw-r--r--include/linux/mlx5/eswitch.h2
-rw-r--r--include/linux/mlx5/fs.h7
-rw-r--r--include/linux/mlx5/mlx5_ifc.h193
-rw-r--r--include/linux/mlx5/mlx5_ifc_fpga.h1
-rw-r--r--include/linux/mlx5/vport.h2
-rw-r--r--include/linux/mm.h59
-rw-r--r--include/linux/mm_types.h246
-rw-r--r--include/linux/mmc/host.h10
-rw-r--r--include/linux/mmc/mmc.h2
-rw-r--r--include/linux/mmu_notifier.h33
-rw-r--r--include/linux/mmzone.h57
-rw-r--r--include/linux/mod_devicetable.h15
-rw-r--r--include/linux/module.h4
-rw-r--r--include/linux/mroute_base.h3
-rw-r--r--include/linux/mtd/mtd.h8
-rw-r--r--include/linux/mtd/rawnand.h126
-rw-r--r--include/linux/mtd/spi-nor.h1
-rw-r--r--include/linux/mtd/spinand.h421
-rw-r--r--include/linux/net.h3
-rw-r--r--include/linux/net_dim.h1
-rw-r--r--include/linux/netdev_features.h2
-rw-r--r--include/linux/netdevice.h235
-rw-r--r--include/linux/netfilter.h37
-rw-r--r--include/linux/netfilter/nfnetlink.h1
-rw-r--r--include/linux/netfilter/nfnetlink_osf.h (renamed from include/linux/netfilter/nf_osf.h)23
-rw-r--r--include/linux/netfilter_bridge.h11
-rw-r--r--include/linux/netfilter_ipv4.h11
-rw-r--r--include/linux/netfilter_ipv6.h5
-rw-r--r--include/linux/netlink.h1
-rw-r--r--include/linux/nfs4.h9
-rw-r--r--include/linux/nfs_fs.h11
-rw-r--r--include/linux/nfs_fs_sb.h4
-rw-r--r--include/linux/nfs_xdr.h17
-rw-r--r--include/linux/nmi.h10
-rw-r--r--include/linux/node.h12
-rw-r--r--include/linux/nodemask.h2
-rw-r--r--include/linux/nvme.h72
-rw-r--r--include/linux/of_iommu.h4
-rw-r--r--include/linux/omap-mailbox.h5
-rw-r--r--include/linux/oom.h4
-rw-r--r--include/linux/openvswitch.h5
-rw-r--r--include/linux/overflow.h31
-rw-r--r--include/linux/page-flags.h5
-rw-r--r--include/linux/page_ext.h15
-rw-r--r--include/linux/pci-dma-compat.h8
-rw-r--r--include/linux/pci-epc.h16
-rw-r--r--include/linux/pci-epf.h1
-rw-r--r--include/linux/pci.h67
-rw-r--r--include/linux/pci_hotplug.h15
-rw-r--r--include/linux/pci_ids.h2
-rw-r--r--include/linux/percpu.h2
-rw-r--r--include/linux/percpu_ida.h83
-rw-r--r--include/linux/perf/arm_pmu.h11
-rw-r--r--include/linux/perf_event.h3
-rw-r--r--include/linux/phy.h12
-rw-r--r--include/linux/phylink.h1
-rw-r--r--include/linux/pid.h11
-rw-r--r--include/linux/pinctrl/pinconf.h3
-rw-r--r--include/linux/platform_data/ams-delta-fiq.h58
-rw-r--r--include/linux/platform_data/bt-nokia-h4p.h38
-rw-r--r--include/linux/platform_data/gpio-davinci.h3
-rw-r--r--include/linux/platform_data/i2c-hid.h7
-rw-r--r--include/linux/platform_data/i2c-ocores.h2
-rw-r--r--include/linux/platform_data/jz4740/jz4740_nand.h34
-rw-r--r--include/linux/platform_data/media/sii9234.h24
-rw-r--r--include/linux/platform_data/mmc-esdhc-imx.h2
-rw-r--r--include/linux/platform_data/mmp_dma.h4
-rw-r--r--include/linux/platform_data/mtd-davinci-aemif.h1
-rw-r--r--include/linux/platform_data/mtd-orion_nand.h1
-rw-r--r--include/linux/platform_data/pm33xx.h29
-rw-r--r--include/linux/platform_data/sh_ipmmu.h18
-rw-r--r--include/linux/platform_data/ti-sysc.h1
-rw-r--r--include/linux/platform_data/txx9/ndfmc.h30
-rw-r--r--include/linux/platform_device.h2
-rw-r--r--include/linux/pm_domain.h21
-rw-r--r--include/linux/poll.h12
-rw-r--r--include/linux/posix-timers.h4
-rw-r--r--include/linux/power_supply.h1
-rw-r--r--include/linux/preempt.h2
-rw-r--r--include/linux/printk.h10
-rw-r--r--include/linux/proc_fs.h2
-rw-r--r--include/linux/pti.h1
-rw-r--r--include/linux/ptrace.h2
-rw-r--r--include/linux/pxa2xx_ssp.h10
-rw-r--r--include/linux/qcom_scm.h4
-rw-r--r--include/linux/qed/qed_eth_if.h6
-rw-r--r--include/linux/qed/qed_if.h15
-rw-r--r--include/linux/random.h3
-rw-r--r--include/linux/rculist.h19
-rw-r--r--include/linux/rcupdate.h20
-rw-r--r--include/linux/rcutiny.h2
-rw-r--r--include/linux/reciprocal_div.h68
-rw-r--r--include/linux/refcount.h38
-rw-r--r--include/linux/regmap.h54
-rw-r--r--include/linux/regulator/driver.h4
-rw-r--r--include/linux/regulator/pfuze100.h11
-rw-r--r--include/linux/remoteproc.h19
-rw-r--r--include/linux/rfkill.h20
-rw-r--r--include/linux/rhashtable-types.h137
-rw-r--r--include/linux/rhashtable.h164
-rw-r--r--include/linux/ring_buffer.h3
-rw-r--r--include/linux/rmi.h2
-rw-r--r--include/linux/rtc.h21
-rw-r--r--include/linux/rtmutex.h7
-rw-r--r--include/linux/sbitmap.h2
-rw-r--r--include/linux/scatterlist.h18
-rw-r--r--include/linux/sched.h77
-rw-r--r--include/linux/sched/mm.h37
-rw-r--r--include/linux/sched/signal.h51
-rw-r--r--include/linux/sched/sysctl.h2
-rw-r--r--include/linux/sched/task.h2
-rw-r--r--include/linux/sched/user.h5
-rw-r--r--include/linux/sched_clock.h5
-rw-r--r--include/linux/sctp.h7
-rw-r--r--include/linux/security.h32
-rw-r--r--include/linux/selection.h6
-rw-r--r--include/linux/serial_8250.h3
-rw-r--r--include/linux/serial_core.h7
-rw-r--r--include/linux/sfp.h72
-rw-r--r--include/linux/shrinker.h21
-rw-r--r--include/linux/signal.h10
-rw-r--r--include/linux/skbuff.h32
-rw-r--r--include/linux/slab.h2
-rw-r--r--include/linux/slimbus.h60
-rw-r--r--include/linux/slub_def.h4
-rw-r--r--include/linux/smpboot.h15
-rw-r--r--include/linux/soc/qcom/llcc-qcom.h180
-rw-r--r--include/linux/soc/qcom/mdt_loader.h4
-rw-r--r--include/linux/soc/renesas/rcar-sysc.h13
-rw-r--r--include/linux/soc/samsung/exynos-regs-pmu.h8
-rw-r--r--include/linux/spi/adi_spi3.h254
-rw-r--r--include/linux/spi/spi-mem.h18
-rw-r--r--include/linux/spi/spi_bitbang.h5
-rw-r--r--include/linux/spinlock.h75
-rw-r--r--include/linux/srcu.h22
-rw-r--r--include/linux/ssb/ssb.h2
-rw-r--r--include/linux/stmmac.h1
-rw-r--r--include/linux/sunrpc/auth.h5
-rw-r--r--include/linux/sunrpc/clnt.h1
-rw-r--r--include/linux/sunrpc/metrics.h4
-rw-r--r--include/linux/sunrpc/svc.h4
-rw-r--r--include/linux/sunrpc/svcauth.h3
-rw-r--r--include/linux/suspend.h2
-rw-r--r--include/linux/swait.h36
-rw-r--r--include/linux/swap.h13
-rw-r--r--include/linux/swapfile.h2
-rw-r--r--include/linux/swapops.h15
-rw-r--r--include/linux/switchtec.h4
-rw-r--r--include/linux/syscalls.h26
-rw-r--r--include/linux/sysfs.h20
-rw-r--r--include/linux/t10-pi.h24
-rw-r--r--include/linux/tcp.h18
-rw-r--r--include/linux/time.h4
-rw-r--r--include/linux/time64.h1
-rw-r--r--include/linux/timekeeping.h20
-rw-r--r--include/linux/torture.h4
-rw-r--r--include/linux/tpm.h7
-rw-r--r--include/linux/tracehook.h2
-rw-r--r--include/linux/tracepoint.h59
-rw-r--r--include/linux/tty_ldisc.h4
-rw-r--r--include/linux/udp.h4
-rw-r--r--include/linux/uio_driver.h2
-rw-r--r--include/linux/uprobes.h2
-rw-r--r--include/linux/usb/audio-v3.h19
-rw-r--r--include/linux/usb/hcd.h1
-rw-r--r--include/linux/usb/pd.h1
-rw-r--r--include/linux/usb/tcpm.h11
-rw-r--r--include/linux/usb/typec.h55
-rw-r--r--include/linux/usb/typec_altmode.h160
-rw-r--r--include/linux/usb/typec_dp.h95
-rw-r--r--include/linux/usb/typec_mux.h2
-rw-r--r--include/linux/userfaultfd_k.h5
-rw-r--r--include/linux/verification.h6
-rw-r--r--include/linux/vga_switcheroo.h8
-rw-r--r--include/linux/virtio_config.h7
-rw-r--r--include/linux/vmacache.h6
-rw-r--r--include/linux/w1.h2
-rw-r--r--include/linux/wkup_m3_ipc.h9
-rw-r--r--include/linux/ww_mutex.h45
-rw-r--r--include/media/cec-notifier.h27
-rw-r--r--include/media/cec-pin.h4
-rw-r--r--include/media/cec.h12
-rw-r--r--include/media/dvb_frontend.h49
-rw-r--r--include/media/i2c/lm3560.h1
-rw-r--r--include/media/v4l2-common.h2
-rw-r--r--include/media/v4l2-ctrls.h4
-rw-r--r--include/media/v4l2-ioctl.h15
-rw-r--r--include/media/v4l2-mediabus.h2
-rw-r--r--include/media/v4l2-mem2mem.h56
-rw-r--r--include/media/vsp1.h2
-rw-r--r--include/misc/cxl-base.h10
-rw-r--r--include/misc/cxl.h68
-rw-r--r--include/net/9p/client.h11
-rw-r--r--include/net/act_api.h31
-rw-r--r--include/net/addrconf.h1
-rw-r--r--include/net/af_ieee802154.h1
-rw-r--r--include/net/af_rxrpc.h2
-rw-r--r--include/net/af_vsock.h4
-rw-r--r--include/net/bluetooth/bluetooth.h2
-rw-r--r--include/net/bluetooth/hci.h224
-rw-r--r--include/net/bluetooth/hci_core.h34
-rw-r--r--include/net/bluetooth/mgmt.h55
-rw-r--r--include/net/bond_3ad.h2
-rw-r--r--include/net/bonding.h13
-rw-r--r--include/net/busy_poll.h16
-rw-r--r--include/net/cfg80211.h118
-rw-r--r--include/net/dcbnl.h13
-rw-r--r--include/net/devlink.h195
-rw-r--r--include/net/dsa.h3
-rw-r--r--include/net/dst.h14
-rw-r--r--include/net/flow_dissector.h21
-rw-r--r--include/net/gen_stats.h4
-rw-r--r--include/net/ieee80211_radiotap.h123
-rw-r--r--include/net/inet_common.h2
-rw-r--r--include/net/inet_connection_sock.h6
-rw-r--r--include/net/inet_frag.h11
-rw-r--r--include/net/inet_sock.h9
-rw-r--r--include/net/ip.h27
-rw-r--r--include/net/ip6_fib.h10
-rw-r--r--include/net/ip6_route.h6
-rw-r--r--include/net/ip_tunnels.h8
-rw-r--r--include/net/ip_vs.h18
-rw-r--r--include/net/ipv6.h76
-rw-r--r--include/net/ipv6_frag.h104
-rw-r--r--include/net/iucv/af_iucv.h2
-rw-r--r--include/net/lag.h17
-rw-r--r--include/net/llc.h5
-rw-r--r--include/net/mac80211.h64
-rw-r--r--include/net/net_namespace.h11
-rw-r--r--include/net/netevent.h1
-rw-r--r--include/net/netfilter/ipv4/nf_conntrack_ipv4.h3
-rw-r--r--include/net/netfilter/nf_conntrack.h7
-rw-r--r--include/net/netfilter/nf_conntrack_core.h15
-rw-r--r--include/net/netfilter/nf_conntrack_count.h37
-rw-r--r--include/net/netfilter/nf_conntrack_helper.h4
-rw-r--r--include/net/netfilter/nf_conntrack_l3proto.h84
-rw-r--r--include/net/netfilter/nf_conntrack_l4proto.h16
-rw-r--r--include/net/netfilter/nf_conntrack_timeout.h39
-rw-r--r--include/net/netfilter/nf_flow_table.h2
-rw-r--r--include/net/netfilter/nf_log.h3
-rw-r--r--include/net/netfilter/nf_tables.h11
-rw-r--r--include/net/netfilter/nf_tables_core.h13
-rw-r--r--include/net/netfilter/nf_tproxy.h12
-rw-r--r--include/net/netns/hash.h7
-rw-r--r--include/net/netns/ipv4.h1
-rw-r--r--include/net/netns/ipv6.h2
-rw-r--r--include/net/netns/nftables.h1
-rw-r--r--include/net/pkt_cls.h30
-rw-r--r--include/net/pkt_sched.h7
-rw-r--r--include/net/sch_generic.h70
-rw-r--r--include/net/scm.h1
-rw-r--r--include/net/sctp/sctp.h3
-rw-r--r--include/net/sctp/structs.h52
-rw-r--r--include/net/seg6.h2
-rw-r--r--include/net/seg6_hmac.h2
-rw-r--r--include/net/seg6_local.h4
-rw-r--r--include/net/smc.h65
-rw-r--r--include/net/sock.h81
-rw-r--r--include/net/sock_reuseport.h19
-rw-r--r--include/net/tc_act/tc_csum.h1
-rw-r--r--include/net/tc_act/tc_pedit.h1
-rw-r--r--include/net/tc_act/tc_skbedit.h37
-rw-r--r--include/net/tc_act/tc_tunnel_key.h1
-rw-r--r--include/net/tcp.h78
-rw-r--r--include/net/tls.h92
-rw-r--r--include/net/transp_v6.h3
-rw-r--r--include/net/udp.h6
-rw-r--r--include/net/udp_tunnel.h6
-rw-r--r--include/net/xdp.h20
-rw-r--r--include/net/xdp_sock.h4
-rw-r--r--include/net/xfrm.h61
-rw-r--r--include/pcmcia/ds.h10
-rw-r--r--include/rdma/ib.h4
-rw-r--r--include/rdma/ib_addr.h6
-rw-r--r--include/rdma/ib_cache.h59
-rw-r--r--include/rdma/ib_cm.h18
-rw-r--r--include/rdma/ib_mad.h33
-rw-r--r--include/rdma/ib_sa.h49
-rw-r--r--include/rdma/ib_umem_odp.h3
-rw-r--r--include/rdma/ib_verbs.h270
-rw-r--r--include/rdma/opa_addr.h2
-rw-r--r--include/rdma/rdma_cm.h2
-rw-r--r--include/rdma/rdmavt_qp.h30
-rw-r--r--include/rdma/uverbs_ioctl.h627
-rw-r--r--include/rdma/uverbs_named_ioctl.h109
-rw-r--r--include/rdma/uverbs_std_types.h96
-rw-r--r--include/rdma/uverbs_types.h133
-rw-r--r--include/scsi/libsas.h2
-rw-r--r--include/scsi/scsi_cmnd.h13
-rw-r--r--include/scsi/scsi_device.h14
-rw-r--r--include/scsi/scsi_host.h1
-rw-r--r--include/soc/bcm2835/raspberrypi-firmware.h1
-rw-r--r--include/soc/fsl/dpaa2-fd.h438
-rw-r--r--include/soc/fsl/dpaa2-global.h177
-rw-r--r--include/soc/fsl/dpaa2-io.h115
-rw-r--r--include/soc/qcom/rpmh.h51
-rw-r--r--include/soc/qcom/tcs.h56
-rw-r--r--include/sound/ac97/codec.h8
-rw-r--r--include/sound/ac97/compat.h9
-rw-r--r--include/sound/ac97/controller.h8
-rw-r--r--include/sound/ac97/regs.h20
-rw-r--r--include/sound/ac97_codec.h25
-rw-r--r--include/sound/compress_driver.h21
-rw-r--r--include/sound/dmaengine_pcm.h14
-rw-r--r--include/sound/hda_component.h61
-rw-r--r--include/sound/hda_i915.h37
-rw-r--r--include/sound/hdaudio.h65
-rw-r--r--include/sound/hdaudio_ext.h123
-rw-r--r--include/sound/memalloc.h18
-rw-r--r--include/sound/pcm.h7
-rw-r--r--include/sound/pcm_params.h10
-rw-r--r--include/sound/pxa2xx-lib.h13
-rw-r--r--include/sound/rt5682.h40
-rw-r--r--include/sound/sb16_csp.h2
-rw-r--r--include/sound/seq_midi_event.h6
-rw-r--r--include/sound/seq_virmidi.h3
-rw-r--r--include/sound/sh_fsi.h13
-rw-r--r--include/sound/simple_card.h7
-rw-r--r--include/sound/simple_card_utils.h23
-rw-r--r--include/sound/soc-acpi-intel-match.h19
-rw-r--r--include/sound/soc-acpi.h13
-rw-r--r--include/sound/soc-dai.h15
-rw-r--r--include/sound/soc-dapm.h11
-rw-r--r--include/sound/soc-dpcm.h7
-rw-r--r--include/sound/soc-topology.h37
-rw-r--r--include/sound/soc.h31
-rw-r--r--include/target/iscsi/iscsi_target_core.h1
-rw-r--r--include/target/target_core_backend.h6
-rw-r--r--include/target/target_core_base.h16
-rw-r--r--include/target/target_core_fabric.h10
-rw-r--r--include/trace/events/btrfs.h3
-rw-r--r--include/trace/events/cgroup.h47
-rw-r--r--include/trace/events/clk.h36
-rw-r--r--include/trace/events/fib.h2
-rw-r--r--include/trace/events/filelock.h5
-rw-r--r--include/trace/events/fsi_master_ast_cf.h150
-rw-r--r--include/trace/events/fsi_master_gpio.h102
-rw-r--r--include/trace/events/net.h7
-rw-r--r--include/trace/events/power.h25
-rw-r--r--include/trace/events/preemptirq.h23
-rw-r--r--include/trace/events/rcu.h112
-rw-r--r--include/trace/events/rxrpc.h129
-rw-r--r--include/trace/events/sock.h30
-rw-r--r--include/trace/events/xdp.h5
-rw-r--r--include/uapi/asm-generic/socket.h3
-rw-r--r--include/uapi/asm-generic/unistd.h4
-rw-r--r--include/uapi/drm/amdgpu_drm.h27
-rw-r--r--include/uapi/drm/drm.h9
-rw-r--r--include/uapi/drm/drm_fourcc.h176
-rw-r--r--include/uapi/drm/drm_mode.h8
-rw-r--r--include/uapi/drm/vmwgfx_drm.h166
-rw-r--r--include/uapi/linux/aio_abi.h6
-rw-r--r--include/uapi/linux/audit.h4
-rw-r--r--include/uapi/linux/auto_fs.h8
-rw-r--r--include/uapi/linux/bcache.h12
-rw-r--r--include/uapi/linux/blkzoned.h2
-rw-r--r--include/uapi/linux/bpf.h130
-rw-r--r--include/uapi/linux/btf.h2
-rw-r--r--include/uapi/linux/can.h2
-rw-r--r--include/uapi/linux/cec.h2
-rw-r--r--include/uapi/linux/dcbnl.h3
-rw-r--r--include/uapi/linux/devlink.h42
-rw-r--r--include/uapi/linux/dvb/audio.h37
-rw-r--r--include/uapi/linux/dvb/video.h58
-rw-r--r--include/uapi/linux/elf.h2
-rw-r--r--include/uapi/linux/errqueue.h4
-rw-r--r--include/uapi/linux/ethtool.h11
-rw-r--r--include/uapi/linux/eventpoll.h8
-rw-r--r--include/uapi/linux/fpga-dfl.h179
-rw-r--r--include/uapi/linux/fsi.h58
-rw-r--r--include/uapi/linux/if_link.h17
-rw-r--r--include/uapi/linux/iio/types.h3
-rw-r--r--include/uapi/linux/ila.h1
-rw-r--r--include/uapi/linux/inotify.h1
-rw-r--r--include/uapi/linux/input.h9
-rw-r--r--include/uapi/linux/ip.h1
-rw-r--r--include/uapi/linux/joystick.h4
-rw-r--r--include/uapi/linux/keyboard.h23
-rw-r--r--include/uapi/linux/kfd_ioctl.h33
-rw-r--r--include/uapi/linux/kvm.h6
-rw-r--r--include/uapi/linux/kvm_para.h2
-rw-r--r--include/uapi/linux/l2tp.h15
-rw-r--r--include/uapi/linux/media-bus-format.h3
-rw-r--r--include/uapi/linux/media.h46
-rw-r--r--include/uapi/linux/mii.h1
-rw-r--r--include/uapi/linux/mroute.h2
-rw-r--r--include/uapi/linux/nbd.h3
-rw-r--r--include/uapi/linux/net_tstamp.h18
-rw-r--r--include/uapi/linux/netconf.h1
-rw-r--r--include/uapi/linux/netfilter/nf_tables.h124
-rw-r--r--include/uapi/linux/netfilter/nfnetlink_osf.h (renamed from include/uapi/linux/netfilter/nf_osf.h)34
-rw-r--r--include/uapi/linux/netfilter/xt_osf.h24
-rw-r--r--include/uapi/linux/netfilter_bridge.h11
-rw-r--r--include/uapi/linux/nl80211.h102
-rw-r--r--include/uapi/linux/openvswitch.h3
-rw-r--r--include/uapi/linux/pci_regs.h4
-rw-r--r--include/uapi/linux/pcitest.h3
-rw-r--r--include/uapi/linux/perf_event.h2
-rw-r--r--include/uapi/linux/pkt_cls.h41
-rw-r--r--include/uapi/linux/pkt_sched.h150
-rw-r--r--include/uapi/linux/pmu.h4
-rw-r--r--include/uapi/linux/ppp-ioctl.h2
-rw-r--r--include/uapi/linux/rds.h69
-rw-r--r--include/uapi/linux/rseq.h102
-rw-r--r--include/uapi/linux/rtnetlink.h7
-rw-r--r--include/uapi/linux/sctp.h5
-rw-r--r--include/uapi/linux/smc_diag.h25
-rw-r--r--include/uapi/linux/snmp.h3
-rw-r--r--include/uapi/linux/sysctl.h3
-rw-r--r--include/uapi/linux/target_core_user.h4
-rw-r--r--include/uapi/linux/tc_act/tc_pedit.h9
-rw-r--r--include/uapi/linux/tc_act/tc_skbedit.h2
-rw-r--r--include/uapi/linux/tc_act/tc_tunnel_key.h28
-rw-r--r--include/uapi/linux/tcp.h14
-rw-r--r--include/uapi/linux/time.h7
-rw-r--r--include/uapi/linux/tipc_netlink.h14
-rw-r--r--include/uapi/linux/types_32_64.h50
-rw-r--r--include/uapi/linux/usb/audio.h49
-rw-r--r--include/uapi/linux/usb/g_uvc.h39
-rw-r--r--include/uapi/linux/usb/tmc.h13
-rw-r--r--include/uapi/linux/uvcvideo.h2
-rw-r--r--include/uapi/linux/v4l2-controls.h20
-rw-r--r--include/uapi/linux/v4l2-subdev.h4
-rw-r--r--include/uapi/linux/vhost.h18
-rw-r--r--include/uapi/linux/videodev2.h8
-rw-r--r--include/uapi/linux/xfrm.h5
-rw-r--r--include/uapi/mtd/ubi-user.h18
-rw-r--r--include/uapi/rdma/cxgb4-abi.h32
-rw-r--r--include/uapi/rdma/hns-abi.h1
-rw-r--r--include/uapi/rdma/ib_user_ioctl_cmds.h7
-rw-r--r--include/uapi/rdma/ib_user_ioctl_verbs.h58
-rw-r--r--include/uapi/rdma/ib_user_verbs.h5
-rw-r--r--include/uapi/rdma/mlx5-abi.h6
-rw-r--r--include/uapi/rdma/mlx5_user_ioctl_cmds.h121
-rw-r--r--include/uapi/rdma/qedr-abi.h17
-rw-r--r--include/uapi/rdma/rdma_user_ioctl_cmds.h7
-rw-r--r--include/uapi/xen/gntdev.h106
-rw-r--r--include/video/mipi_display.h3
-rw-r--r--include/video/udlfb.h6
-rw-r--r--include/xen/grant_table.h21
-rw-r--r--include/xen/interface/io/displif.h8
-rw-r--r--include/xen/interface/io/kbdif.h78
-rw-r--r--include/xen/interface/io/sndif.h10
-rw-r--r--include/xen/mem-reservation.h59
-rw-r--r--include/xen/xen.h6
708 files changed, 21766 insertions, 5544 deletions
diff --git a/include/acpi/acconfig.h b/include/acpi/acconfig.h
index 012c55cb22ba..e6964e97acdd 100644
--- a/include/acpi/acconfig.h
+++ b/include/acpi/acconfig.h
@@ -89,7 +89,7 @@
/* Maximum object reference count (detects object deletion issues) */
-#define ACPI_MAX_REFERENCE_COUNT 0x1000
+#define ACPI_MAX_REFERENCE_COUNT 0x4000
/* Default page size for use in mapping memory for operation regions */
diff --git a/include/acpi/acexcep.h b/include/acpi/acexcep.h
index 226e5aeba6c2..856c56ef0143 100644
--- a/include/acpi/acexcep.h
+++ b/include/acpi/acexcep.h
@@ -59,6 +59,12 @@ struct acpi_exception_info {
#define AE_OK (acpi_status) 0x0000
+#define ACPI_ENV_EXCEPTION(status) (status & AE_CODE_ENVIRONMENTAL)
+#define ACPI_AML_EXCEPTION(status) (status & AE_CODE_AML)
+#define ACPI_PROG_EXCEPTION(status) (status & AE_CODE_PROGRAMMER)
+#define ACPI_TABLE_EXCEPTION(status) (status & AE_CODE_ACPI_TABLES)
+#define ACPI_CNTL_EXCEPTION(status) (status & AE_CODE_CONTROL)
+
/*
* Environmental exceptions
*/
diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h
index 48d84f0d9547..9566f99cc3c0 100644
--- a/include/acpi/acpixf.h
+++ b/include/acpi/acpixf.h
@@ -12,7 +12,7 @@
/* Current ACPICA subsystem version in YYYYMMDD format */
-#define ACPI_CA_VERSION 0x20180531
+#define ACPI_CA_VERSION 0x20180810
#include <acpi/acconfig.h>
#include <acpi/actypes.h>
diff --git a/include/acpi/ghes.h b/include/acpi/ghes.h
index 1624e2be485c..82cb4eb225a4 100644
--- a/include/acpi/ghes.h
+++ b/include/acpi/ghes.h
@@ -118,6 +118,10 @@ static inline void *acpi_hest_get_next(struct acpi_hest_generic_data *gdata)
(void *)section - (void *)(estatus + 1) < estatus->data_length; \
section = acpi_hest_get_next(section))
+#ifdef CONFIG_ACPI_APEI_SEA
int ghes_notify_sea(void);
+#else
+static inline int ghes_notify_sea(void) { return -ENOENT; }
+#endif
#endif /* GHES_H */
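
The new stub keeps call sites free of preprocessor conditionals: when CONFIG_ACPI_APEI_SEA is disabled, ghes_notify_sea() is still callable and simply reports -ENOENT. A hedged sketch of a caller that relies on this (the handler name and fallback message are illustrative, not part of this patch):

#include <linux/printk.h>
#include <acpi/ghes.h>

/* Hypothetical fault handler: calls ghes_notify_sea() unconditionally and
 * only falls back to default handling when GHES did not claim the error
 * (including the CONFIG_ACPI_APEI_SEA=n case, where the stub returns -ENOENT). */
static void handle_sea_fault(void)
{
	if (ghes_notify_sea() == 0)
		return;		/* error record consumed via GHES */

	pr_err_ratelimited("unhandled synchronous external abort\n");
}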
diff --git a/include/acpi/processor.h b/include/acpi/processor.h
index 40a916efd7c0..1194a4c78d55 100644
--- a/include/acpi/processor.h
+++ b/include/acpi/processor.h
@@ -309,7 +309,7 @@ static inline void acpi_processor_ppc_exit(void)
{
return;
}
-static inline int acpi_processor_ppc_has_changed(struct acpi_processor *pr,
+static inline void acpi_processor_ppc_has_changed(struct acpi_processor *pr,
int event_flag)
{
static unsigned int printout = 1;
@@ -320,7 +320,6 @@ static inline int acpi_processor_ppc_has_changed(struct acpi_processor *pr,
"Consider compiling CPUfreq support into your kernel.\n");
printout = 0;
}
- return 0;
}
static inline int acpi_processor_get_bios_limit(int cpu, unsigned int *limit)
{
diff --git a/include/asm-generic/atomic-instrumented.h b/include/asm-generic/atomic-instrumented.h
index ec07f23678ea..0d4b1d3dbc1e 100644
--- a/include/asm-generic/atomic-instrumented.h
+++ b/include/asm-generic/atomic-instrumented.h
@@ -84,42 +84,59 @@ static __always_inline bool atomic64_try_cmpxchg(atomic64_t *v, s64 *old, s64 ne
}
#endif
-static __always_inline int __atomic_add_unless(atomic_t *v, int a, int u)
+#ifdef arch_atomic_fetch_add_unless
+#define atomic_fetch_add_unless atomic_fetch_add_unless
+static __always_inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
kasan_check_write(v, sizeof(*v));
- return __arch_atomic_add_unless(v, a, u);
+ return arch_atomic_fetch_add_unless(v, a, u);
}
+#endif
-
-static __always_inline bool atomic64_add_unless(atomic64_t *v, s64 a, s64 u)
+#ifdef arch_atomic64_fetch_add_unless
+#define atomic64_fetch_add_unless atomic64_fetch_add_unless
+static __always_inline s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
{
kasan_check_write(v, sizeof(*v));
- return arch_atomic64_add_unless(v, a, u);
+ return arch_atomic64_fetch_add_unless(v, a, u);
}
+#endif
+#ifdef arch_atomic_inc
+#define atomic_inc atomic_inc
static __always_inline void atomic_inc(atomic_t *v)
{
kasan_check_write(v, sizeof(*v));
arch_atomic_inc(v);
}
+#endif
+#ifdef arch_atomic64_inc
+#define atomic64_inc atomic64_inc
static __always_inline void atomic64_inc(atomic64_t *v)
{
kasan_check_write(v, sizeof(*v));
arch_atomic64_inc(v);
}
+#endif
+#ifdef arch_atomic_dec
+#define atomic_dec atomic_dec
static __always_inline void atomic_dec(atomic_t *v)
{
kasan_check_write(v, sizeof(*v));
arch_atomic_dec(v);
}
+#endif
+#ifdef arch_atomic64_dec
+#define atomic64_dec atomic64_dec
static __always_inline void atomic64_dec(atomic64_t *v)
{
kasan_check_write(v, sizeof(*v));
arch_atomic64_dec(v);
}
+#endif
static __always_inline void atomic_add(int i, atomic_t *v)
{
@@ -181,65 +198,95 @@ static __always_inline void atomic64_xor(s64 i, atomic64_t *v)
arch_atomic64_xor(i, v);
}
+#ifdef arch_atomic_inc_return
+#define atomic_inc_return atomic_inc_return
static __always_inline int atomic_inc_return(atomic_t *v)
{
kasan_check_write(v, sizeof(*v));
return arch_atomic_inc_return(v);
}
+#endif
+#ifdef arch_atomic64_inc_return
+#define atomic64_inc_return atomic64_inc_return
static __always_inline s64 atomic64_inc_return(atomic64_t *v)
{
kasan_check_write(v, sizeof(*v));
return arch_atomic64_inc_return(v);
}
+#endif
+#ifdef arch_atomic_dec_return
+#define atomic_dec_return atomic_dec_return
static __always_inline int atomic_dec_return(atomic_t *v)
{
kasan_check_write(v, sizeof(*v));
return arch_atomic_dec_return(v);
}
+#endif
+#ifdef arch_atomic64_dec_return
+#define atomic64_dec_return atomic64_dec_return
static __always_inline s64 atomic64_dec_return(atomic64_t *v)
{
kasan_check_write(v, sizeof(*v));
return arch_atomic64_dec_return(v);
}
+#endif
-static __always_inline s64 atomic64_inc_not_zero(atomic64_t *v)
+#ifdef arch_atomic64_inc_not_zero
+#define atomic64_inc_not_zero atomic64_inc_not_zero
+static __always_inline bool atomic64_inc_not_zero(atomic64_t *v)
{
kasan_check_write(v, sizeof(*v));
return arch_atomic64_inc_not_zero(v);
}
+#endif
+#ifdef arch_atomic64_dec_if_positive
+#define atomic64_dec_if_positive atomic64_dec_if_positive
static __always_inline s64 atomic64_dec_if_positive(atomic64_t *v)
{
kasan_check_write(v, sizeof(*v));
return arch_atomic64_dec_if_positive(v);
}
+#endif
+#ifdef arch_atomic_dec_and_test
+#define atomic_dec_and_test atomic_dec_and_test
static __always_inline bool atomic_dec_and_test(atomic_t *v)
{
kasan_check_write(v, sizeof(*v));
return arch_atomic_dec_and_test(v);
}
+#endif
+#ifdef arch_atomic64_dec_and_test
+#define atomic64_dec_and_test atomic64_dec_and_test
static __always_inline bool atomic64_dec_and_test(atomic64_t *v)
{
kasan_check_write(v, sizeof(*v));
return arch_atomic64_dec_and_test(v);
}
+#endif
+#ifdef arch_atomic_inc_and_test
+#define atomic_inc_and_test atomic_inc_and_test
static __always_inline bool atomic_inc_and_test(atomic_t *v)
{
kasan_check_write(v, sizeof(*v));
return arch_atomic_inc_and_test(v);
}
+#endif
+#ifdef arch_atomic64_inc_and_test
+#define atomic64_inc_and_test atomic64_inc_and_test
static __always_inline bool atomic64_inc_and_test(atomic64_t *v)
{
kasan_check_write(v, sizeof(*v));
return arch_atomic64_inc_and_test(v);
}
+#endif
static __always_inline int atomic_add_return(int i, atomic_t *v)
{
@@ -325,152 +372,96 @@ static __always_inline s64 atomic64_fetch_xor(s64 i, atomic64_t *v)
return arch_atomic64_fetch_xor(i, v);
}
+#ifdef arch_atomic_sub_and_test
+#define atomic_sub_and_test atomic_sub_and_test
static __always_inline bool atomic_sub_and_test(int i, atomic_t *v)
{
kasan_check_write(v, sizeof(*v));
return arch_atomic_sub_and_test(i, v);
}
+#endif
+#ifdef arch_atomic64_sub_and_test
+#define atomic64_sub_and_test atomic64_sub_and_test
static __always_inline bool atomic64_sub_and_test(s64 i, atomic64_t *v)
{
kasan_check_write(v, sizeof(*v));
return arch_atomic64_sub_and_test(i, v);
}
+#endif
+#ifdef arch_atomic_add_negative
+#define atomic_add_negative atomic_add_negative
static __always_inline bool atomic_add_negative(int i, atomic_t *v)
{
kasan_check_write(v, sizeof(*v));
return arch_atomic_add_negative(i, v);
}
+#endif
+#ifdef arch_atomic64_add_negative
+#define atomic64_add_negative atomic64_add_negative
static __always_inline bool atomic64_add_negative(s64 i, atomic64_t *v)
{
kasan_check_write(v, sizeof(*v));
return arch_atomic64_add_negative(i, v);
}
+#endif
-static __always_inline unsigned long
-cmpxchg_size(volatile void *ptr, unsigned long old, unsigned long new, int size)
-{
- kasan_check_write(ptr, size);
- switch (size) {
- case 1:
- return arch_cmpxchg((u8 *)ptr, (u8)old, (u8)new);
- case 2:
- return arch_cmpxchg((u16 *)ptr, (u16)old, (u16)new);
- case 4:
- return arch_cmpxchg((u32 *)ptr, (u32)old, (u32)new);
- case 8:
- BUILD_BUG_ON(sizeof(unsigned long) != 8);
- return arch_cmpxchg((u64 *)ptr, (u64)old, (u64)new);
- }
- BUILD_BUG();
- return 0;
-}
+#define xchg(ptr, new) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ arch_xchg(__ai_ptr, (new)); \
+})
#define cmpxchg(ptr, old, new) \
({ \
- ((__typeof__(*(ptr)))cmpxchg_size((ptr), (unsigned long)(old), \
- (unsigned long)(new), sizeof(*(ptr)))); \
+ typeof(ptr) __ai_ptr = (ptr); \
+ kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ arch_cmpxchg(__ai_ptr, (old), (new)); \
})
-static __always_inline unsigned long
-sync_cmpxchg_size(volatile void *ptr, unsigned long old, unsigned long new,
- int size)
-{
- kasan_check_write(ptr, size);
- switch (size) {
- case 1:
- return arch_sync_cmpxchg((u8 *)ptr, (u8)old, (u8)new);
- case 2:
- return arch_sync_cmpxchg((u16 *)ptr, (u16)old, (u16)new);
- case 4:
- return arch_sync_cmpxchg((u32 *)ptr, (u32)old, (u32)new);
- case 8:
- BUILD_BUG_ON(sizeof(unsigned long) != 8);
- return arch_sync_cmpxchg((u64 *)ptr, (u64)old, (u64)new);
- }
- BUILD_BUG();
- return 0;
-}
-
#define sync_cmpxchg(ptr, old, new) \
({ \
- ((__typeof__(*(ptr)))sync_cmpxchg_size((ptr), \
- (unsigned long)(old), (unsigned long)(new), \
- sizeof(*(ptr)))); \
+ typeof(ptr) __ai_ptr = (ptr); \
+ kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ arch_sync_cmpxchg(__ai_ptr, (old), (new)); \
})
-static __always_inline unsigned long
-cmpxchg_local_size(volatile void *ptr, unsigned long old, unsigned long new,
- int size)
-{
- kasan_check_write(ptr, size);
- switch (size) {
- case 1:
- return arch_cmpxchg_local((u8 *)ptr, (u8)old, (u8)new);
- case 2:
- return arch_cmpxchg_local((u16 *)ptr, (u16)old, (u16)new);
- case 4:
- return arch_cmpxchg_local((u32 *)ptr, (u32)old, (u32)new);
- case 8:
- BUILD_BUG_ON(sizeof(unsigned long) != 8);
- return arch_cmpxchg_local((u64 *)ptr, (u64)old, (u64)new);
- }
- BUILD_BUG();
- return 0;
-}
-
#define cmpxchg_local(ptr, old, new) \
({ \
- ((__typeof__(*(ptr)))cmpxchg_local_size((ptr), \
- (unsigned long)(old), (unsigned long)(new), \
- sizeof(*(ptr)))); \
+ typeof(ptr) __ai_ptr = (ptr); \
+ kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ arch_cmpxchg_local(__ai_ptr, (old), (new)); \
})
-static __always_inline u64
-cmpxchg64_size(volatile u64 *ptr, u64 old, u64 new)
-{
- kasan_check_write(ptr, sizeof(*ptr));
- return arch_cmpxchg64(ptr, old, new);
-}
-
#define cmpxchg64(ptr, old, new) \
({ \
- ((__typeof__(*(ptr)))cmpxchg64_size((ptr), (u64)(old), \
- (u64)(new))); \
+ typeof(ptr) __ai_ptr = (ptr); \
+ kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ arch_cmpxchg64(__ai_ptr, (old), (new)); \
})
-static __always_inline u64
-cmpxchg64_local_size(volatile u64 *ptr, u64 old, u64 new)
-{
- kasan_check_write(ptr, sizeof(*ptr));
- return arch_cmpxchg64_local(ptr, old, new);
-}
-
#define cmpxchg64_local(ptr, old, new) \
({ \
- ((__typeof__(*(ptr)))cmpxchg64_local_size((ptr), (u64)(old), \
- (u64)(new))); \
+ typeof(ptr) __ai_ptr = (ptr); \
+ kasan_check_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ arch_cmpxchg64_local(__ai_ptr, (old), (new)); \
})
-/*
- * Originally we had the following code here:
- * __typeof__(p1) ____p1 = (p1);
- * kasan_check_write(____p1, 2 * sizeof(*____p1));
- * arch_cmpxchg_double(____p1, (p2), (o1), (o2), (n1), (n2));
- * But it leads to compilation failures (see gcc issue 72873).
- * So for now it's left non-instrumented.
- * There are few callers of cmpxchg_double(), so it's not critical.
- */
#define cmpxchg_double(p1, p2, o1, o2, n1, n2) \
({ \
- arch_cmpxchg_double((p1), (p2), (o1), (o2), (n1), (n2)); \
+ typeof(p1) __ai_p1 = (p1); \
+ kasan_check_write(__ai_p1, 2 * sizeof(*__ai_p1)); \
+ arch_cmpxchg_double(__ai_p1, (p2), (o1), (o2), (n1), (n2)); \
})
-#define cmpxchg_double_local(p1, p2, o1, o2, n1, n2) \
-({ \
- arch_cmpxchg_double_local((p1), (p2), (o1), (o2), (n1), (n2)); \
+#define cmpxchg_double_local(p1, p2, o1, o2, n1, n2) \
+({ \
+ typeof(p1) __ai_p1 = (p1); \
+ kasan_check_write(__ai_p1, 2 * sizeof(*__ai_p1)); \
+ arch_cmpxchg_double_local(__ai_p1, (p2), (o1), (o2), (n1), (n2)); \
})
#endif /* _LINUX_ATOMIC_INSTRUMENTED_H */
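
All of the rewritten xchg()/cmpxchg() wrappers above follow one shape: capture the pointer once in a statement expression, run kasan_check_write() on that captured pointer, then hand it to the arch_ primitive. Evaluating the pointer argument exactly once matters when the argument has side effects. A stand-alone sketch of the idiom in GNU C (gcc/clang), with a printf standing in for the instrumentation call; none of these names are kernel APIs:

#include <stdio.h>
#include <stddef.h>

static void check_write(void *p, size_t n)	/* stand-in for kasan_check_write() */
{
	printf("checking %p (%zu bytes)\n", p, n);
}

/* Same shape as the wrappers above: the 'ptr' argument is evaluated once. */
#define checked_store(ptr, val)					\
({								\
	__typeof__(ptr) __ai_ptr = (ptr);			\
	check_write(__ai_ptr, sizeof(*__ai_ptr));		\
	*__ai_ptr = (val);					\
})

int main(void)
{
	int buf[2] = { 0, 0 };
	int *p = buf;

	checked_store(p++, 42);		/* 'p++' happens once, not twice */
	return (buf[0] == 42 && p == &buf[1]) ? 0 : 1;
}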
diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h
index abe6dd9ca2a8..13324aa828eb 100644
--- a/include/asm-generic/atomic.h
+++ b/include/asm-generic/atomic.h
@@ -186,11 +186,6 @@ ATOMIC_OP(xor, ^)
#include <linux/irqflags.h>
-static inline int atomic_add_negative(int i, atomic_t *v)
-{
- return atomic_add_return(i, v) < 0;
-}
-
static inline void atomic_add(int i, atomic_t *v)
{
atomic_add_return(i, v);
@@ -201,35 +196,7 @@ static inline void atomic_sub(int i, atomic_t *v)
atomic_sub_return(i, v);
}
-static inline void atomic_inc(atomic_t *v)
-{
- atomic_add_return(1, v);
-}
-
-static inline void atomic_dec(atomic_t *v)
-{
- atomic_sub_return(1, v);
-}
-
-#define atomic_dec_return(v) atomic_sub_return(1, (v))
-#define atomic_inc_return(v) atomic_add_return(1, (v))
-
-#define atomic_sub_and_test(i, v) (atomic_sub_return((i), (v)) == 0)
-#define atomic_dec_and_test(v) (atomic_dec_return(v) == 0)
-#define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
-
#define atomic_xchg(ptr, v) (xchg(&(ptr)->counter, (v)))
#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), (old), (new)))
-#ifndef __atomic_add_unless
-static inline int __atomic_add_unless(atomic_t *v, int a, int u)
-{
- int c, old;
- c = atomic_read(v);
- while (c != u && (old = atomic_cmpxchg(v, c, c + a)) != c)
- c = old;
- return c;
-}
-#endif
-
#endif /* __ASM_GENERIC_ATOMIC_H */
diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h
index 8d28eb010d0d..97b28b7f1f29 100644
--- a/include/asm-generic/atomic64.h
+++ b/include/asm-generic/atomic64.h
@@ -11,6 +11,7 @@
*/
#ifndef _ASM_GENERIC_ATOMIC64_H
#define _ASM_GENERIC_ATOMIC64_H
+#include <linux/types.h>
typedef struct {
long long counter;
@@ -50,18 +51,10 @@ ATOMIC64_OPS(xor)
#undef ATOMIC64_OP
extern long long atomic64_dec_if_positive(atomic64_t *v);
+#define atomic64_dec_if_positive atomic64_dec_if_positive
extern long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n);
extern long long atomic64_xchg(atomic64_t *v, long long new);
-extern int atomic64_add_unless(atomic64_t *v, long long a, long long u);
-
-#define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
-#define atomic64_inc(v) atomic64_add(1LL, (v))
-#define atomic64_inc_return(v) atomic64_add_return(1LL, (v))
-#define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
-#define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0)
-#define atomic64_dec(v) atomic64_sub(1LL, (v))
-#define atomic64_dec_return(v) atomic64_sub_return(1LL, (v))
-#define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
-#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
+extern long long atomic64_fetch_add_unless(atomic64_t *v, long long a, long long u);
+#define atomic64_fetch_add_unless atomic64_fetch_add_unless
#endif /* _ASM_GENERIC_ATOMIC64_H */
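
The added "#define atomic64_dec_if_positive atomic64_dec_if_positive" and "#define atomic64_fetch_add_unless atomic64_fetch_add_unless" lines are presence markers: a macro that expands to its own name lets later generic headers test with #ifdef whether the operation is already provided before emitting a fallback. A small self-contained illustration of the idiom (the my_op names are made up, not kernel API):

#include <stdio.h>

/* ---- provider, analogous to asm-generic/atomic64.h ---- */
static long long my_op(long long v) { return v + 1; }
#define my_op my_op	/* presence marker: "my_op is already defined" */

/* ---- generic fallback section, analogous to linux/atomic.h ---- */
#ifndef my_op
static long long my_op(long long v) { return v; }	/* skipped here */
#endif

int main(void)
{
	printf("%lld\n", my_op(41));	/* 42: the provider's version runs */
	return 0;
}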
diff --git a/include/asm-generic/bitops/atomic.h b/include/asm-generic/bitops/atomic.h
index 04deffaf5f7d..dd90c9792909 100644
--- a/include/asm-generic/bitops/atomic.h
+++ b/include/asm-generic/bitops/atomic.h
@@ -2,189 +2,67 @@
#ifndef _ASM_GENERIC_BITOPS_ATOMIC_H_
#define _ASM_GENERIC_BITOPS_ATOMIC_H_
-#include <asm/types.h>
-#include <linux/irqflags.h>
-
-#ifdef CONFIG_SMP
-#include <asm/spinlock.h>
-#include <asm/cache.h> /* we use L1_CACHE_BYTES */
-
-/* Use an array of spinlocks for our atomic_ts.
- * Hash function to index into a different SPINLOCK.
- * Since "a" is usually an address, use one spinlock per cacheline.
- */
-# define ATOMIC_HASH_SIZE 4
-# define ATOMIC_HASH(a) (&(__atomic_hash[ (((unsigned long) a)/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1) ]))
-
-extern arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;
-
-/* Can't use raw_spin_lock_irq because of #include problems, so
- * this is the substitute */
-#define _atomic_spin_lock_irqsave(l,f) do { \
- arch_spinlock_t *s = ATOMIC_HASH(l); \
- local_irq_save(f); \
- arch_spin_lock(s); \
-} while(0)
-
-#define _atomic_spin_unlock_irqrestore(l,f) do { \
- arch_spinlock_t *s = ATOMIC_HASH(l); \
- arch_spin_unlock(s); \
- local_irq_restore(f); \
-} while(0)
-
-
-#else
-# define _atomic_spin_lock_irqsave(l,f) do { local_irq_save(f); } while (0)
-# define _atomic_spin_unlock_irqrestore(l,f) do { local_irq_restore(f); } while (0)
-#endif
+#include <linux/atomic.h>
+#include <linux/compiler.h>
+#include <asm/barrier.h>
/*
- * NMI events can occur at any time, including when interrupts have been
- * disabled by *_irqsave(). So you can get NMI events occurring while a
- * *_bit function is holding a spin lock. If the NMI handler also wants
- * to do bit manipulation (and they do) then you can get a deadlock
- * between the original caller of *_bit() and the NMI handler.
- *
- * by Keith Owens
+ * Implementation of atomic bitops using atomic-fetch ops.
+ * See Documentation/atomic_bitops.txt for details.
*/
-/**
- * set_bit - Atomically set a bit in memory
- * @nr: the bit to set
- * @addr: the address to start counting from
- *
- * This function is atomic and may not be reordered. See __set_bit()
- * if you do not require the atomic guarantees.
- *
- * Note: there are no guarantees that this function will not be reordered
- * on non x86 architectures, so if you are writing portable code,
- * make sure not to rely on its reordering guarantees.
- *
- * Note that @nr may be almost arbitrarily large; this function is not
- * restricted to acting on a single-word quantity.
- */
-static inline void set_bit(int nr, volatile unsigned long *addr)
+static inline void set_bit(unsigned int nr, volatile unsigned long *p)
{
- unsigned long mask = BIT_MASK(nr);
- unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
- unsigned long flags;
-
- _atomic_spin_lock_irqsave(p, flags);
- *p |= mask;
- _atomic_spin_unlock_irqrestore(p, flags);
+ p += BIT_WORD(nr);
+ atomic_long_or(BIT_MASK(nr), (atomic_long_t *)p);
}
-/**
- * clear_bit - Clears a bit in memory
- * @nr: Bit to clear
- * @addr: Address to start counting from
- *
- * clear_bit() is atomic and may not be reordered. However, it does
- * not contain a memory barrier, so if it is used for locking purposes,
- * you should call smp_mb__before_atomic() and/or smp_mb__after_atomic()
- * in order to ensure changes are visible on other processors.
- */
-static inline void clear_bit(int nr, volatile unsigned long *addr)
+static inline void clear_bit(unsigned int nr, volatile unsigned long *p)
{
- unsigned long mask = BIT_MASK(nr);
- unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
- unsigned long flags;
-
- _atomic_spin_lock_irqsave(p, flags);
- *p &= ~mask;
- _atomic_spin_unlock_irqrestore(p, flags);
+ p += BIT_WORD(nr);
+ atomic_long_andnot(BIT_MASK(nr), (atomic_long_t *)p);
}
-/**
- * change_bit - Toggle a bit in memory
- * @nr: Bit to change
- * @addr: Address to start counting from
- *
- * change_bit() is atomic and may not be reordered. It may be
- * reordered on other architectures than x86.
- * Note that @nr may be almost arbitrarily large; this function is not
- * restricted to acting on a single-word quantity.
- */
-static inline void change_bit(int nr, volatile unsigned long *addr)
+static inline void change_bit(unsigned int nr, volatile unsigned long *p)
{
- unsigned long mask = BIT_MASK(nr);
- unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
- unsigned long flags;
-
- _atomic_spin_lock_irqsave(p, flags);
- *p ^= mask;
- _atomic_spin_unlock_irqrestore(p, flags);
+ p += BIT_WORD(nr);
+ atomic_long_xor(BIT_MASK(nr), (atomic_long_t *)p);
}
-/**
- * test_and_set_bit - Set a bit and return its old value
- * @nr: Bit to set
- * @addr: Address to count from
- *
- * This operation is atomic and cannot be reordered.
- * It may be reordered on other architectures than x86.
- * It also implies a memory barrier.
- */
-static inline int test_and_set_bit(int nr, volatile unsigned long *addr)
+static inline int test_and_set_bit(unsigned int nr, volatile unsigned long *p)
{
+ long old;
unsigned long mask = BIT_MASK(nr);
- unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
- unsigned long old;
- unsigned long flags;
- _atomic_spin_lock_irqsave(p, flags);
- old = *p;
- *p = old | mask;
- _atomic_spin_unlock_irqrestore(p, flags);
+ p += BIT_WORD(nr);
+ if (READ_ONCE(*p) & mask)
+ return 1;
- return (old & mask) != 0;
+ old = atomic_long_fetch_or(mask, (atomic_long_t *)p);
+ return !!(old & mask);
}
-/**
- * test_and_clear_bit - Clear a bit and return its old value
- * @nr: Bit to clear
- * @addr: Address to count from
- *
- * This operation is atomic and cannot be reordered.
- * It can be reorderdered on other architectures other than x86.
- * It also implies a memory barrier.
- */
-static inline int test_and_clear_bit(int nr, volatile unsigned long *addr)
+static inline int test_and_clear_bit(unsigned int nr, volatile unsigned long *p)
{
+ long old;
unsigned long mask = BIT_MASK(nr);
- unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
- unsigned long old;
- unsigned long flags;
- _atomic_spin_lock_irqsave(p, flags);
- old = *p;
- *p = old & ~mask;
- _atomic_spin_unlock_irqrestore(p, flags);
+ p += BIT_WORD(nr);
+ if (!(READ_ONCE(*p) & mask))
+ return 0;
- return (old & mask) != 0;
+ old = atomic_long_fetch_andnot(mask, (atomic_long_t *)p);
+ return !!(old & mask);
}
-/**
- * test_and_change_bit - Change a bit and return its old value
- * @nr: Bit to change
- * @addr: Address to count from
- *
- * This operation is atomic and cannot be reordered.
- * It also implies a memory barrier.
- */
-static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
+static inline int test_and_change_bit(unsigned int nr, volatile unsigned long *p)
{
+ long old;
unsigned long mask = BIT_MASK(nr);
- unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr);
- unsigned long old;
- unsigned long flags;
-
- _atomic_spin_lock_irqsave(p, flags);
- old = *p;
- *p = old ^ mask;
- _atomic_spin_unlock_irqrestore(p, flags);
- return (old & mask) != 0;
+ p += BIT_WORD(nr);
+ old = atomic_long_fetch_xor(mask, (atomic_long_t *)p);
+ return !!(old & mask);
}
#endif /* _ASM_GENERIC_BITOPS_ATOMIC_H */
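
The rewritten helpers share one addressing step: BIT_WORD(nr) picks the unsigned long that holds bit nr, and BIT_MASK(nr) is the mask inside that word, after which the work is delegated to an atomic_long_* fetch op. A plain-C, user-space sketch of just that addressing, with BIT_WORD()/BIT_MASK() reimplemented locally rather than taken from the kernel headers:

#include <stdio.h>

#define BITS_PER_LONG	(8 * sizeof(unsigned long))
#define BIT_WORD(nr)	((nr) / BITS_PER_LONG)
#define BIT_MASK(nr)	(1UL << ((nr) % BITS_PER_LONG))

int main(void)
{
	unsigned long map[2] = { 0, 0 };
	unsigned int nr = 70;	/* word 1, bit 6 on a 64-bit long */

	/* non-atomic analogue of the new set_bit(): same word/mask addressing,
	 * but the kernel version uses atomic_long_or() on the target word */
	map[BIT_WORD(nr)] |= BIT_MASK(nr);

	printf("word=%zu mask=%#lx\n", (size_t)BIT_WORD(nr), BIT_MASK(nr));
	return 0;
}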
diff --git a/include/asm-generic/bitops/lock.h b/include/asm-generic/bitops/lock.h
index 67ab280ad134..3ae021368f48 100644
--- a/include/asm-generic/bitops/lock.h
+++ b/include/asm-generic/bitops/lock.h
@@ -2,6 +2,10 @@
#ifndef _ASM_GENERIC_BITOPS_LOCK_H_
#define _ASM_GENERIC_BITOPS_LOCK_H_
+#include <linux/atomic.h>
+#include <linux/compiler.h>
+#include <asm/barrier.h>
+
/**
* test_and_set_bit_lock - Set a bit and return its old value, for lock
* @nr: Bit to set
@@ -11,7 +15,20 @@
* the returned value is 0.
* It can be used to implement bit locks.
*/
-#define test_and_set_bit_lock(nr, addr) test_and_set_bit(nr, addr)
+static inline int test_and_set_bit_lock(unsigned int nr,
+ volatile unsigned long *p)
+{
+ long old;
+ unsigned long mask = BIT_MASK(nr);
+
+ p += BIT_WORD(nr);
+ if (READ_ONCE(*p) & mask)
+ return 1;
+
+ old = atomic_long_fetch_or_acquire(mask, (atomic_long_t *)p);
+ return !!(old & mask);
+}
+
/**
* clear_bit_unlock - Clear a bit in memory, for unlock
@@ -20,11 +37,11 @@
*
* This operation is atomic and provides release barrier semantics.
*/
-#define clear_bit_unlock(nr, addr) \
-do { \
- smp_mb__before_atomic(); \
- clear_bit(nr, addr); \
-} while (0)
+static inline void clear_bit_unlock(unsigned int nr, volatile unsigned long *p)
+{
+ p += BIT_WORD(nr);
+ atomic_long_fetch_andnot_release(BIT_MASK(nr), (atomic_long_t *)p);
+}
/**
* __clear_bit_unlock - Clear a bit in memory, for unlock
@@ -37,11 +54,38 @@ do { \
*
* See for example x86's implementation.
*/
-#define __clear_bit_unlock(nr, addr) \
-do { \
- smp_mb__before_atomic(); \
- clear_bit(nr, addr); \
-} while (0)
+static inline void __clear_bit_unlock(unsigned int nr,
+ volatile unsigned long *p)
+{
+ unsigned long old;
-#endif /* _ASM_GENERIC_BITOPS_LOCK_H_ */
+ p += BIT_WORD(nr);
+ old = READ_ONCE(*p);
+ old &= ~BIT_MASK(nr);
+ atomic_long_set_release((atomic_long_t *)p, old);
+}
+
+/**
+ * clear_bit_unlock_is_negative_byte - Clear a bit in memory and test if bottom
+ * byte is negative, for unlock.
+ * @nr: the bit to clear
+ * @addr: the address to start counting from
+ *
+ * This is a bit of a one-trick-pony for the filemap code, which clears
+ * PG_locked and tests PG_waiters,
+ */
+#ifndef clear_bit_unlock_is_negative_byte
+static inline bool clear_bit_unlock_is_negative_byte(unsigned int nr,
+ volatile unsigned long *p)
+{
+ long old;
+ unsigned long mask = BIT_MASK(nr);
+
+ p += BIT_WORD(nr);
+ old = atomic_long_fetch_andnot_release(mask, (atomic_long_t *)p);
+ return !!(old & BIT(7));
+}
+#define clear_bit_unlock_is_negative_byte clear_bit_unlock_is_negative_byte
+#endif
+#endif /* _ASM_GENERIC_BITOPS_LOCK_H_ */
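
As the comment says, clear_bit_unlock_is_negative_byte() exists for the filemap pattern of releasing a lock bit and, in the same atomic operation, learning whether a waiter flag in the same byte was set. A hedged sketch of that calling pattern (LOCK_BIT, WAITERS_BIT and wake_up_waiters() are illustrative stand-ins, not the real page-flag layout or filemap code):

#include <linux/bitops.h>

#define LOCK_BIT	0	/* stand-in for PG_locked  */
#define WAITERS_BIT	7	/* stand-in for PG_waiters */

static void wake_up_waiters(unsigned long *flags)	/* hypothetical helper */
{
	/* wake whoever set WAITERS_BIT */
}

static void unlock_and_maybe_wake(unsigned long *flags)
{
	/* one atomic op: drop the lock bit and learn whether bit 7 was set */
	if (clear_bit_unlock_is_negative_byte(LOCK_BIT, flags))
		wake_up_waiters(flags);
}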
diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h
index a7613e1b0c87..20561a60db9c 100644
--- a/include/asm-generic/bug.h
+++ b/include/asm-generic/bug.h
@@ -75,9 +75,19 @@ struct bug_entry {
/*
* WARN(), WARN_ON(), WARN_ON_ONCE, and so on can be used to report
- * significant issues that need prompt attention if they should ever
- * appear at runtime. Use the versions with printk format strings
- * to provide better diagnostics.
+ * significant kernel issues that need prompt attention if they should ever
+ * appear at runtime.
+ *
+ * Do not use these macros when checking for invalid external inputs
+ * (e.g. invalid system call arguments, or invalid data coming from
+ * network/devices), and on transient conditions like ENOMEM or EAGAIN.
+ * These macros should be used for recoverable kernel issues only.
+ * For invalid external inputs, transient conditions, etc use
+ * pr_err[_once/_ratelimited]() followed by dump_stack(), if necessary.
+ * Do not include "BUG"/"WARNING" in format strings manually to make these
+ * conditions distinguishable from kernel issues.
+ *
+ * Use the versions with printk format strings to provide better diagnostics.
*/
#ifndef __WARN_TAINT
extern __printf(3, 4)
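
A hedged sketch of the rule the comment spells out, using made-up names (struct foo_request, FOO_MAX_LEN): bad external input gets a rate-limited error and an error return, while a broken internal invariant is a recoverable kernel issue and gets a one-shot WARN:

#include <linux/bug.h>
#include <linux/errno.h>
#include <linux/printk.h>

struct foo_request {			/* hypothetical */
	unsigned int len;
	int (*handler)(struct foo_request *req);
};

#define FOO_MAX_LEN	4096		/* hypothetical limit */

static int foo_parse_request(struct foo_request *req)
{
	/* invalid data from userspace/network: not a kernel bug, so no WARN */
	if (req->len > FOO_MAX_LEN) {
		pr_err_ratelimited("foo: request too long: %u\n", req->len);
		return -EINVAL;
	}

	/* broken kernel invariant: a recoverable internal bug, so WARN once */
	if (WARN_ON_ONCE(!req->handler))
		return -EINVAL;

	return req->handler(req);
}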
diff --git a/include/asm-generic/export.h b/include/asm-generic/export.h
index 68efb950a918..4d73e6e3c66c 100644
--- a/include/asm-generic/export.h
+++ b/include/asm-generic/export.h
@@ -5,12 +5,10 @@
#define KSYM_FUNC(x) x
#endif
#ifdef CONFIG_64BIT
-#define __put .quad
#ifndef KSYM_ALIGN
#define KSYM_ALIGN 8
#endif
#else
-#define __put .long
#ifndef KSYM_ALIGN
#define KSYM_ALIGN 4
#endif
@@ -19,6 +17,16 @@
#define KCRC_ALIGN 4
#endif
+.macro __put, val, name
+#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS
+ .long \val - ., \name - .
+#elif defined(CONFIG_64BIT)
+ .quad \val, \name
+#else
+ .long \val, \name
+#endif
+.endm
+
/*
* note on .section use: @progbits vs %progbits nastiness doesn't matter,
* since we immediately emit into those sections anyway.
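
With CONFIG_HAVE_ARCH_PREL32_RELOCATIONS, the __put macro now stores each ksymtab field as ".long \val - .", a signed 32-bit offset from the entry itself rather than an absolute address: half the size on 64-bit and no load-time relocation needed. Readers recover the pointer by adding the entry's own address back. A small user-space sketch of that read-back step (the helper name here is ours; the kernel has its own equivalent):

#include <stdint.h>

/* A place-relative entry stores (target - &entry); adding the entry's own
 * address recovers the target pointer. */
static inline void *prel32_to_ptr(const int32_t *off)
{
	return (void *)((uintptr_t)off + *off);
}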
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index f59639afaa39..88ebc6102c7c 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -1019,8 +1019,8 @@ int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot);
int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot);
int pud_clear_huge(pud_t *pud);
int pmd_clear_huge(pmd_t *pmd);
-int pud_free_pmd_page(pud_t *pud);
-int pmd_free_pte_page(pmd_t *pmd);
+int pud_free_pmd_page(pud_t *pud, unsigned long addr);
+int pmd_free_pte_page(pmd_t *pmd, unsigned long addr);
#else /* !CONFIG_HAVE_ARCH_HUGE_VMAP */
static inline int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot)
{
@@ -1046,11 +1046,11 @@ static inline int pmd_clear_huge(pmd_t *pmd)
{
return 0;
}
-static inline int pud_free_pmd_page(pud_t *pud)
+static inline int pud_free_pmd_page(pud_t *pud, unsigned long addr)
{
return 0;
}
-static inline int pmd_free_pte_page(pmd_t *pmd)
+static inline int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
{
return 0;
}
@@ -1083,6 +1083,36 @@ int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
static inline void init_espfix_bsp(void) { }
#endif
+#ifndef __HAVE_ARCH_PFN_MODIFY_ALLOWED
+static inline bool pfn_modify_allowed(unsigned long pfn, pgprot_t prot)
+{
+ return true;
+}
+
+static inline bool arch_has_pfn_modify_check(void)
+{
+ return false;
+}
+#endif /* !__HAVE_ARCH_PFN_MODIFY_ALLOWED */
+
+/*
+ * Architecture PAGE_KERNEL_* fallbacks
+ *
+ * Some architectures don't define certain PAGE_KERNEL_* flags. This is either
+ * because they really don't support them, or the port needs to be updated to
+ * reflect the required functionality. Below is a set of relatively safe,
+ * best-effort fallbacks that can be relied on until the architectures
+ * define them on their own.
+ */
+
+#ifndef PAGE_KERNEL_RO
+# define PAGE_KERNEL_RO PAGE_KERNEL
+#endif
+
+#ifndef PAGE_KERNEL_EXEC
+# define PAGE_KERNEL_EXEC PAGE_KERNEL
+#endif
+
#endif /* !__ASSEMBLY__ */
#ifndef io_remap_pfn_range
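
A hypothetical architecture override of the new hooks above, shown only to illustrate how the fallback is replaced; a real implementation lives in the arch's <asm/pgtable.h>, and the arch_pfn_is_mappable() policy check is made up for this sketch.

#define __HAVE_ARCH_PFN_MODIFY_ALLOWED

static inline bool pfn_modify_allowed(unsigned long pfn, pgprot_t prot)
{
	/* Veto PFNs that the platform cannot safely expose with this
	 * protection, e.g. as done for the x86 L1TF mitigation. */
	return arch_pfn_is_mappable(pfn, prot);
}

static inline bool arch_has_pfn_modify_check(void)
{
	return true;
}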
diff --git a/include/asm-generic/qspinlock_types.h b/include/asm-generic/qspinlock_types.h
index 0763f065b975..d10f1e7d6ba8 100644
--- a/include/asm-generic/qspinlock_types.h
+++ b/include/asm-generic/qspinlock_types.h
@@ -63,7 +63,7 @@ typedef struct qspinlock {
/*
* Initializer
*/
-#define __ARCH_SPIN_LOCK_UNLOCKED { .val = ATOMIC_INIT(0) }
+#define __ARCH_SPIN_LOCK_UNLOCKED { { .val = ATOMIC_INIT(0) } }
/*
* Bitfields in the atomic value:
diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index faddde44de8c..b3353e21f3b3 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -15,6 +15,7 @@
#ifndef _ASM_GENERIC__TLB_H
#define _ASM_GENERIC__TLB_H
+#include <linux/mmu_notifier.h>
#include <linux/swap.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
@@ -138,6 +139,16 @@ static inline void __tlb_reset_range(struct mmu_gather *tlb)
}
}
+static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
+{
+ if (!tlb->end)
+ return;
+
+ tlb_flush(tlb);
+ mmu_notifier_invalidate_range(tlb->mm, tlb->start, tlb->end);
+ __tlb_reset_range(tlb);
+}
+
static inline void tlb_remove_page_size(struct mmu_gather *tlb,
struct page *page, int page_size)
{
@@ -186,10 +197,8 @@ static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
#define __tlb_end_vma(tlb, vma) \
do { \
- if (!tlb->fullmm && tlb->end) { \
- tlb_flush(tlb); \
- __tlb_reset_range(tlb); \
- } \
+ if (!tlb->fullmm) \
+ tlb_flush_mmu_tlbonly(tlb); \
} while (0)
#ifndef tlb_end_vma
@@ -265,33 +274,41 @@ static inline void tlb_remove_check_page_size_change(struct mmu_gather *tlb,
* For now w.r.t page table cache, mark the range_size as PAGE_SIZE
*/
+#ifndef pte_free_tlb
#define pte_free_tlb(tlb, ptep, address) \
do { \
__tlb_adjust_range(tlb, address, PAGE_SIZE); \
__pte_free_tlb(tlb, ptep, address); \
} while (0)
+#endif
+#ifndef pmd_free_tlb
#define pmd_free_tlb(tlb, pmdp, address) \
do { \
__tlb_adjust_range(tlb, address, PAGE_SIZE); \
__pmd_free_tlb(tlb, pmdp, address); \
} while (0)
+#endif
#ifndef __ARCH_HAS_4LEVEL_HACK
+#ifndef pud_free_tlb
#define pud_free_tlb(tlb, pudp, address) \
do { \
__tlb_adjust_range(tlb, address, PAGE_SIZE); \
__pud_free_tlb(tlb, pudp, address); \
} while (0)
#endif
+#endif
#ifndef __ARCH_HAS_5LEVEL_HACK
+#ifndef p4d_free_tlb
#define p4d_free_tlb(tlb, pudp, address) \
do { \
__tlb_adjust_range(tlb, address, PAGE_SIZE); \
__p4d_free_tlb(tlb, pudp, address); \
} while (0)
#endif
+#endif
#define tlb_migrate_finish(mm) do {} while (0)
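
Because the generic pte/pmd/pud/p4d_free_tlb() definitions above are now guarded by #ifndef, an architecture can supply its own variant before including the generic header and still reuse everything else. A hypothetical <asm/tlb.h> fragment; arch_track_pte_free() is an invented hook used purely for illustration.

#define pte_free_tlb(tlb, ptep, address)			\
	do {							\
		__tlb_adjust_range(tlb, address, PAGE_SIZE);	\
		arch_track_pte_free(tlb, ptep);			\
		__pte_free_tlb(tlb, ptep, address);		\
	} while (0)

#include <asm-generic/tlb.h>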
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index e373e2e10f6a..f173b5f30dbe 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -218,7 +218,6 @@
#define TIMER_OF_TABLES() OF_TABLE(CONFIG_TIMER_OF, timer)
#define IRQCHIP_OF_MATCH_TABLE() OF_TABLE(CONFIG_IRQCHIP, irqchip)
#define CLK_OF_TABLES() OF_TABLE(CONFIG_COMMON_CLK, clk)
-#define IOMMU_OF_TABLES() OF_TABLE(CONFIG_OF_IOMMU, iommu)
#define RESERVEDMEM_OF_TABLES() OF_TABLE(CONFIG_OF_RESERVED_MEM, reservedmem)
#define CPU_METHOD_OF_TABLES() OF_TABLE(CONFIG_SMP, cpu_method)
#define CPUIDLE_METHOD_OF_TABLES() OF_TABLE(CONFIG_CPU_IDLE, cpuidle_method)
@@ -601,7 +600,6 @@
CLK_OF_TABLES() \
RESERVEDMEM_OF_TABLES() \
TIMER_OF_TABLES() \
- IOMMU_OF_TABLES() \
CPU_METHOD_OF_TABLES() \
CPUIDLE_METHOD_OF_TABLES() \
KERNEL_DTB() \
diff --git a/include/crypto/dh.h b/include/crypto/dh.h
index 71e1bb24d79f..7e0dad94cb2b 100644
--- a/include/crypto/dh.h
+++ b/include/crypto/dh.h
@@ -29,17 +29,21 @@
*
* @key: Private DH key
* @p: Diffie-Hellman parameter P
+ * @q: Diffie-Hellman parameter Q
* @g: Diffie-Hellman generator G
* @key_size: Size of the private DH key
* @p_size: Size of DH parameter P
+ * @q_size: Size of DH parameter Q
* @g_size: Size of DH generator G
*/
struct dh {
void *key;
void *p;
+ void *q;
void *g;
unsigned int key_size;
unsigned int p_size;
+ unsigned int q_size;
unsigned int g_size;
};
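
A hedged sketch (not part of this patch) of packing DH parameters, now including Q, for a kpp setkey() call. It assumes the crypto_dh_encode_key()/crypto_dh_key_len() helpers declared elsewhere in this header and crypto_kpp_set_secret() from <crypto/kpp.h>; the demo_ function and its caller-provided buffers are illustrative.

#include <crypto/dh.h>
#include <crypto/kpp.h>
#include <linux/slab.h>

static int demo_dh_set_params(struct crypto_kpp *tfm,
			      const u8 *priv, unsigned int priv_len,
			      const u8 *p, unsigned int p_len,
			      const u8 *q, unsigned int q_len,
			      const u8 *g, unsigned int g_len)
{
	struct dh params = {
		.key = (void *)priv, .key_size = priv_len,
		.p = (void *)p, .p_size = p_len,
		.q = (void *)q, .q_size = q_len,	/* may be NULL/0 */
		.g = (void *)g, .g_size = g_len,
	};
	unsigned int len = crypto_dh_key_len(&params);
	char *buf = kmalloc(len, GFP_KERNEL);
	int err;

	if (!buf)
		return -ENOMEM;

	err = crypto_dh_encode_key(buf, len, &params);
	if (!err)
		err = crypto_kpp_set_secret(tfm, buf, len);

	kzfree(buf);
	return err;
}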
diff --git a/include/crypto/drbg.h b/include/crypto/drbg.h
index 8f941102af36..3fb581bf3b87 100644
--- a/include/crypto/drbg.h
+++ b/include/crypto/drbg.h
@@ -122,11 +122,10 @@ struct drbg_state {
struct crypto_skcipher *ctr_handle; /* CTR mode cipher handle */
struct skcipher_request *ctr_req; /* CTR mode request handle */
- __u8 *ctr_null_value_buf; /* CTR mode unaligned buffer */
- __u8 *ctr_null_value; /* CTR mode aligned zero buf */
__u8 *outscratchpadbuf; /* CTR mode output scratchpad */
__u8 *outscratchpad; /* CTR mode aligned outbuf */
struct crypto_wait ctr_wait; /* CTR mode async wait obj */
+ struct scatterlist sg_in, sg_out; /* CTR mode SGLs */
bool seeded; /* DRBG fully seeded? */
bool pr; /* Prediction resistance enabled? */
diff --git a/include/crypto/if_alg.h b/include/crypto/if_alg.h
index cc414db9da0a..482461d8931d 100644
--- a/include/crypto/if_alg.h
+++ b/include/crypto/if_alg.h
@@ -245,7 +245,8 @@ ssize_t af_alg_sendpage(struct socket *sock, struct page *page,
int offset, size_t size, int flags);
void af_alg_free_resources(struct af_alg_async_req *areq);
void af_alg_async_cb(struct crypto_async_request *_req, int err);
-__poll_t af_alg_poll_mask(struct socket *sock, __poll_t events);
+__poll_t af_alg_poll(struct file *file, struct socket *sock,
+ poll_table *wait);
struct af_alg_async_req *af_alg_alloc_areq(struct sock *sk,
unsigned int areqlen);
int af_alg_get_rsgl(struct sock *sk, struct msghdr *msg, int flags,
diff --git a/include/crypto/scatterwalk.h b/include/crypto/scatterwalk.h
index 880e6be9e95e..a66c127a20ed 100644
--- a/include/crypto/scatterwalk.h
+++ b/include/crypto/scatterwalk.h
@@ -22,27 +22,14 @@
#include <linux/scatterlist.h>
static inline void scatterwalk_crypto_chain(struct scatterlist *head,
- struct scatterlist *sg,
- int chain, int num)
+ struct scatterlist *sg, int num)
{
- if (chain) {
- head->length += sg->length;
- sg = sg_next(sg);
- }
-
if (sg)
sg_chain(head, num, sg);
else
sg_mark_end(head);
}
-static inline unsigned long scatterwalk_samebuf(struct scatter_walk *walk_in,
- struct scatter_walk *walk_out)
-{
- return !(((sg_page(walk_in->sg) - sg_page(walk_out->sg)) << PAGE_SHIFT) +
- (int)(walk_in->offset - walk_out->offset));
-}
-
static inline unsigned int scatterwalk_pagelen(struct scatter_walk *walk)
{
unsigned int len = walk->sg->offset + walk->sg->length - walk->offset;
diff --git a/include/crypto/sha.h b/include/crypto/sha.h
index 0555b571dd34..8a46202b1857 100644
--- a/include/crypto/sha.h
+++ b/include/crypto/sha.h
@@ -71,6 +71,10 @@ extern const u8 sha224_zero_message_hash[SHA224_DIGEST_SIZE];
extern const u8 sha256_zero_message_hash[SHA256_DIGEST_SIZE];
+extern const u8 sha384_zero_message_hash[SHA384_DIGEST_SIZE];
+
+extern const u8 sha512_zero_message_hash[SHA512_DIGEST_SIZE];
+
struct sha1_state {
u32 state[SHA1_DIGEST_SIZE / 4];
u64 count;
diff --git a/include/crypto/vmac.h b/include/crypto/vmac.h
deleted file mode 100644
index 6b700c7b2fe1..000000000000
--- a/include/crypto/vmac.h
+++ /dev/null
@@ -1,63 +0,0 @@
-/*
- * Modified to interface to the Linux kernel
- * Copyright (c) 2009, Intel Corporation.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
- * more details.
- *
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
- * Place - Suite 330, Boston, MA 02111-1307 USA.
- */
-
-#ifndef __CRYPTO_VMAC_H
-#define __CRYPTO_VMAC_H
-
-/* --------------------------------------------------------------------------
- * VMAC and VHASH Implementation by Ted Krovetz (tdk@acm.org) and Wei Dai.
- * This implementation is herby placed in the public domain.
- * The authors offers no warranty. Use at your own risk.
- * Please send bug reports to the authors.
- * Last modified: 17 APR 08, 1700 PDT
- * ----------------------------------------------------------------------- */
-
-/*
- * User definable settings.
- */
-#define VMAC_TAG_LEN 64
-#define VMAC_KEY_SIZE 128/* Must be 128, 192 or 256 */
-#define VMAC_KEY_LEN (VMAC_KEY_SIZE/8)
-#define VMAC_NHBYTES 128/* Must 2^i for any 3 < i < 13 Standard = 128*/
-
-/*
- * This implementation uses u32 and u64 as names for unsigned 32-
- * and 64-bit integer types. These are defined in C99 stdint.h. The
- * following may need adaptation if you are not running a C99 or
- * Microsoft C environment.
- */
-struct vmac_ctx {
- u64 nhkey[(VMAC_NHBYTES/8)+2*(VMAC_TAG_LEN/64-1)];
- u64 polykey[2*VMAC_TAG_LEN/64];
- u64 l3key[2*VMAC_TAG_LEN/64];
- u64 polytmp[2*VMAC_TAG_LEN/64];
- u64 cached_nonce[2];
- u64 cached_aes[2];
- int first_block_processed;
-};
-
-typedef u64 vmac_t;
-
-struct vmac_ctx_t {
- struct crypto_cipher *child;
- struct vmac_ctx __vmac_ctx;
- u8 partial[VMAC_NHBYTES]; /* partial block */
- int partial_size; /* size of the partial block */
-};
-
-#endif /* __CRYPTO_VMAC_H */
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index f5099c12c6a6..f7a19c2a7a80 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -97,29 +97,11 @@ struct pci_controller;
#define DRM_IF_VERSION(maj, min) (maj << 16 | min)
-/**
- * drm_drv_uses_atomic_modeset - check if the driver implements
- * atomic_commit()
- * @dev: DRM device
- *
- * This check is useful if drivers do not have DRIVER_ATOMIC set but
- * have atomic modesetting internally implemented.
- */
-static inline bool drm_drv_uses_atomic_modeset(struct drm_device *dev)
-{
- return dev->mode_config.funcs->atomic_commit != NULL;
-}
-
#define DRM_SWITCH_POWER_ON 0
#define DRM_SWITCH_POWER_OFF 1
#define DRM_SWITCH_POWER_CHANGING 2
#define DRM_SWITCH_POWER_DYNAMIC_OFF 3
-static inline bool drm_core_check_feature(struct drm_device *dev, int feature)
-{
- return dev->driver->driver_features & feature;
-}
-
/* returns true if currently okay to sleep */
static inline bool drm_can_sleep(void)
{
diff --git a/include/drm/drm_atomic.h b/include/drm/drm_atomic.h
index a57a8aa90ffb..da9d95a19580 100644
--- a/include/drm/drm_atomic.h
+++ b/include/drm/drm_atomic.h
@@ -160,6 +160,14 @@ struct __drm_crtcs_state {
struct __drm_connnectors_state {
struct drm_connector *ptr;
struct drm_connector_state *state, *old_state, *new_state;
+ /**
+ * @out_fence_ptr:
+ *
+ * User-provided pointer which the kernel uses to return a sync_file
+ * file descriptor. Used by writeback connectors to signal completion of
+ * the writeback.
+ */
+ s32 __user *out_fence_ptr;
};
struct drm_private_obj;
@@ -594,6 +602,9 @@ void drm_atomic_set_fence_for_plane(struct drm_plane_state *plane_state,
int __must_check
drm_atomic_set_crtc_for_connector(struct drm_connector_state *conn_state,
struct drm_crtc *crtc);
+int drm_atomic_set_writeback_fb_for_connector(
+ struct drm_connector_state *conn_state,
+ struct drm_framebuffer *fb);
int __must_check
drm_atomic_add_affected_connectors(struct drm_atomic_state *state,
struct drm_crtc *crtc);
@@ -601,9 +612,6 @@ int __must_check
drm_atomic_add_affected_planes(struct drm_atomic_state *state,
struct drm_crtc *crtc);
-void
-drm_atomic_clean_old_fb(struct drm_device *dev, unsigned plane_mask, int ret);
-
int __must_check drm_atomic_check_only(struct drm_atomic_state *state);
int __must_check drm_atomic_commit(struct drm_atomic_state *state);
int __must_check drm_atomic_nonblocking_commit(struct drm_atomic_state *state);
diff --git a/include/drm/drm_atomic_helper.h b/include/drm/drm_atomic_helper.h
index 26aaba58d6ce..99e2a5297c69 100644
--- a/include/drm/drm_atomic_helper.h
+++ b/include/drm/drm_atomic_helper.h
@@ -100,6 +100,7 @@ int __must_check drm_atomic_helper_swap_state(struct drm_atomic_state *state,
int drm_atomic_helper_setup_commit(struct drm_atomic_state *state,
bool nonblock);
void drm_atomic_helper_wait_for_dependencies(struct drm_atomic_state *state);
+void drm_atomic_helper_fake_vblank(struct drm_atomic_state *state);
void drm_atomic_helper_commit_hw_done(struct drm_atomic_state *state);
void drm_atomic_helper_commit_cleanup_done(struct drm_atomic_state *state);
diff --git a/include/drm/drm_audio_component.h b/include/drm/drm_audio_component.h
new file mode 100644
index 000000000000..4923b00328c1
--- /dev/null
+++ b/include/drm/drm_audio_component.h
@@ -0,0 +1,118 @@
+// SPDX-License-Identifier: MIT
+// Copyright © 2014 Intel Corporation
+
+#ifndef _DRM_AUDIO_COMPONENT_H_
+#define _DRM_AUDIO_COMPONENT_H_
+
+struct drm_audio_component;
+
+/**
+ * struct drm_audio_component_ops - Ops implemented by DRM driver, called by hda driver
+ */
+struct drm_audio_component_ops {
+ /**
+ * @owner: drm module to pin down
+ */
+ struct module *owner;
+ /**
+ * @get_power: get the POWER_DOMAIN_AUDIO power well
+ *
+ * Request the power well to be turned on.
+ */
+ void (*get_power)(struct device *);
+ /**
+ * @put_power: put the POWER_DOMAIN_AUDIO power well
+ *
+ * Allow the power well to be turned off.
+ */
+ void (*put_power)(struct device *);
+ /**
+ * @codec_wake_override: Enable/disable codec wake signal
+ */
+ void (*codec_wake_override)(struct device *, bool enable);
+ /**
+ * @get_cdclk_freq: Get the Core Display Clock in kHz
+ */
+ int (*get_cdclk_freq)(struct device *);
+ /**
+ * @sync_audio_rate: set n/cts based on the sample rate
+ *
+ * Called from audio driver. After audio driver sets the
+ * sample rate, it will call this function to set n/cts
+ */
+ int (*sync_audio_rate)(struct device *, int port, int pipe, int rate);
+ /**
+ * @get_eld: fill the audio state and ELD bytes for the given port
+ *
+ * Called from audio driver to get the HDMI/DP audio state of the given
+ * digital port, and also fetch ELD bytes to the given pointer.
+ *
+ * It returns the byte size of the original ELD (not the actually
+ * copied size), zero for an invalid ELD, or a negative error code.
+ *
+ * Note that the returned size may be over @max_bytes. Then it
+ * implies that only a part of ELD has been copied to the buffer.
+ */
+ int (*get_eld)(struct device *, int port, int pipe, bool *enabled,
+ unsigned char *buf, int max_bytes);
+};
+
+/**
+ * struct drm_audio_component_audio_ops - Ops implemented by hda driver, called by DRM driver
+ */
+struct drm_audio_component_audio_ops {
+ /**
+ * @audio_ptr: Pointer to be used in call to pin_eld_notify
+ */
+ void *audio_ptr;
+ /**
+ * @pin_eld_notify: Notify the HDA driver that pin sense and/or ELD information has changed
+ *
+ * Called when the DRM driver has set up the audio pipeline or has just
+ * begun to tear it down. This allows the HDA driver to update its
+ * status accordingly (even when the HDA controller is in power save
+ * mode).
+ */
+ void (*pin_eld_notify)(void *audio_ptr, int port, int pipe);
+ /**
+ * @pin2port: Check and convert from pin node to port number
+ *
+ * Called by HDA driver to check and convert from the pin widget node
+ * number to a port number in the graphics side.
+ */
+ int (*pin2port)(void *audio_ptr, int pin);
+ /**
+ * @master_bind: (Optional) component master bind callback
+ *
+ * Called at binding master component, for HDA codec-specific
+ * handling of dynamic binding.
+ */
+ int (*master_bind)(struct device *dev, struct drm_audio_component *);
+ /**
+ * @master_unbind: (Optional) component master unbind callback
+ *
+ * Called at unbinding master component, for HDA codec-specific
+ * handling of dynamic unbinding.
+ */
+ void (*master_unbind)(struct device *dev, struct drm_audio_component *);
+};
+
+/**
+ * struct drm_audio_component - Used for direct communication between DRM and hda drivers
+ */
+struct drm_audio_component {
+ /**
+ * @dev: DRM device, used as parameter for ops
+ */
+ struct device *dev;
+ /**
+ * @ops: Ops implemented by DRM driver, called by hda driver
+ */
+ const struct drm_audio_component_ops *ops;
+ /**
+ * @audio_ops: Ops implemented by hda driver, called by DRM driver
+ */
+ const struct drm_audio_component_audio_ops *audio_ops;
+};
+
+#endif /* _DRM_AUDIO_COMPONENT_H_ */
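
A hypothetical HDA-side usage sketch for the new header, using only the fields declared above: the audio driver fills in drm_audio_component_audio_ops so the DRM driver can notify it about pin/ELD changes. The demo_ names are invented, and binding the component itself (via the component framework) is outside the scope of this header.

#include <linux/workqueue.h>
#include <drm/drm_audio_component.h>

struct demo_hda {
	struct work_struct eld_work;
};
static struct demo_hda demo_hda_instance;

static void demo_pin_eld_notify(void *audio_ptr, int port, int pipe)
{
	struct demo_hda *hda = audio_ptr;

	/* Re-read the ELD for the port that changed. */
	schedule_work(&hda->eld_work);
}

static const struct drm_audio_component_audio_ops demo_audio_ops = {
	.audio_ptr	= &demo_hda_instance,
	.pin_eld_notify	= demo_pin_eld_notify,
};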
diff --git a/include/drm/drm_bridge.h b/include/drm/drm_bridge.h
index 3270fec46979..bd850747ce54 100644
--- a/include/drm/drm_bridge.h
+++ b/include/drm/drm_bridge.h
@@ -97,7 +97,7 @@ struct drm_bridge_funcs {
/**
* @mode_fixup:
*
- * This callback is used to validate and adjust a mode. The paramater
+ * This callback is used to validate and adjust a mode. The parameter
* mode is the display mode that should be fed to the next element in
* the display chain, either the final &drm_connector or the next
* &drm_bridge. The parameter adjusted_mode is the input mode the bridge
@@ -178,6 +178,22 @@ struct drm_bridge_funcs {
* then this would be &drm_encoder_helper_funcs.mode_set. The display
* pipe (i.e. clocks and timing signals) is off when this function is
* called.
+ *
+ * The adjusted_mode parameter is the mode output by the CRTC for the
+ * first bridge in the chain. It can be different from the mode
+ * parameter that contains the desired mode for the connector at the end
+ * of the bridges chain, for instance when the first bridge in the chain
+ * performs scaling. The adjusted mode is mostly useful for the first
+ * bridge in the chain and is likely irrelevant for the other bridges.
+ *
+ * For atomic drivers the adjusted_mode is the mode stored in
+ * &drm_crtc_state.adjusted_mode.
+ *
+ * NOTE:
+ *
+ * If a need arises to store and access modes adjusted for other
+ * locations than the connection between the CRTC and the first bridge,
+ * the DRM framework will have to be extended with DRM bridge states.
*/
void (*mode_set)(struct drm_bridge *bridge,
struct drm_display_mode *mode,
@@ -254,27 +270,29 @@ struct drm_bridge_timings {
/**
* struct drm_bridge - central DRM bridge control structure
- * @dev: DRM device this bridge belongs to
- * @encoder: encoder to which this bridge is connected
- * @next: the next bridge in the encoder chain
- * @of_node: device node pointer to the bridge
- * @list: to keep track of all added bridges
- * @timings: the timing specification for the bridge, if any (may
- * be NULL)
- * @funcs: control functions
- * @driver_private: pointer to the bridge driver's internal context
*/
struct drm_bridge {
+ /** @dev: DRM device this bridge belongs to */
struct drm_device *dev;
+ /** @encoder: encoder to which this bridge is connected */
struct drm_encoder *encoder;
+ /** @next: the next bridge in the encoder chain */
struct drm_bridge *next;
#ifdef CONFIG_OF
+ /** @of_node: device node pointer to the bridge */
struct device_node *of_node;
#endif
+ /** @list: to keep track of all added bridges */
struct list_head list;
+ /**
+ * @timings:
+ *
+ * the timing specification for the bridge, if any (may be NULL)
+ */
const struct drm_bridge_timings *timings;
-
+ /** @funcs: control functions */
const struct drm_bridge_funcs *funcs;
+ /** @driver_private: pointer to the bridge driver's internal context */
void *driver_private;
};
@@ -285,15 +303,15 @@ int drm_bridge_attach(struct drm_encoder *encoder, struct drm_bridge *bridge,
struct drm_bridge *previous);
bool drm_bridge_mode_fixup(struct drm_bridge *bridge,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode);
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode);
enum drm_mode_status drm_bridge_mode_valid(struct drm_bridge *bridge,
const struct drm_display_mode *mode);
void drm_bridge_disable(struct drm_bridge *bridge);
void drm_bridge_post_disable(struct drm_bridge *bridge);
void drm_bridge_mode_set(struct drm_bridge *bridge,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode);
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode);
void drm_bridge_pre_enable(struct drm_bridge *bridge);
void drm_bridge_enable(struct drm_bridge *bridge);
diff --git a/include/drm/drm_client.h b/include/drm/drm_client.h
new file mode 100644
index 000000000000..989f8e52864d
--- /dev/null
+++ b/include/drm/drm_client.h
@@ -0,0 +1,139 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _DRM_CLIENT_H_
+#define _DRM_CLIENT_H_
+
+#include <linux/types.h>
+
+struct drm_client_dev;
+struct drm_device;
+struct drm_file;
+struct drm_framebuffer;
+struct drm_gem_object;
+struct drm_minor;
+struct module;
+
+/**
+ * struct drm_client_funcs - DRM client callbacks
+ */
+struct drm_client_funcs {
+ /**
+ * @owner: The module owner
+ */
+ struct module *owner;
+
+ /**
+ * @unregister:
+ *
+ * Called when &drm_device is unregistered. The client should respond by
+ * releasing its resources using drm_client_release().
+ *
+ * This callback is optional.
+ */
+ void (*unregister)(struct drm_client_dev *client);
+
+ /**
+ * @restore:
+ *
+ * Called on drm_lastclose(). The first client instance in the list that
+ * returns zero gets the privilege to restore and no more clients are
+ * called. This callback is not called after @unregister has been called.
+ *
+ * This callback is optional.
+ */
+ int (*restore)(struct drm_client_dev *client);
+
+ /**
+ * @hotplug:
+ *
+ * Called on drm_kms_helper_hotplug_event().
+ * This callback is not called after @unregister has been called.
+ *
+ * This callback is optional.
+ */
+ int (*hotplug)(struct drm_client_dev *client);
+};
+
+/**
+ * struct drm_client_dev - DRM client instance
+ */
+struct drm_client_dev {
+ /**
+ * @dev: DRM device
+ */
+ struct drm_device *dev;
+
+ /**
+ * @name: Name of the client.
+ */
+ const char *name;
+
+ /**
+ * @list:
+ *
+ * List of all clients of a DRM device, linked into
+ * &drm_device.clientlist. Protected by &drm_device.clientlist_mutex.
+ */
+ struct list_head list;
+
+ /**
+ * @funcs: DRM client functions (optional)
+ */
+ const struct drm_client_funcs *funcs;
+
+ /**
+ * @file: DRM file
+ */
+ struct drm_file *file;
+};
+
+int drm_client_new(struct drm_device *dev, struct drm_client_dev *client,
+ const char *name, const struct drm_client_funcs *funcs);
+void drm_client_release(struct drm_client_dev *client);
+
+void drm_client_dev_unregister(struct drm_device *dev);
+void drm_client_dev_hotplug(struct drm_device *dev);
+void drm_client_dev_restore(struct drm_device *dev);
+
+/**
+ * struct drm_client_buffer - DRM client buffer
+ */
+struct drm_client_buffer {
+ /**
+ * @client: DRM client
+ */
+ struct drm_client_dev *client;
+
+ /**
+ * @handle: Buffer handle
+ */
+ u32 handle;
+
+ /**
+ * @pitch: Buffer pitch
+ */
+ u32 pitch;
+
+ /**
+ * @gem: GEM object backing this buffer
+ */
+ struct drm_gem_object *gem;
+
+ /**
+ * @vaddr: Virtual address for the buffer
+ */
+ void *vaddr;
+
+ /**
+ * @fb: DRM framebuffer
+ */
+ struct drm_framebuffer *fb;
+};
+
+struct drm_client_buffer *
+drm_client_framebuffer_create(struct drm_client_dev *client, u32 width, u32 height, u32 format);
+void drm_client_framebuffer_delete(struct drm_client_buffer *buffer);
+
+int drm_client_debugfs_init(struct drm_minor *minor);
+
+#endif
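
A hypothetical in-kernel client skeleton based only on the API declared above; error handling and the actual drawing are omitted, and the demo_ names are illustrative.

#include <linux/module.h>
#include <drm/drm_client.h>

static int demo_client_hotplug(struct drm_client_dev *client)
{
	/* React to connector changes, e.g. by (re)creating a framebuffer:
	 * drm_client_framebuffer_create(client, w, h, DRM_FORMAT_XRGB8888); */
	return 0;
}

static const struct drm_client_funcs demo_client_funcs = {
	.owner   = THIS_MODULE,
	.hotplug = demo_client_hotplug,
};

static int demo_client_setup(struct drm_device *dev,
			     struct drm_client_dev *client)
{
	return drm_client_new(dev, client, "demo-client", &demo_client_funcs);
}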
diff --git a/include/drm/drm_connector.h b/include/drm/drm_connector.h
index 675cc3f8cf85..97ea41dc678f 100644
--- a/include/drm/drm_connector.h
+++ b/include/drm/drm_connector.h
@@ -290,6 +290,10 @@ struct drm_display_info {
#define DRM_BUS_FLAG_DATA_MSB_TO_LSB (1<<4)
/* data is transmitted LSB to MSB on the bus */
#define DRM_BUS_FLAG_DATA_LSB_TO_MSB (1<<5)
+/* drive sync on pos. edge */
+#define DRM_BUS_FLAG_SYNC_POSEDGE (1<<6)
+/* drive sync on neg. edge */
+#define DRM_BUS_FLAG_SYNC_NEGEDGE (1<<7)
/**
* @bus_flags: Additional information (like pixel signal polarity) for
@@ -374,12 +378,9 @@ struct drm_tv_connector_state {
/**
* struct drm_connector_state - mutable connector state
- * @connector: backpointer to the connector
- * @best_encoder: can be used by helpers and drivers to select the encoder
- * @state: backpointer to global drm_atomic_state
- * @tv: TV connector state
*/
struct drm_connector_state {
+ /** @connector: backpointer to the connector */
struct drm_connector *connector;
/**
@@ -390,6 +391,13 @@ struct drm_connector_state {
*/
struct drm_crtc *crtc;
+ /**
+ * @best_encoder:
+ *
+ * Used by the atomic helpers to select the encoder, through the
+ * &drm_connector_helper_funcs.atomic_best_encoder or
+ * &drm_connector_helper_funcs.best_encoder callbacks.
+ */
struct drm_encoder *best_encoder;
/**
@@ -398,6 +406,7 @@ struct drm_connector_state {
*/
enum drm_link_status link_status;
+ /** @state: backpointer to global drm_atomic_state */
struct drm_atomic_state *state;
/**
@@ -407,6 +416,7 @@ struct drm_connector_state {
*/
struct drm_crtc_commit *commit;
+ /** @tv: TV connector state */
struct drm_tv_connector_state tv;
/**
@@ -419,6 +429,14 @@ struct drm_connector_state {
enum hdmi_picture_aspect picture_aspect_ratio;
/**
+ * @content_type: Connector property to control the
+ * HDMI infoframe content type setting.
+ * The value of this field must match one of the
+ * %DRM_MODE_CONTENT_TYPE_\* values.
+ */
+ unsigned int content_type;
+
+ /**
* @scaling_mode: Connector property to control the
* upscaling, mostly used for built-in panels.
*/
@@ -429,6 +447,19 @@ struct drm_connector_state {
* protection. This is most commonly used for HDCP.
*/
unsigned int content_protection;
+
+ /**
+ * @writeback_job: Writeback job for writeback connectors
+ *
+ * Holds the framebuffer and out-fence for a writeback connector. As
+ * the writeback completion may be asynchronous to the normal commit
+ * cycle, the writeback job lifetime is managed separately from the
+ * normal atomic state by this object.
+ *
+ * See also: drm_writeback_queue_job() and
+ * drm_writeback_signal_completion()
+ */
+ struct drm_writeback_job *writeback_job;
};
/**
@@ -530,8 +561,7 @@ struct drm_connector_funcs {
* received for this output connector->edid must be NULL.
*
* Drivers using the probe helpers should use
- * drm_helper_probe_single_connector_modes() or
- * drm_helper_probe_single_connector_modes_nomerge() to implement this
+ * drm_helper_probe_single_connector_modes() to implement this
* function.
*
* RETURNS:
@@ -608,6 +638,8 @@ struct drm_connector_funcs {
* cleaned up by calling the @atomic_destroy_state hook in this
* structure.
*
+ * This callback is mandatory for atomic drivers.
+ *
* Atomic drivers which don't subclass &struct drm_connector_state should use
* drm_atomic_helper_connector_duplicate_state(). Drivers that subclass the
* state structure to extend it with driver-private state should use
@@ -634,6 +666,8 @@ struct drm_connector_funcs {
*
* Destroy a state duplicated with @atomic_duplicate_state and release
* or unreference all resources it references
+ *
+ * This callback is mandatory for atomic drivers.
*/
void (*atomic_destroy_state)(struct drm_connector *connector,
struct drm_connector_state *state);
@@ -738,45 +772,6 @@ struct drm_cmdline_mode {
/**
* struct drm_connector - central DRM connector control structure
- * @dev: parent DRM device
- * @kdev: kernel device for sysfs attributes
- * @attr: sysfs attributes
- * @head: list management
- * @base: base KMS object
- * @name: human readable name, can be overwritten by the driver
- * @connector_type: one of the DRM_MODE_CONNECTOR_<foo> types from drm_mode.h
- * @connector_type_id: index into connector type enum
- * @interlace_allowed: can this connector handle interlaced modes?
- * @doublescan_allowed: can this connector handle doublescan?
- * @stereo_allowed: can this connector handle stereo modes?
- * @funcs: connector control functions
- * @edid_blob_ptr: DRM property containing EDID if present
- * @properties: property tracking for this connector
- * @dpms: current dpms state
- * @helper_private: mid-layer private data
- * @cmdline_mode: mode line parsed from the kernel cmdline for this connector
- * @force: a DRM_FORCE_<foo> state for forced mode sets
- * @override_edid: has the EDID been overwritten through debugfs for testing?
- * @encoder_ids: valid encoders for this connector
- * @eld: EDID-like data, if present
- * @latency_present: AV delay info from ELD, if found
- * @video_latency: video latency info from ELD, if found
- * @audio_latency: audio latency info from ELD, if found
- * @null_edid_counter: track sinks that give us all zeros for the EDID
- * @bad_edid_counter: track sinks that give us an EDID with invalid checksum
- * @edid_corrupt: indicates whether the last read EDID was corrupt
- * @debugfs_entry: debugfs directory for this connector
- * @has_tile: is this connector connected to a tiled monitor
- * @tile_group: tile group for the connected monitor
- * @tile_is_single_monitor: whether the tile is one monitor housing
- * @num_h_tile: number of horizontal tiles in the tile group
- * @num_v_tile: number of vertical tiles in the tile group
- * @tile_h_loc: horizontal location of this tile
- * @tile_v_loc: vertical location of this tile
- * @tile_h_size: horizontal size of this tile.
- * @tile_v_size: vertical size of this tile.
- * @scaling_mode_property: Optional atomic property to control the upscaling.
- * @content_protection_property: Optional property to control content protection
*
* Each connector may be connected to one or more CRTCs, or may be clonable by
* another connector if they can share a CRTC. Each connector also has a specific
@@ -784,13 +779,27 @@ struct drm_cmdline_mode {
* span multiple monitors).
*/
struct drm_connector {
+ /** @dev: parent DRM device */
struct drm_device *dev;
+ /** @kdev: kernel device for sysfs attributes */
struct device *kdev;
+ /** @attr: sysfs attributes */
struct device_attribute *attr;
+
+ /**
+ * @head:
+ *
+ * List of all connectors on a @dev, linked from
+ * &drm_mode_config.connector_list. Protected by
+ * &drm_mode_config.connector_list_lock, but please only use
+ * &drm_connector_list_iter to walk this list.
+ */
struct list_head head;
+ /** @base: base KMS object */
struct drm_mode_object base;
+ /** @name: human readable name, can be overwritten by the driver */
char *name;
/**
@@ -808,10 +817,30 @@ struct drm_connector {
*/
unsigned index;
+ /**
+ * @connector_type:
+ * one of the DRM_MODE_CONNECTOR_<foo> types from drm_mode.h
+ */
int connector_type;
+ /** @connector_type_id: index into connector type enum */
int connector_type_id;
+ /**
+ * @interlace_allowed:
+ * Can this connector handle interlaced modes? Only used by
+ * drm_helper_probe_single_connector_modes() for mode filtering.
+ */
bool interlace_allowed;
+ /**
+ * @doublescan_allowed:
+ * Can this connector handle doublescan? Only used by
+ * drm_helper_probe_single_connector_modes() for mode filtering.
+ */
bool doublescan_allowed;
+ /**
+ * @stereo_allowed:
+ * Can this connector handle stereo modes? Only used by
+ * drm_helper_probe_single_connector_modes() for mode filtering.
+ */
bool stereo_allowed;
/**
@@ -860,45 +889,42 @@ struct drm_connector {
* Protected by &drm_mode_config.mutex.
*/
struct drm_display_info display_info;
+
+ /** @funcs: connector control functions */
const struct drm_connector_funcs *funcs;
+ /**
+ * @edid_blob_ptr: DRM property containing EDID if present. Protected by
+ * &drm_mode_config.mutex. This should be updated only by calling
+ * drm_connector_update_edid_property().
+ */
struct drm_property_blob *edid_blob_ptr;
+
+ /** @properties: property tracking for this connector */
struct drm_object_properties properties;
+ /**
+ * @scaling_mode_property: Optional atomic property to control the
+ * upscaling. See drm_connector_attach_scaling_mode_property().
+ */
struct drm_property *scaling_mode_property;
/**
* @content_protection_property: DRM ENUM property for content
- * protection
+ * protection. See drm_connector_attach_content_protection_property().
*/
struct drm_property *content_protection_property;
/**
* @path_blob_ptr:
*
- * DRM blob property data for the DP MST path property.
+ * DRM blob property data for the DP MST path property. This should only
+ * be updated by calling drm_connector_set_path_property().
*/
struct drm_property_blob *path_blob_ptr;
- /**
- * @tile_blob_ptr:
- *
- * DRM blob property data for the tile property (used mostly by DP MST).
- * This is meant for screens which are driven through separate display
- * pipelines represented by &drm_crtc, which might not be running with
- * genlocked clocks. For tiled panels which are genlocked, like
- * dual-link LVDS or dual-link DSI, the driver should try to not expose
- * the tiling and virtualize both &drm_crtc and &drm_plane if needed.
- */
- struct drm_property_blob *tile_blob_ptr;
-
-/* should we poll this connector for connects and disconnects */
-/* hot plug detectable */
#define DRM_CONNECTOR_POLL_HPD (1 << 0)
-/* poll for connections */
#define DRM_CONNECTOR_POLL_CONNECT (1 << 1)
-/* can cleanly poll for disconnections without flickering the screen */
-/* DACs should rarely do this without a lot of testing */
#define DRM_CONNECTOR_POLL_DISCONNECT (1 << 2)
/**
@@ -915,25 +941,40 @@ struct drm_connector {
* Periodically poll the connector for connection.
*
* DRM_CONNECTOR_POLL_DISCONNECT
- * Periodically poll the connector for disconnection.
+ * Periodically poll the connector for disconnection, without
+ * causing flickering even when the connector is in use. DACs should
+ * rarely do this without a lot of testing.
*
* Set to 0 for connectors that don't support connection status
* discovery.
*/
uint8_t polled;
- /* requested DPMS state */
+ /**
+ * @dpms: Current dpms state. For legacy drivers the
+ * &drm_connector_funcs.dpms callback must update this. For atomic
+ * drivers, this is handled by the core atomic code, and drivers must
+ * only take &drm_crtc_state.active into account.
+ */
int dpms;
+ /** @helper_private: mid-layer private data */
const struct drm_connector_helper_funcs *helper_private;
- /* forced on connector */
+ /** @cmdline_mode: mode line parsed from the kernel cmdline for this connector */
struct drm_cmdline_mode cmdline_mode;
+ /** @force: a DRM_FORCE_<foo> state for forced mode sets */
enum drm_connector_force force;
+ /** @override_edid: has the EDID been overwritten through debugfs for testing? */
bool override_edid;
#define DRM_CONNECTOR_MAX_ENCODER 3
+ /**
+ * @encoder_ids: Valid encoders for this connector. Please only use
+ * drm_connector_for_each_possible_encoder() to enumerate these.
+ */
uint32_t encoder_ids[DRM_CONNECTOR_MAX_ENCODER];
+
/**
* @encoder: Currently bound encoder driving this connector, if any.
* Only really meaningful for non-atomic drivers. Atomic drivers should
@@ -943,19 +984,37 @@ struct drm_connector {
struct drm_encoder *encoder;
#define MAX_ELD_BYTES 128
- /* EDID bits */
+ /** @eld: EDID-like data, if present */
uint8_t eld[MAX_ELD_BYTES];
+ /** @latency_present: AV delay info from ELD, if found */
bool latency_present[2];
- int video_latency[2]; /* [0]: progressive, [1]: interlaced */
+ /**
+ * @video_latency: Video latency info from ELD, if found.
+ * [0]: progressive, [1]: interlaced
+ */
+ int video_latency[2];
+ /**
+ * @audio_latency: audio latency info from ELD, if found
+ * [0]: progressive, [1]: interlaced
+ */
int audio_latency[2];
- int null_edid_counter; /* needed to workaround some HW bugs where we get all 0s */
+ /**
+ * @null_edid_counter: track sinks that give us all zeros for the EDID.
+ * Needed to work around some HW bugs where we get all 0s
+ */
+ int null_edid_counter;
+
+ /** @bad_edid_counter: track sinks that give us an EDID with invalid checksum */
unsigned bad_edid_counter;
- /* Flag for raw EDID header corruption - used in Displayport
- * compliance testing - * Displayport Link CTS Core 1.2 rev1.1 4.2.2.6
+ /**
+ * @edid_corrupt: Indicates whether the last read EDID was corrupt. Used
+ * in Displayport compliance testing - Displayport Link CTS Core 1.2
+ * rev1.1 4.2.2.6
*/
bool edid_corrupt;
+ /** @debugfs_entry: debugfs directory for this connector */
struct dentry *debugfs_entry;
/**
@@ -963,7 +1022,7 @@ struct drm_connector {
*
* Current atomic state for this connector.
*
- * This is protected by @drm_mode_config.connection_mutex. Note that
+ * This is protected by &drm_mode_config.connection_mutex. Note that
* nonblocking atomic commits access the current connector state without
* taking locks. Either by going through the &struct drm_atomic_state
* pointers, see for_each_oldnew_connector_in_state(),
@@ -974,19 +1033,44 @@ struct drm_connector {
*/
struct drm_connector_state *state;
- /* DisplayID bits */
+ /* DisplayID bits. FIXME: Extract into a substruct? */
+
+ /**
+ * @tile_blob_ptr:
+ *
+ * DRM blob property data for the tile property (used mostly by DP MST).
+ * This is meant for screens which are driven through separate display
+ * pipelines represented by &drm_crtc, which might not be running with
+ * genlocked clocks. For tiled panels which are genlocked, like
+ * dual-link LVDS or dual-link DSI, the driver should try to not expose
+ * the tiling and virtualize both &drm_crtc and &drm_plane if needed.
+ *
+ * This should only be updated by calling
+ * drm_connector_set_tile_property().
+ */
+ struct drm_property_blob *tile_blob_ptr;
+
+ /** @has_tile: is this connector connected to a tiled monitor */
bool has_tile;
+ /** @tile_group: tile group for the connected monitor */
struct drm_tile_group *tile_group;
+ /** @tile_is_single_monitor: whether the tile is one monitor housing */
bool tile_is_single_monitor;
+ /** @num_h_tile: number of horizontal tiles in the tile group */
+ /** @num_v_tile: number of vertical tiles in the tile group */
uint8_t num_h_tile, num_v_tile;
+ /** @tile_h_loc: horizontal location of this tile */
+ /** @tile_v_loc: vertical location of this tile */
uint8_t tile_h_loc, tile_v_loc;
+ /** @tile_h_size: horizontal size of this tile. */
+ /** @tile_v_size: vertical size of this tile. */
uint16_t tile_h_size, tile_v_size;
/**
* @free_node:
*
- * List used only by &drm_connector_iter to be able to clean up a
+ * List used only by &drm_connector_list_iter to be able to clean up a
* connector from any context, in conjunction with
* &drm_mode_config.connector_free_work.
*/
@@ -1001,15 +1085,21 @@ int drm_connector_init(struct drm_device *dev,
int connector_type);
int drm_connector_register(struct drm_connector *connector);
void drm_connector_unregister(struct drm_connector *connector);
-int drm_mode_connector_attach_encoder(struct drm_connector *connector,
+int drm_connector_attach_encoder(struct drm_connector *connector,
struct drm_encoder *encoder);
void drm_connector_cleanup(struct drm_connector *connector);
-static inline unsigned drm_connector_index(struct drm_connector *connector)
+
+static inline unsigned int drm_connector_index(const struct drm_connector *connector)
{
return connector->index;
}
+static inline u32 drm_connector_mask(const struct drm_connector *connector)
+{
+ return 1 << connector->index;
+}
+
/**
* drm_connector_lookup - lookup connector object
* @dev: DRM device
@@ -1089,20 +1179,25 @@ int drm_mode_create_tv_properties(struct drm_device *dev,
unsigned int num_modes,
const char * const modes[]);
int drm_mode_create_scaling_mode_property(struct drm_device *dev);
+int drm_connector_attach_content_type_property(struct drm_connector *dev);
int drm_connector_attach_scaling_mode_property(struct drm_connector *connector,
u32 scaling_mode_mask);
int drm_connector_attach_content_protection_property(
struct drm_connector *connector);
int drm_mode_create_aspect_ratio_property(struct drm_device *dev);
+int drm_mode_create_content_type_property(struct drm_device *dev);
+void drm_hdmi_avi_infoframe_content_type(struct hdmi_avi_infoframe *frame,
+ const struct drm_connector_state *conn_state);
+
int drm_mode_create_suggested_offset_properties(struct drm_device *dev);
-int drm_mode_connector_set_path_property(struct drm_connector *connector,
- const char *path);
-int drm_mode_connector_set_tile_property(struct drm_connector *connector);
-int drm_mode_connector_update_edid_property(struct drm_connector *connector,
- const struct edid *edid);
-void drm_mode_connector_set_link_status_property(struct drm_connector *connector,
- uint64_t link_status);
+int drm_connector_set_path_property(struct drm_connector *connector,
+ const char *path);
+int drm_connector_set_tile_property(struct drm_connector *connector);
+int drm_connector_update_edid_property(struct drm_connector *connector,
+ const struct edid *edid);
+void drm_connector_set_link_status_property(struct drm_connector *connector,
+ uint64_t link_status);
int drm_connector_init_panel_orientation_property(
struct drm_connector *connector, int width, int height);
@@ -1151,6 +1246,9 @@ struct drm_connector *
drm_connector_list_iter_next(struct drm_connector_list_iter *iter);
void drm_connector_list_iter_end(struct drm_connector_list_iter *iter);
+bool drm_connector_has_possible_encoder(struct drm_connector *connector,
+ struct drm_encoder *encoder);
+
/**
* drm_for_each_connector_iter - connector_list iterator macro
* @connector: &struct drm_connector pointer used as cursor
@@ -1163,4 +1261,17 @@ void drm_connector_list_iter_end(struct drm_connector_list_iter *iter);
#define drm_for_each_connector_iter(connector, iter) \
while ((connector = drm_connector_list_iter_next(iter)))
+/**
+ * drm_connector_for_each_possible_encoder - iterate connector's possible encoders
+ * @connector: &struct drm_connector pointer
+ * @encoder: &struct drm_encoder pointer used as cursor
+ * @__i: int iteration cursor, for macro-internal use
+ */
+#define drm_connector_for_each_possible_encoder(connector, encoder, __i) \
+ for ((__i) = 0; (__i) < ARRAY_SIZE((connector)->encoder_ids) && \
+ (connector)->encoder_ids[(__i)] != 0; (__i)++) \
+ for_each_if((encoder) = \
+ drm_encoder_find((connector)->dev, NULL, \
+ (connector)->encoder_ids[(__i)])) \
+
#endif
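
A short usage sketch (not part of this patch) of the drm_connector_for_each_possible_encoder() iterator defined above, e.g. in a driver's .best_encoder callback; the demo_ function is illustrative and simply picks the first possible encoder.

#include <drm/drm_connector.h>
#include <drm/drm_encoder.h>

static struct drm_encoder *demo_best_encoder(struct drm_connector *connector)
{
	struct drm_encoder *encoder;
	int i;

	drm_connector_for_each_possible_encoder(connector, encoder, i)
		return encoder;

	return NULL;
}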
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index a2d81d2907a9..92e7fc7f05a4 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -77,21 +77,6 @@ struct drm_plane_helper_funcs;
/**
* struct drm_crtc_state - mutable CRTC state
- * @crtc: backpointer to the CRTC
- * @enable: whether the CRTC should be enabled, gates all other state
- * @active: whether the CRTC is actively displaying (used for DPMS)
- * @planes_changed: planes on this crtc are updated
- * @mode_changed: @mode or @enable has been changed
- * @active_changed: @active has been toggled.
- * @connectors_changed: connectors to this crtc have been updated
- * @zpos_changed: zpos values of planes on this crtc have been updated
- * @color_mgmt_changed: color management properties have changed (degamma or
- * gamma LUT or CSC matrix)
- * @plane_mask: bitmask of (1 << drm_plane_index(plane)) of attached planes
- * @connector_mask: bitmask of (1 << drm_connector_index(connector)) of attached connectors
- * @encoder_mask: bitmask of (1 << drm_encoder_index(encoder)) of attached encoders
- * @mode_blob: &drm_property_blob for @mode
- * @state: backpointer to global drm_atomic_state
*
* Note that the distinction between @enable and @active is rather subtile:
* Flipping @active while @enable is set without changing anything else may
@@ -102,31 +87,127 @@ struct drm_plane_helper_funcs;
*
* The three booleans active_changed, connectors_changed and mode_changed are
* intended to indicate whether a full modeset is needed, rather than strictly
- * describing what has changed in a commit.
- * See also: drm_atomic_crtc_needs_modeset()
+ * describing what has changed in a commit. See also:
+ * drm_atomic_crtc_needs_modeset()
+ *
+ * WARNING: Transitional helpers (like drm_helper_crtc_mode_set() or
+ * drm_helper_crtc_mode_set_base()) do not maintain many of the derived control
+ * state like @plane_mask so drivers not converted over to atomic helpers should
+ * not rely on these being accurate!
*/
struct drm_crtc_state {
+ /** @crtc: backpointer to the CRTC */
struct drm_crtc *crtc;
+ /**
+ * @enable: Whether the CRTC should be enabled, gates all other state.
+ * This controls reservations of shared resources. Actual hardware state
+ * is controlled by @active.
+ */
bool enable;
+
+ /**
+ * @active: Whether the CRTC is actively displaying (used for DPMS).
+ * Implies that @enable is set. The driver must not release any shared
+ * resources if @active is set to false but @enable still true, because
+ * userspace expects that a DPMS ON always succeeds.
+ *
+ * Hence drivers must not consult @active in their various
+ * &drm_mode_config_funcs.atomic_check callback to reject an atomic
+ * commit. They can consult it to aid in the computation of derived
+ * hardware state, since even in the DPMS OFF state the display hardware
+ * should be as much powered down as when the CRTC is completely
+ * disabled through setting @enable to false.
+ */
bool active;
- /* computed state bits used by helpers and drivers */
+ /**
+ * @planes_changed: Planes on this crtc are updated. Used by the atomic
+ * helpers and drivers to steer the atomic commit control flow.
+ */
bool planes_changed : 1;
+
+ /**
+ * @mode_changed: @mode or @enable has been changed. Used by the atomic
+ * helpers and drivers to steer the atomic commit control flow. See also
+ * drm_atomic_crtc_needs_modeset().
+ *
+ * Drivers are supposed to set this for any CRTC state changes that
+ * require a full modeset. They can also reset it to false if e.g. a
+ * @mode change can be done without a full modeset by only changing
+ * scaler settings.
+ */
bool mode_changed : 1;
+
+ /**
+ * @active_changed: @active has been toggled. Used by the atomic
+ * helpers and drivers to steer the atomic commit control flow. See also
+ * drm_atomic_crtc_needs_modeset().
+ */
bool active_changed : 1;
+
+ /**
+ * @connectors_changed: Connectors to this crtc have been updated,
+ * either in their state or routing. Used by the atomic
+ * helpers and drivers to steer the atomic commit control flow. See also
+ * drm_atomic_crtc_needs_modeset().
+ *
+ * Drivers are supposed to set this as-needed from their own atomic
+ * check code, e.g. from &drm_encoder_helper_funcs.atomic_check
+ */
bool connectors_changed : 1;
+ /**
+ * @zpos_changed: zpos values of planes on this crtc have been updated.
+ * Used by the atomic helpers and drivers to steer the atomic commit
+ * control flow.
+ */
bool zpos_changed : 1;
+ /**
+ * @color_mgmt_changed: Color management properties have changed
+ * (@gamma_lut, @degamma_lut or @ctm). Used by the atomic helpers and
+ * drivers to steer the atomic commit control flow.
+ */
bool color_mgmt_changed : 1;
- /* attached planes bitmask:
- * WARNING: transitional helpers do not maintain plane_mask so
- * drivers not converted over to atomic helpers should not rely
- * on plane_mask being accurate!
+ /**
+ * @no_vblank:
+ *
+ * Reflects the ability of a CRTC to send VBLANK events. This state
+ * usually depends on the pipeline configuration, and the main usage
+ * is CRTCs feeding a writeback connector operating in oneshot mode.
+ * In this case the VBLANK event is only generated when a job is queued
+ * to the writeback connector, and we want the core to fake VBLANK
+ * events when this part of the pipeline hasn't changed but others have,
+ * or when the CRTC and connectors are being disabled.
+ *
+ * __drm_atomic_helper_crtc_duplicate_state() will not reset the value
+ * from the current state, the CRTC driver is then responsible for
+ * updating this field when needed.
+ *
+ * Note that the combination of &drm_crtc_state.event == NULL and
+ * &drm_crtc_state.no_vblank == true is valid and usually used when the
+ * writeback connector attached to the CRTC has a new job queued. In
+ * this case the driver will send the VBLANK event on its own when the
+ * writeback job is complete.
+ */
+ bool no_vblank : 1;
+
+ /**
+ * @plane_mask: Bitmask of drm_plane_mask(plane) of planes attached to
+ * this CRTC.
*/
u32 plane_mask;
+ /**
+ * @connector_mask: Bitmask of drm_connector_mask(connector) of
+ * connectors attached to this CRTC.
+ */
u32 connector_mask;
+
+ /**
+ * @encoder_mask: Bitmask of drm_encoder_mask(encoder) of encoders
+ * attached to this CRTC.
+ */
u32 encoder_mask;
/**
@@ -134,10 +215,13 @@ struct drm_crtc_state {
*
* Internal display timings which can be used by the driver to handle
* differences between the mode requested by userspace in @mode and what
- * is actually programmed into the hardware. It is purely driver
- * implementation defined what exactly this adjusted mode means. Usually
- * it is used to store the hardware display timings used between the
- * CRTC and encoder blocks.
+ * is actually programmed into the hardware.
+ *
+ * For drivers using &drm_bridge, this stores hardware display timings
+ * used between the CRTC and the first bridge. For other drivers, the
+ * meaning of the adjusted_mode field is purely driver implementation
+ * defined information, and will usually be used to store the hardware
+ * display timings used between the CRTC and encoder blocks.
*/
struct drm_display_mode adjusted_mode;
@@ -158,7 +242,10 @@ struct drm_crtc_state {
*/
struct drm_display_mode mode;
- /* blob property to expose current mode to atomic userspace */
+ /**
+ * @mode_blob: &drm_property_blob for @mode, for exposing the mode to
+ * atomic userspace.
+ */
struct drm_property_blob *mode_blob;
/**
@@ -262,6 +349,7 @@ struct drm_crtc_state {
*/
struct drm_crtc_commit *commit;
+ /** @state: backpointer to global drm_atomic_state */
struct drm_atomic_state *state;
};
@@ -503,6 +591,8 @@ struct drm_crtc_funcs {
* cleaned up by calling the @atomic_destroy_state hook in this
* structure.
*
+ * This callback is mandatory for atomic drivers.
+ *
* Atomic drivers which don't subclass &struct drm_crtc_state should use
* drm_atomic_helper_crtc_duplicate_state(). Drivers that subclass the
* state structure to extend it with driver-private state should use
@@ -529,6 +619,8 @@ struct drm_crtc_funcs {
*
* Destroy a state duplicated with @atomic_duplicate_state and release
* or unreference all resources it references
+ *
+ * This callback is mandatory for atomic drivers.
*/
void (*atomic_destroy_state)(struct drm_crtc *crtc,
struct drm_crtc_state *state);
@@ -717,35 +809,25 @@ struct drm_crtc_funcs {
/**
* struct drm_crtc - central CRTC control structure
- * @dev: parent DRM device
- * @port: OF node used by drm_of_find_possible_crtcs()
- * @head: list management
- * @name: human readable name, can be overwritten by the driver
- * @mutex: per-CRTC locking
- * @base: base KMS object for ID tracking etc.
- * @primary: primary plane for this CRTC
- * @cursor: cursor plane for this CRTC
- * @cursor_x: current x position of the cursor, used for universal cursor planes
- * @cursor_y: current y position of the cursor, used for universal cursor planes
- * @enabled: is this CRTC enabled?
- * @mode: current mode timings
- * @hwmode: mode timings as programmed to hw regs
- * @x: x position on screen
- * @y: y position on screen
- * @funcs: CRTC control functions
- * @gamma_size: size of gamma ramp
- * @gamma_store: gamma ramp values
- * @helper_private: mid-layer private data
- * @properties: property tracking for this CRTC
*
* Each CRTC may have one or more connectors associated with it. This structure
* allows the CRTC to be controlled.
*/
struct drm_crtc {
+ /** @dev: parent DRM device */
struct drm_device *dev;
+ /** @port: OF node used by drm_of_find_possible_crtcs(). */
struct device_node *port;
+ /**
+ * @head:
+ *
+ * List of all CRTCs on @dev, linked from &drm_mode_config.crtc_list.
+ * Invariant over the lifetime of @dev and therefore does not need
+ * locking.
+ */
struct list_head head;
+ /** @name: human readable name, can be overwritten by the driver */
char *name;
/**
@@ -760,10 +842,25 @@ struct drm_crtc {
*/
struct drm_modeset_lock mutex;
+ /** @base: base KMS object for ID tracking etc. */
struct drm_mode_object base;
- /* primary and cursor planes for CRTC */
+ /**
+ * @primary:
+ * Primary plane for this CRTC. Note that this is only
+ * relevant for legacy IOCTL, it specifies the plane implicitly used by
+ * the SETCRTC and PAGE_FLIP IOCTLs. It does not have any significance
+ * beyond that.
+ */
struct drm_plane *primary;
+
+ /**
+ * @cursor:
+ * Cursor plane for this CRTC. Note that this is only relevant for
+ * legacy IOCTLs: it specifies the plane implicitly used by the SETCURSOR
+ * and SETCURSOR2 IOCTLs. It does not have any significance
+ * beyond that.
+ */
struct drm_plane *cursor;
/**
@@ -772,30 +869,94 @@ struct drm_crtc {
*/
unsigned index;
- /* position of cursor plane on crtc */
+ /**
+ * @cursor_x: Current x position of the cursor, used for universal
+ * cursor planes because the SETCURSOR IOCTL only can update the
+ * framebuffer without supplying the coordinates. Drivers should not use
+ * this directly, atomic drivers should look at &drm_plane_state.crtc_x
+ * of the cursor plane instead.
+ */
int cursor_x;
+ /**
+ * @cursor_y: Current y position of the cursor, used for universal
+ * cursor planes because the SETCURSOR IOCTL only can update the
+ * framebuffer without supplying the coordinates. Drivers should not use
+ * this directly, atomic drivers should look at &drm_plane_state.crtc_y
+ * of the cursor plane instead.
+ */
int cursor_y;
+ /**
+ * @enabled:
+ *
+ * Is this CRTC enabled? Should only be used by legacy drivers, atomic
+ * drivers should instead consult &drm_crtc_state.enable and
+ * &drm_crtc_state.active. Atomic drivers can update this by calling
+ * drm_atomic_helper_update_legacy_modeset_state().
+ */
bool enabled;
- /* Requested mode from modesetting. */
+ /**
+ * @mode:
+ *
+ * Current mode timings. Should only be used by legacy drivers, atomic
+ * drivers should instead consult &drm_crtc_state.mode. Atomic drivers
+ * can update this by calling
+ * drm_atomic_helper_update_legacy_modeset_state().
+ */
struct drm_display_mode mode;
- /* Programmed mode in hw, after adjustments for encoders,
- * crtc, panel scaling etc. Needed for timestamping etc.
+ /**
+ * @hwmode:
+ *
+ * Programmed mode in hw, after adjustments for encoders, crtc, panel
+ * scaling etc. Should only be used by legacy drivers, for high
+ * precision vblank timestamps in
+ * drm_calc_vbltimestamp_from_scanoutpos().
+ *
+ * Note that atomic drivers should not use this, but instead use
+ * &drm_crtc_state.adjusted_mode. And for high-precision timestamps
+ * drm_calc_vbltimestamp_from_scanoutpos() uses &drm_vblank_crtc.hwmode,
+ * which is filled out by calling drm_calc_timestamping_constants().
*/
struct drm_display_mode hwmode;
- int x, y;
+ /**
+ * @x:
+ * x position on screen. Should only be used by legacy drivers, atomic
+ * drivers should look at &drm_plane_state.crtc_x of the primary plane
+ * instead. Updated by calling
+ * drm_atomic_helper_update_legacy_modeset_state().
+ */
+ int x;
+ /**
+ * @y:
+ * y position on screen. Should only be used by legacy drivers, atomic
+ * drivers should look at &drm_plane_state.crtc_y of the primary plane
+ * instead. Updated by calling
+ * drm_atomic_helper_update_legacy_modeset_state().
+ */
+ int y;
+
+ /** @funcs: CRTC control functions */
const struct drm_crtc_funcs *funcs;
- /* Legacy FB CRTC gamma size for reporting to userspace */
+ /**
+ * @gamma_size: Size of legacy gamma ramp reported to userspace. Set up
+ * by calling drm_mode_crtc_set_gamma_size().
+ */
uint32_t gamma_size;
+
+ /**
+ * @gamma_store: Gamma ramp values used by the legacy SETGAMMA and
+ * GETGAMMA IOCTls. Set up by calling drm_mode_crtc_set_gamma_size().
+ */
uint16_t *gamma_store;
- /* if you are using the helper */
+ /** @helper_private: mid-layer private data */
const struct drm_crtc_helper_funcs *helper_private;
+ /** @properties: property tracking for this CRTC */
struct drm_object_properties properties;
/**
@@ -865,7 +1026,6 @@ struct drm_crtc {
*
* spinlock to protect the fences in the fence_context.
*/
-
spinlock_t fence_lock;
/**
* @fence_seqno:
@@ -935,8 +1095,8 @@ static inline unsigned int drm_crtc_index(const struct drm_crtc *crtc)
* drm_crtc_mask - find the mask of a registered CRTC
* @crtc: CRTC to find mask for
*
- * Given a registered CRTC, return the mask bit of that CRTC for an
- * encoder's possible_crtcs field.
+ * Given a registered CRTC, return the mask bit of that CRTC for the
+ * &drm_encoder.possible_crtcs and &drm_plane.possible_crtcs fields.
*/
static inline uint32_t drm_crtc_mask(const struct drm_crtc *crtc)
{
diff --git a/include/drm/drm_debugfs_crc.h b/include/drm/drm_debugfs_crc.h
index 7d63b1d4adb9..b225eeb30d05 100644
--- a/include/drm/drm_debugfs_crc.h
+++ b/include/drm/drm_debugfs_crc.h
@@ -43,6 +43,7 @@ struct drm_crtc_crc_entry {
* @lock: protects the fields in this struct
* @source: name of the currently configured source of CRCs
* @opened: whether userspace has opened the data file for reading
+ * @overflow: whether an overflow occurred.
* @entries: array of entries, with size of %DRM_CRC_ENTRIES_NR
* @head: head of circular queue
* @tail: tail of circular queue
@@ -52,7 +53,7 @@ struct drm_crtc_crc_entry {
struct drm_crtc_crc {
spinlock_t lock;
const char *source;
- bool opened;
+ bool opened, overflow;
struct drm_crtc_crc_entry *entries;
int head, tail;
size_t values_cnt;
diff --git a/include/drm/drm_device.h b/include/drm/drm_device.h
index 858ba19a3e29..f9c6e0e3aec7 100644
--- a/include/drm/drm_device.h
+++ b/include/drm/drm_device.h
@@ -74,6 +74,27 @@ struct drm_device {
struct mutex filelist_mutex;
struct list_head filelist;
+ /**
+ * @filelist_internal:
+ *
+ * List of open DRM files for in-kernel clients. Protected by @filelist_mutex.
+ */
+ struct list_head filelist_internal;
+
+ /**
+ * @clientlist_mutex:
+ *
+ * Protects @clientlist access.
+ */
+ struct mutex clientlist_mutex;
+
+ /**
+ * @clientlist:
+ *
+ * List of in-kernel clients. Protected by @clientlist_mutex.
+ */
+ struct list_head clientlist;
+
/** \name Memory management */
/*@{ */
struct list_head maplist; /**< Linked list of regions */
diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h
index c01564991a9f..05cc31b5db16 100644
--- a/include/drm/drm_dp_helper.h
+++ b/include/drm/drm_dp_helper.h
@@ -1078,6 +1078,25 @@ struct drm_dp_aux_msg {
size_t size;
};
+struct cec_adapter;
+struct edid;
+
+/**
+ * struct drm_dp_aux_cec - DisplayPort CEC-Tunneling-over-AUX
+ * @lock: mutex protecting this struct
+ * @adap: the CEC adapter for CEC-Tunneling-over-AUX support.
+ * @name: name of the CEC adapter
+ * @parent: parent device of the CEC adapter
+ * @unregister_work: unregister the CEC adapter
+ */
+struct drm_dp_aux_cec {
+ struct mutex lock;
+ struct cec_adapter *adap;
+ const char *name;
+ struct device *parent;
+ struct delayed_work unregister_work;
+};
+
/**
* struct drm_dp_aux - DisplayPort AUX channel
* @name: user-visible name of this AUX channel and the I2C-over-AUX adapter
@@ -1136,6 +1155,10 @@ struct drm_dp_aux {
* @i2c_defer_count: Counts I2C DEFERs, used for DP validation.
*/
unsigned i2c_defer_count;
+ /**
+ * @cec: struct containing fields used for CEC-Tunneling-over-AUX.
+ */
+ struct drm_dp_aux_cec cec;
};
ssize_t drm_dp_dpcd_read(struct drm_dp_aux *aux, unsigned int offset,
@@ -1258,4 +1281,37 @@ drm_dp_has_quirk(const struct drm_dp_desc *desc, enum drm_dp_quirk quirk)
return desc->quirks & BIT(quirk);
}
+#ifdef CONFIG_DRM_DP_CEC
+void drm_dp_cec_irq(struct drm_dp_aux *aux);
+void drm_dp_cec_register_connector(struct drm_dp_aux *aux, const char *name,
+ struct device *parent);
+void drm_dp_cec_unregister_connector(struct drm_dp_aux *aux);
+void drm_dp_cec_set_edid(struct drm_dp_aux *aux, const struct edid *edid);
+void drm_dp_cec_unset_edid(struct drm_dp_aux *aux);
+#else
+static inline void drm_dp_cec_irq(struct drm_dp_aux *aux)
+{
+}
+
+static inline void drm_dp_cec_register_connector(struct drm_dp_aux *aux,
+ const char *name,
+ struct device *parent)
+{
+}
+
+static inline void drm_dp_cec_unregister_connector(struct drm_dp_aux *aux)
+{
+}
+
+static inline void drm_dp_cec_set_edid(struct drm_dp_aux *aux,
+ const struct edid *edid)
+{
+}
+
+static inline void drm_dp_cec_unset_edid(struct drm_dp_aux *aux)
+{
+}
+
+#endif
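+
+/*
+ * Illustrative sketch of how a DP connector driver might wire up the
+ * CEC-Tunneling-over-AUX helpers declared above. The my_dp pointer and the
+ * call sites are hypothetical; only the drm_dp_cec_*() functions come from
+ * this header.
+ *
+ *	// once, when the connector is registered
+ *	drm_dp_cec_register_connector(&my_dp->aux, connector->name, dev->dev);
+ *
+ *	// from the detect/EDID probe path
+ *	drm_dp_cec_set_edid(&my_dp->aux, edid);	// sink present
+ *	drm_dp_cec_unset_edid(&my_dp->aux);	// sink gone
+ *
+ *	// from the AUX/HPD interrupt handler
+ *	drm_dp_cec_irq(&my_dp->aux);
+ *
+ *	// when the connector is unregistered
+ *	drm_dp_cec_unregister_connector(&my_dp->aux);
+ */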
+
#endif /* _DRM_DP_HELPER_H_ */
diff --git a/include/drm/drm_drv.h b/include/drm/drm_drv.h
index 7e545f5f94d3..46a8009784df 100644
--- a/include/drm/drm_drv.h
+++ b/include/drm/drm_drv.h
@@ -649,6 +649,35 @@ static inline bool drm_dev_is_unplugged(struct drm_device *dev)
return true;
}
+/**
+ * drm_core_check_feature - check driver feature flags
+ * @dev: DRM device to check
+ * @feature: feature flag
+ *
+ * This checks @dev for driver features, see &drm_driver.driver_features and the
+ * various DRIVER_\* flags.
+ *
+ * Returns true if the @feature is supported, false otherwise.
+ */
+static inline bool drm_core_check_feature(struct drm_device *dev, int feature)
+{
+ return dev->driver->driver_features & feature;
+}
+
+/**
+ * drm_drv_uses_atomic_modeset - check if the driver implements
+ * atomic_commit()
+ * @dev: DRM device
+ *
+ * This check is useful if drivers do not have DRIVER_ATOMIC set but
+ * have atomic modesetting internally implemented.
+ */
+static inline bool drm_drv_uses_atomic_modeset(struct drm_device *dev)
+{
+ return drm_core_check_feature(dev, DRIVER_ATOMIC) ||
+ dev->mode_config.funcs->atomic_commit != NULL;
+}
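+
+/*
+ * Minimal usage sketch: gating an ioctl path on driver features. The error
+ * code and the branch bodies are illustrative only.
+ *
+ *	if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ *		return -EINVAL;
+ *
+ *	if (drm_drv_uses_atomic_modeset(dev))
+ *		ret = commit_atomic(dev);	// hypothetical atomic path
+ *	else
+ *		ret = commit_legacy(dev);	// hypothetical legacy path
+ */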
+
int drm_dev_set_unique(struct drm_device *dev, const char *name);
diff --git a/include/drm/drm_encoder.h b/include/drm/drm_encoder.h
index fb299696c7c4..4f597c0730b4 100644
--- a/include/drm/drm_encoder.h
+++ b/include/drm/drm_encoder.h
@@ -191,12 +191,24 @@ int drm_encoder_init(struct drm_device *dev,
* Given a registered encoder, return the index of that encoder within a DRM
* device's list of encoders.
*/
-static inline unsigned int drm_encoder_index(struct drm_encoder *encoder)
+static inline unsigned int drm_encoder_index(const struct drm_encoder *encoder)
{
return encoder->index;
}
/**
+ * drm_encoder_mask - find the mask of a registered encoder
+ * @encoder: encoder to find mask for
+ *
+ * Given a registered encoder, return the mask bit of that encoder for an
+ * encoder's possible_clones field.
+ */
+static inline u32 drm_encoder_mask(const struct drm_encoder *encoder)
+{
+ return 1 << drm_encoder_index(encoder);
+}
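+
+/*
+ * Example sketch: possible_clones is built by OR-ing encoder masks, and by
+ * convention includes the encoder itself. encoder_a/encoder_b are
+ * hypothetical, already initialized encoders.
+ *
+ *	encoder_a->possible_clones = drm_encoder_mask(encoder_a) |
+ *				     drm_encoder_mask(encoder_b);
+ */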
+
+/**
* drm_encoder_crtc_ok - can a given crtc drive a given encoder?
* @encoder: encoder to test
* @crtc: crtc to test
@@ -241,7 +253,7 @@ void drm_encoder_cleanup(struct drm_encoder *encoder);
*/
#define drm_for_each_encoder_mask(encoder, dev, encoder_mask) \
list_for_each_entry((encoder), &(dev)->mode_config.encoder_list, head) \
- for_each_if ((encoder_mask) & (1 << drm_encoder_index(encoder)))
+ for_each_if ((encoder_mask) & drm_encoder_mask(encoder))
/**
* drm_for_each_encoder - iterate over all encoders
diff --git a/include/drm/drm_fb_cma_helper.h b/include/drm/drm_fb_cma_helper.h
index d532f88a8d55..96e26e3b9a0c 100644
--- a/include/drm/drm_fb_cma_helper.h
+++ b/include/drm/drm_fb_cma_helper.h
@@ -16,16 +16,10 @@ struct drm_mode_fb_cmd2;
struct drm_plane;
struct drm_plane_state;
-int drm_fb_cma_fbdev_init_with_funcs(struct drm_device *dev,
- unsigned int preferred_bpp, unsigned int max_conn_count,
- const struct drm_framebuffer_funcs *funcs);
int drm_fb_cma_fbdev_init(struct drm_device *dev, unsigned int preferred_bpp,
unsigned int max_conn_count);
void drm_fb_cma_fbdev_fini(struct drm_device *dev);
-struct drm_fbdev_cma *drm_fbdev_cma_init_with_funcs(struct drm_device *dev,
- unsigned int preferred_bpp, unsigned int max_conn_count,
- const struct drm_framebuffer_funcs *funcs);
struct drm_fbdev_cma *drm_fbdev_cma_init(struct drm_device *dev,
unsigned int preferred_bpp, unsigned int max_conn_count);
void drm_fbdev_cma_fini(struct drm_fbdev_cma *fbdev_cma);
diff --git a/include/drm/drm_fb_helper.h b/include/drm/drm_fb_helper.h
index b069433e7fc1..5db08c8f1d25 100644
--- a/include/drm/drm_fb_helper.h
+++ b/include/drm/drm_fb_helper.h
@@ -32,6 +32,7 @@
struct drm_fb_helper;
+#include <drm/drm_client.h>
#include <drm/drm_crtc.h>
#include <drm/drm_device.h>
#include <linux/kgdb.h>
@@ -154,6 +155,20 @@ struct drm_fb_helper_connector {
* operations.
*/
struct drm_fb_helper {
+ /**
+ * @client:
+ *
+ * DRM client used by the generic fbdev emulation.
+ */
+ struct drm_client_dev client;
+
+ /**
+ * @buffer:
+ *
+ * Framebuffer used by the generic fbdev emulation.
+ */
+ struct drm_client_buffer *buffer;
+
struct drm_framebuffer *fb;
struct drm_device *dev;
int crtc_count;
@@ -234,6 +249,12 @@ struct drm_fb_helper {
int preferred_bpp;
};
+static inline struct drm_fb_helper *
+drm_fb_helper_from_client(struct drm_client_dev *client)
+{
+ return container_of(client, struct drm_fb_helper, client);
+}
+
/**
* define DRM_FB_HELPER_DEFAULT_OPS - helper define for drm drivers
*
@@ -330,6 +351,10 @@ void drm_fb_helper_fbdev_teardown(struct drm_device *dev);
void drm_fb_helper_lastclose(struct drm_device *dev);
void drm_fb_helper_output_poll_changed(struct drm_device *dev);
+
+int drm_fb_helper_generic_probe(struct drm_fb_helper *fb_helper,
+ struct drm_fb_helper_surface_size *sizes);
+int drm_fbdev_generic_setup(struct drm_device *dev, unsigned int preferred_bpp);
#else
static inline void drm_fb_helper_prepare(struct drm_device *dev,
struct drm_fb_helper *helper,
@@ -564,6 +589,19 @@ static inline void drm_fb_helper_output_poll_changed(struct drm_device *dev)
{
}
+static inline int
+drm_fb_helper_generic_probe(struct drm_fb_helper *fb_helper,
+ struct drm_fb_helper_surface_size *sizes)
+{
+ return 0;
+}
+
+static inline int
+drm_fbdev_generic_setup(struct drm_device *dev, unsigned int preferred_bpp)
+{
+ return 0;
+}
+
#endif
static inline int
diff --git a/include/drm/drm_file.h b/include/drm/drm_file.h
index 027ac16da3d1..26485acc51d7 100644
--- a/include/drm/drm_file.h
+++ b/include/drm/drm_file.h
@@ -193,6 +193,13 @@ struct drm_file {
unsigned aspect_ratio_allowed:1;
/**
+ * @writeback_connectors:
+ *
+	 * True if the client understands writeback connectors.
+ */
+ unsigned writeback_connectors:1;
+
+ /**
* @is_master:
*
* This client is the creator of @master. Protected by struct
diff --git a/include/drm/drm_fourcc.h b/include/drm/drm_fourcc.h
index 3e86408dac9f..f9c15845f465 100644
--- a/include/drm/drm_fourcc.h
+++ b/include/drm/drm_fourcc.h
@@ -39,6 +39,7 @@ struct drm_mode_fb_cmd2;
* @hsub: Horizontal chroma subsampling factor
* @vsub: Vertical chroma subsampling factor
 * @has_alpha: Does the format embed an alpha component?
+ * @is_yuv: Is it a YUV format?
*/
struct drm_format_info {
u32 format;
@@ -48,6 +49,7 @@ struct drm_format_info {
u8 hsub;
u8 vsub;
bool has_alpha;
+ bool is_yuv;
};
/**
diff --git a/include/drm/drm_mm.h b/include/drm/drm_mm.h
index 101f566ae43d..2c3bbb43c7d1 100644
--- a/include/drm/drm_mm.h
+++ b/include/drm/drm_mm.h
@@ -109,6 +109,38 @@ enum drm_mm_insert_mode {
* Allocates the node from the bottom of the found hole.
*/
DRM_MM_INSERT_EVICT,
+
+ /**
+ * @DRM_MM_INSERT_ONCE:
+ *
+	 * Only check the first hole for suitability and report -ENOSPC
+	 * immediately otherwise, rather than checking every hole until a
+ * suitable one is found. Can only be used in conjunction with another
+ * search method such as DRM_MM_INSERT_HIGH or DRM_MM_INSERT_LOW.
+ */
+ DRM_MM_INSERT_ONCE = BIT(31),
+
+ /**
+ * @DRM_MM_INSERT_HIGHEST:
+ *
+ * Only check the highest hole (the hole with the largest address) and
+ * insert the node at the top of the hole or report -ENOSPC if
+ * unsuitable.
+ *
+ * Does not search all holes.
+ */
+ DRM_MM_INSERT_HIGHEST = DRM_MM_INSERT_HIGH | DRM_MM_INSERT_ONCE,
+
+ /**
+ * @DRM_MM_INSERT_LOWEST:
+ *
+ * Only check the lowest hole (the hole with the smallest address) and
+ * insert the node at the bottom of the hole or report -ENOSPC if
+ * unsuitable.
+ *
+ * Does not search all holes.
+ */
+ DRM_MM_INSERT_LOWEST = DRM_MM_INSERT_LOW | DRM_MM_INSERT_ONCE,
};
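+
+/*
+ * Usage sketch, assuming the drm_mm_insert_node_in_range() prototype of this
+ * kernel: allocate from the highest hole only, without scanning the rest of
+ * the address space.
+ *
+ *	err = drm_mm_insert_node_in_range(&mm, &node, size, 0, 0,
+ *					  0, U64_MAX,
+ *					  DRM_MM_INSERT_HIGHEST);
+ *	if (err == -ENOSPC)
+ *		evict_something();	// hypothetical fallback
+ */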
/**
@@ -173,7 +205,7 @@ struct drm_mm {
struct drm_mm_node head_node;
/* Keep an interval_tree for fast lookup of drm_mm_nodes by address. */
struct rb_root_cached interval_tree;
- struct rb_root holes_size;
+ struct rb_root_cached holes_size;
struct rb_root holes_addr;
unsigned long scan_active;
diff --git a/include/drm/drm_mode_config.h b/include/drm/drm_mode_config.h
index 33b3a96d66d0..a0b202e1d69a 100644
--- a/include/drm/drm_mode_config.h
+++ b/include/drm/drm_mode_config.h
@@ -329,10 +329,10 @@ struct drm_mode_config_funcs {
/**
* struct drm_mode_config - Mode configuration control structure
- * @min_width: minimum pixel width on this device
- * @min_height: minimum pixel height on this device
- * @max_width: maximum pixel width on this device
- * @max_height: maximum pixel height on this device
+ * @min_width: minimum fb pixel width on this device
+ * @min_height: minimum fb pixel height on this device
+ * @max_width: maximum fb pixel width on this device
+ * @max_height: maximum fb pixel height on this device
* @funcs: core driver provided mode setting functions
* @fb_base: base address of the framebuffer
* @poll_enabled: track polling support for this device
@@ -727,6 +727,11 @@ struct drm_mode_config {
*/
struct drm_property *aspect_ratio_property;
/**
+ * @content_type_property: Optional connector property to control the
+ * HDMI infoframe content type setting.
+ */
+ struct drm_property *content_type_property;
+ /**
* @degamma_lut_property: Optional CRTC property to set the LUT used to
* convert the framebuffer's colors to linear gamma.
*/
@@ -779,6 +784,29 @@ struct drm_mode_config {
*/
struct drm_property *panel_orientation_property;
+ /**
+ * @writeback_fb_id_property: Property for writeback connectors, storing
+ * the ID of the output framebuffer.
+ * See also: drm_writeback_connector_init()
+ */
+ struct drm_property *writeback_fb_id_property;
+
+ /**
+ * @writeback_pixel_formats_property: Property for writeback connectors,
+ * storing an array of the supported pixel formats for the writeback
+ * engine (read-only).
+ * See also: drm_writeback_connector_init()
+ */
+ struct drm_property *writeback_pixel_formats_property;
+ /**
+ * @writeback_out_fence_ptr_property: Property for writeback connectors,
+ * fd pointer representing the outgoing fences for a writeback
+ * connector. Userspace should provide a pointer to a value of type s32,
+ * and then cast that pointer to u64.
+ * See also: drm_writeback_connector_init()
+ */
+ struct drm_property *writeback_out_fence_ptr_property;
+
/* dumb ioctl parameters */
uint32_t preferred_depth, prefer_shadow;
diff --git a/include/drm/drm_modes.h b/include/drm/drm_modes.h
index b159fe07fcf9..baded6514456 100644
--- a/include/drm/drm_modes.h
+++ b/include/drm/drm_modes.h
@@ -530,7 +530,7 @@ drm_mode_validate_ycbcr420(const struct drm_display_mode *mode,
void drm_mode_prune_invalid(struct drm_device *dev,
struct list_head *mode_list, bool verbose);
void drm_mode_sort(struct list_head *mode_list);
-void drm_mode_connector_list_update(struct drm_connector *connector);
+void drm_connector_list_update(struct drm_connector *connector);
/* parsing cmdline modes */
bool
diff --git a/include/drm/drm_modeset_helper_vtables.h b/include/drm/drm_modeset_helper_vtables.h
index 35e2a3a79fc5..61142aa0ab23 100644
--- a/include/drm/drm_modeset_helper_vtables.h
+++ b/include/drm/drm_modeset_helper_vtables.h
@@ -785,7 +785,7 @@ struct drm_connector_helper_funcs {
*
* This function should fill in all modes currently valid for the sink
* into the &drm_connector.probed_modes list. It should also update the
- * EDID property by calling drm_mode_connector_update_edid_property().
+ * EDID property by calling drm_connector_update_edid_property().
*
* The usual way to implement this is to cache the EDID retrieved in the
* probe callback somewhere in the driver-private connector structure.
@@ -974,6 +974,21 @@ struct drm_connector_helper_funcs {
*/
int (*atomic_check)(struct drm_connector *connector,
struct drm_connector_state *state);
+
+ /**
+ * @atomic_commit:
+ *
+ * This hook is to be used by drivers implementing writeback connectors
+	 * that need a defined point at which to commit the writeback job to the hardware.
+ * The writeback_job to commit is available in
+ * &drm_connector_state.writeback_job.
+ *
+ * This hook is optional.
+ *
+ * This callback is used by the atomic modeset helpers.
+ */
+ void (*atomic_commit)(struct drm_connector *connector,
+ struct drm_connector_state *state);
};
/**
diff --git a/include/drm/drm_of.h b/include/drm/drm_of.h
index b93c239afb60..ead34ab5ca4e 100644
--- a/include/drm/drm_of.h
+++ b/include/drm/drm_of.h
@@ -17,6 +17,8 @@ struct drm_bridge;
struct device_node;
#ifdef CONFIG_OF
+uint32_t drm_of_crtc_port_mask(struct drm_device *dev,
+ struct device_node *port);
uint32_t drm_of_find_possible_crtcs(struct drm_device *dev,
struct device_node *port);
void drm_of_component_match_add(struct device *master,
@@ -34,6 +36,12 @@ int drm_of_find_panel_or_bridge(const struct device_node *np,
struct drm_panel **panel,
struct drm_bridge **bridge);
#else
+static inline uint32_t drm_of_crtc_port_mask(struct drm_device *dev,
+ struct device_node *port)
+{
+ return 0;
+}
+
static inline uint32_t drm_of_find_possible_crtcs(struct drm_device *dev,
struct device_node *port)
{
diff --git a/include/drm/drm_panel.h b/include/drm/drm_panel.h
index 14ac240a1f64..582a0ec0aa70 100644
--- a/include/drm/drm_panel.h
+++ b/include/drm/drm_panel.h
@@ -89,6 +89,7 @@ struct drm_panel {
struct drm_device *drm;
struct drm_connector *connector;
struct device *dev;
+ struct device_link *link;
const struct drm_panel_funcs *funcs;
@@ -199,7 +200,7 @@ struct drm_panel *of_drm_find_panel(const struct device_node *np);
#else
static inline struct drm_panel *of_drm_find_panel(const struct device_node *np)
{
- return NULL;
+ return ERR_PTR(-ENODEV);
}
#endif
diff --git a/include/drm/drm_pci.h b/include/drm/drm_pci.h
index 674599025d7d..8181e9e7cf1d 100644
--- a/include/drm/drm_pci.h
+++ b/include/drm/drm_pci.h
@@ -58,11 +58,4 @@ static inline int drm_get_pci_dev(struct pci_dev *pdev,
}
#endif
-#define DRM_PCIE_SPEED_25 1
-#define DRM_PCIE_SPEED_50 2
-#define DRM_PCIE_SPEED_80 4
-
-int drm_pcie_get_speed_cap_mask(struct drm_device *dev, u32 *speed_mask);
-int drm_pcie_get_max_link_width(struct drm_device *dev, u32 *mlw);
-
#endif /* _DRM_PCI_H_ */
diff --git a/include/drm/drm_plane.h b/include/drm/drm_plane.h
index 26fa50c2a50e..8a152dc16ea5 100644
--- a/include/drm/drm_plane.h
+++ b/include/drm/drm_plane.h
@@ -34,31 +34,15 @@ struct drm_modeset_acquire_ctx;
/**
* struct drm_plane_state - mutable plane state
- * @plane: backpointer to the plane
- * @crtc_w: width of visible portion of plane on crtc
- * @crtc_h: height of visible portion of plane on crtc
- * @src_x: left position of visible portion of plane within
- * plane (in 16.16)
- * @src_y: upper position of visible portion of plane within
- * plane (in 16.16)
- * @src_w: width of visible portion of plane (in 16.16)
- * @src_h: height of visible portion of plane (in 16.16)
- * @alpha: opacity of the plane
- * @rotation: rotation of the plane
- * @zpos: priority of the given plane on crtc (optional)
- * Note that multiple active planes on the same crtc can have an identical
- * zpos value. The rule to solving the conflict is to compare the plane
- * object IDs; the plane with a higher ID must be stacked on top of a
- * plane with a lower ID.
- * @normalized_zpos: normalized value of zpos: unique, range from 0 to N-1
- * where N is the number of active planes for given crtc. Note that
- * the driver must set drm_mode_config.normalize_zpos or call
- * drm_atomic_normalize_zpos() to update this before it can be trusted.
- * @src: clipped source coordinates of the plane (in 16.16)
- * @dst: clipped destination coordinates of the plane
- * @state: backpointer to global drm_atomic_state
+ *
+ * Please note that the destination coordinates @crtc_x, @crtc_y, @crtc_h and
+ * @crtc_w and the source coordinates @src_x, @src_y, @src_h and @src_w are the
+ * raw coordinates provided by userspace. Drivers should use
+ * drm_atomic_helper_check_plane_state() and only use the derived rectangles in
+ * @src and @dst to program the hardware.
*/
struct drm_plane_state {
+ /** @plane: backpointer to the plane */
struct drm_plane *plane;
/**
@@ -87,7 +71,7 @@ struct drm_plane_state {
* preserved.
*
* Drivers should store any implicit fence in this from their
- * &drm_plane_helper.prepare_fb callback. See drm_gem_fb_prepare_fb()
+ * &drm_plane_helper_funcs.prepare_fb callback. See drm_gem_fb_prepare_fb()
* and drm_gem_fb_simple_display_pipe_prepare_fb() for suitable helpers.
*/
struct dma_fence *fence;
@@ -108,20 +92,60 @@ struct drm_plane_state {
*/
int32_t crtc_y;
+ /** @crtc_w: width of visible portion of plane on crtc */
+ /** @crtc_h: height of visible portion of plane on crtc */
uint32_t crtc_w, crtc_h;
- /* Source values are 16.16 fixed point */
- uint32_t src_x, src_y;
+ /**
+ * @src_x: left position of visible portion of plane within plane (in
+ * 16.16 fixed point).
+ */
+ uint32_t src_x;
+ /**
+ * @src_y: upper position of visible portion of plane within plane (in
+ * 16.16 fixed point).
+ */
+ uint32_t src_y;
+ /** @src_w: width of visible portion of plane (in 16.16) */
+ /** @src_h: height of visible portion of plane (in 16.16) */
uint32_t src_h, src_w;
- /* Plane opacity */
+ /**
+ * @alpha:
+ * Opacity of the plane with 0 as completely transparent and 0xffff as
+ * completely opaque. See drm_plane_create_alpha_property() for more
+ * details.
+ */
u16 alpha;
- /* Plane rotation */
+ /**
+ * @rotation:
+ * Rotation of the plane. See drm_plane_create_rotation_property() for
+ * more details.
+ */
unsigned int rotation;
- /* Plane zpos */
+ /**
+ * @zpos:
+ * Priority of the given plane on crtc (optional).
+ *
+ * Note that multiple active planes on the same crtc can have an
+ * identical zpos value. The rule to solving the conflict is to compare
+ * the plane object IDs; the plane with a higher ID must be stacked on
+ * top of a plane with a lower ID.
+ *
+ * See drm_plane_create_zpos_property() and
+ * drm_plane_create_zpos_immutable_property() for more details.
+ */
unsigned int zpos;
+
+ /**
+ * @normalized_zpos:
+ * Normalized value of zpos: unique, range from 0 to N-1 where N is the
+ * number of active planes for given crtc. Note that the driver must set
+ * &drm_mode_config.normalize_zpos or call drm_atomic_normalize_zpos() to
+ * update this before it can be trusted.
+ */
unsigned int normalized_zpos;
/**
@@ -138,7 +162,8 @@ struct drm_plane_state {
*/
enum drm_color_range color_range;
- /* Clipped coordinates */
+ /** @src: clipped source coordinates of the plane (in 16.16) */
+ /** @dst: clipped destination coordinates of the plane */
struct drm_rect src, dst;
/**
@@ -157,6 +182,7 @@ struct drm_plane_state {
*/
struct drm_crtc_commit *commit;
+ /** @state: backpointer to global drm_atomic_state */
struct drm_atomic_state *state;
};
@@ -288,6 +314,8 @@ struct drm_plane_funcs {
* cleaned up by calling the @atomic_destroy_state hook in this
* structure.
*
+ * This callback is mandatory for atomic drivers.
+ *
* Atomic drivers which don't subclass &struct drm_plane_state should use
* drm_atomic_helper_plane_duplicate_state(). Drivers that subclass the
* state structure to extend it with driver-private state should use
@@ -314,6 +342,8 @@ struct drm_plane_funcs {
*
* Destroy a state duplicated with @atomic_duplicate_state and release
* or unreference all resources it references
+ *
+ * This callback is mandatory for atomic drivers.
*/
void (*atomic_destroy_state)(struct drm_plane *plane,
struct drm_plane_state *state);
@@ -431,7 +461,10 @@ struct drm_plane_funcs {
* This optional hook is used for the DRM to determine if the given
* format/modifier combination is valid for the plane. This allows the
* DRM to generate the correct format bitmask (which formats apply to
- * which modifier).
+	 * which modifier), and to validate modifiers at atomic_check time.
+ *
+ * If not present, then any modifier in the plane's modifier
+ * list is allowed with any of the plane's formats.
*
* Returns:
*
@@ -492,30 +525,27 @@ enum drm_plane_type {
/**
* struct drm_plane - central DRM plane control structure
- * @dev: DRM device this plane belongs to
- * @head: for list management
- * @name: human readable name, can be overwritten by the driver
- * @base: base mode object
- * @possible_crtcs: pipes this plane can be bound to
- * @format_types: array of formats supported by this plane
- * @format_count: number of formats supported
- * @format_default: driver hasn't supplied supported formats for the plane
- * @modifiers: array of modifiers supported by this plane
- * @modifier_count: number of modifiers supported
- * @old_fb: Temporary tracking of the old fb while a modeset is ongoing. Used by
- * drm_mode_set_config_internal() to implement correct refcounting.
- * @funcs: helper functions
- * @properties: property tracking for this plane
- * @type: type of plane (overlay, primary, cursor)
- * @alpha_property: alpha property for this plane
- * @zpos_property: zpos property for this plane
- * @rotation_property: rotation property for this plane
- * @helper_private: mid-layer private data
+ *
+ * Planes represent the scanout hardware of a display block. They receive their
+ * input data from a &drm_framebuffer and feed it to a &drm_crtc. Planes control
+ * how their content is composited with other planes, see `Plane Composition
+ * Properties`_ for more details, and are also involved in the color conversion
+ * of input pixels, see `Color Management Properties`_ for details on that.
*/
struct drm_plane {
+ /** @dev: DRM device this plane belongs to */
struct drm_device *dev;
+
+ /**
+ * @head:
+ *
+ * List of all planes on @dev, linked from &drm_mode_config.plane_list.
+ * Invariant over the lifetime of @dev and therefore does not need
+ * locking.
+ */
struct list_head head;
+ /** @name: human readable name, can be overwritten by the driver */
char *name;
/**
@@ -529,35 +559,62 @@ struct drm_plane {
*/
struct drm_modeset_lock mutex;
+ /** @base: base mode object */
struct drm_mode_object base;
+ /**
+	 * @possible_crtcs: pipes this plane can be bound to, constructed from
+	 * drm_crtc_mask().
+ */
uint32_t possible_crtcs;
+ /** @format_types: array of formats supported by this plane */
uint32_t *format_types;
+ /** @format_count: Size of the array pointed at by @format_types. */
unsigned int format_count;
+ /**
+ * @format_default: driver hasn't supplied supported formats for the
+ * plane. Used by the drm_plane_init compatibility wrapper only.
+ */
bool format_default;
+ /** @modifiers: array of modifiers supported by this plane */
uint64_t *modifiers;
+	/** @modifier_count: Size of the array pointed at by @modifiers. */
unsigned int modifier_count;
/**
- * @crtc: Currently bound CRTC, only really meaningful for non-atomic
- * drivers. Atomic drivers should instead check &drm_plane_state.crtc.
+ * @crtc:
+ *
+ * Currently bound CRTC, only meaningful for non-atomic drivers. For
+ * atomic drivers this is forced to be NULL, atomic drivers should
+ * instead check &drm_plane_state.crtc.
*/
struct drm_crtc *crtc;
/**
- * @fb: Currently bound framebuffer, only really meaningful for
- * non-atomic drivers. Atomic drivers should instead check
- * &drm_plane_state.fb.
+ * @fb:
+ *
+ * Currently bound framebuffer, only meaningful for non-atomic drivers.
+ * For atomic drivers this is forced to be NULL, atomic drivers should
+ * instead check &drm_plane_state.fb.
*/
struct drm_framebuffer *fb;
+ /**
+ * @old_fb:
+ *
+ * Temporary tracking of the old fb while a modeset is ongoing. Only
+ * used by non-atomic drivers, forced to be NULL for atomic drivers.
+ */
struct drm_framebuffer *old_fb;
+ /** @funcs: plane control functions */
const struct drm_plane_funcs *funcs;
+ /** @properties: property tracking for this plane */
struct drm_object_properties properties;
+ /** @type: Type of plane, see &enum drm_plane_type for details. */
enum drm_plane_type type;
/**
@@ -566,6 +623,7 @@ struct drm_plane {
*/
unsigned index;
+ /** @helper_private: mid-layer private data */
const struct drm_plane_helper_funcs *helper_private;
/**
@@ -583,8 +641,23 @@ struct drm_plane {
*/
struct drm_plane_state *state;
+ /**
+ * @alpha_property:
+ * Optional alpha property for this plane. See
+ * drm_plane_create_alpha_property().
+ */
struct drm_property *alpha_property;
+ /**
+ * @zpos_property:
+ * Optional zpos property for this plane. See
+ * drm_plane_create_zpos_property().
+ */
struct drm_property *zpos_property;
+ /**
+ * @rotation_property:
+ * Optional rotation property for this plane. See
+ * drm_plane_create_rotation_property().
+ */
struct drm_property *rotation_property;
/**
@@ -632,10 +705,20 @@ void drm_plane_cleanup(struct drm_plane *plane);
* Given a registered plane, return the index of that plane within a DRM
* device's list of planes.
*/
-static inline unsigned int drm_plane_index(struct drm_plane *plane)
+static inline unsigned int drm_plane_index(const struct drm_plane *plane)
{
return plane->index;
}
+
+/**
+ * drm_plane_mask - find the mask of a registered plane
+ * @plane: plane to find mask for
+ */
+static inline u32 drm_plane_mask(const struct drm_plane *plane)
+{
+ return 1 << drm_plane_index(plane);
+}
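+
+/*
+ * Example sketch: plane masks are OR-ed together when accumulating the
+ * planes touched by an update, e.g. into &drm_crtc_state.plane_mask:
+ *
+ *	crtc_state->plane_mask |= drm_plane_mask(plane);
+ */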
+
struct drm_plane * drm_plane_from_index(struct drm_device *dev, int idx);
void drm_plane_force_disable(struct drm_plane *plane);
@@ -671,7 +754,7 @@ static inline struct drm_plane *drm_plane_find(struct drm_device *dev,
*/
#define drm_for_each_plane_mask(plane, dev, plane_mask) \
list_for_each_entry((plane), &(dev)->mode_config.plane_list, head) \
- for_each_if ((plane_mask) & (1 << drm_plane_index(plane)))
+ for_each_if ((plane_mask) & drm_plane_mask(plane))
/**
* drm_for_each_legacy_plane - iterate over all planes for legacy userspace
diff --git a/include/drm/drm_plane_helper.h b/include/drm/drm_plane_helper.h
index 28d7ce620729..26cee2934781 100644
--- a/include/drm/drm_plane_helper.h
+++ b/include/drm/drm_plane_helper.h
@@ -67,8 +67,10 @@ int drm_plane_helper_update(struct drm_plane *plane, struct drm_crtc *crtc,
int crtc_x, int crtc_y,
unsigned int crtc_w, unsigned int crtc_h,
uint32_t src_x, uint32_t src_y,
- uint32_t src_w, uint32_t src_h);
-int drm_plane_helper_disable(struct drm_plane *plane);
+ uint32_t src_w, uint32_t src_h,
+ struct drm_modeset_acquire_ctx *ctx);
+int drm_plane_helper_disable(struct drm_plane *plane,
+ struct drm_modeset_acquire_ctx *ctx);
/* For use by drm_crtc_helper.c */
int drm_plane_helper_commit(struct drm_plane *plane,
diff --git a/include/drm/drm_prime.h b/include/drm/drm_prime.h
index 4d5f5d6cf6a6..d716d653b096 100644
--- a/include/drm/drm_prime.h
+++ b/include/drm/drm_prime.h
@@ -82,7 +82,7 @@ int drm_gem_prime_fd_to_handle(struct drm_device *dev,
struct dma_buf *drm_gem_dmabuf_export(struct drm_device *dev,
struct dma_buf_export_info *exp_info);
void drm_gem_dmabuf_release(struct dma_buf *dma_buf);
-int drm_gem_map_attach(struct dma_buf *dma_buf, struct device *target_dev,
+int drm_gem_map_attach(struct dma_buf *dma_buf,
struct dma_buf_attachment *attach);
void drm_gem_map_detach(struct dma_buf *dma_buf,
struct dma_buf_attachment *attach);
@@ -93,10 +93,6 @@ void drm_gem_unmap_dma_buf(struct dma_buf_attachment *attach,
enum dma_data_direction dir);
void *drm_gem_dmabuf_vmap(struct dma_buf *dma_buf);
void drm_gem_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr);
-void *drm_gem_dmabuf_kmap_atomic(struct dma_buf *dma_buf,
- unsigned long page_num);
-void drm_gem_dmabuf_kunmap_atomic(struct dma_buf *dma_buf,
- unsigned long page_num, void *addr);
void *drm_gem_dmabuf_kmap(struct dma_buf *dma_buf, unsigned long page_num);
void drm_gem_dmabuf_kunmap(struct dma_buf *dma_buf, unsigned long page_num,
void *addr);
diff --git a/include/drm/drm_print.h b/include/drm/drm_print.h
index e1a46e9991cc..f3e6eed3e79c 100644
--- a/include/drm/drm_print.h
+++ b/include/drm/drm_print.h
@@ -69,16 +69,21 @@
struct drm_printer {
/* private: */
void (*printfn)(struct drm_printer *p, struct va_format *vaf);
+ void (*puts)(struct drm_printer *p, const char *str);
void *arg;
const char *prefix;
};
+void __drm_printfn_coredump(struct drm_printer *p, struct va_format *vaf);
+void __drm_puts_coredump(struct drm_printer *p, const char *str);
void __drm_printfn_seq_file(struct drm_printer *p, struct va_format *vaf);
+void __drm_puts_seq_file(struct drm_printer *p, const char *str);
void __drm_printfn_info(struct drm_printer *p, struct va_format *vaf);
void __drm_printfn_debug(struct drm_printer *p, struct va_format *vaf);
__printf(2, 3)
void drm_printf(struct drm_printer *p, const char *f, ...);
+void drm_puts(struct drm_printer *p, const char *str);
__printf(2, 0)
/**
@@ -105,6 +110,71 @@ drm_vprintf(struct drm_printer *p, const char *fmt, va_list *va)
drm_printf((printer), "%.*s" fmt, (indent), "\t\t\t\t\tX", ##__VA_ARGS__)
/**
+ * struct drm_print_iterator - local struct used with drm_printer_coredump
+ * @data: Pointer to the devcoredump output buffer
+ * @start: The offset within the buffer to start writing
+ * @remain: The number of bytes to write for this iteration
+ */
+struct drm_print_iterator {
+ void *data;
+ ssize_t start;
+ ssize_t remain;
+ /* private: */
+ ssize_t offset;
+};
+
+/**
+ * drm_coredump_printer - construct a &drm_printer that can output to a buffer
+ * from the read function for devcoredump
+ * @iter: A pointer to a struct drm_print_iterator for the read instance
+ *
+ * This wrapper extends drm_printf() to work with a dev_coredumpm() callback
+ * function. The passed in drm_print_iterator struct contains the buffer
+ * pointer, size and offset as passed in from devcoredump.
+ *
+ * For example::
+ *
+ * void coredump_read(char *buffer, loff_t offset, size_t count,
+ * void *data, size_t datalen)
+ * {
+ * struct drm_print_iterator iter;
+ * struct drm_printer p;
+ *
+ * iter.data = buffer;
+ * iter.start = offset;
+ * iter.remain = count;
+ *
+ * p = drm_coredump_printer(&iter);
+ *
+ *		drm_printf(&p, "foo=%d\n", foo);
+ * }
+ *
+ * void makecoredump(...)
+ * {
+ * ...
+ * dev_coredumpm(dev, THIS_MODULE, data, 0, GFP_KERNEL,
+ * coredump_read, ...)
+ * }
+ *
+ * RETURNS:
+ * The &drm_printer object
+ */
+static inline struct drm_printer
+drm_coredump_printer(struct drm_print_iterator *iter)
+{
+ struct drm_printer p = {
+ .printfn = __drm_printfn_coredump,
+ .puts = __drm_puts_coredump,
+ .arg = iter,
+ };
+
+ /* Set the internal offset of the iterator to zero */
+ iter->offset = 0;
+
+ return p;
+}
+
+/**
* drm_seq_file_printer - construct a &drm_printer that outputs to &seq_file
* @f: the &struct seq_file to output to
*
@@ -115,6 +185,7 @@ static inline struct drm_printer drm_seq_file_printer(struct seq_file *f)
{
struct drm_printer p = {
.printfn = __drm_printfn_seq_file,
+ .puts = __drm_puts_seq_file,
.arg = f,
};
return p;
@@ -195,6 +266,7 @@ static inline struct drm_printer drm_debug_printer(const char *prefix)
#define DRM_UT_VBL 0x20
#define DRM_UT_STATE 0x40
#define DRM_UT_LEASE 0x80
+#define DRM_UT_DP 0x100
__printf(3, 4)
void drm_dev_printk(const struct device *dev, const char *level,
@@ -307,6 +379,11 @@ void drm_err(const char *format, ...);
#define DRM_DEBUG_LEASE(fmt, ...) \
drm_dbg(DRM_UT_LEASE, fmt, ##__VA_ARGS__)
+#define DRM_DEV_DEBUG_DP(dev, fmt, ...) \
+ drm_dev_dbg(dev, DRM_UT_DP, fmt, ## __VA_ARGS__)
+#define DRM_DEBUG_DP(fmt, ...)						\
+ drm_dbg(DRM_UT_DP, fmt, ## __VA_ARGS__)
+
#define _DRM_DEV_DEFINE_DEBUG_RATELIMITED(dev, category, fmt, ...) \
({ \
static DEFINE_RATELIMIT_STATE(_rs, \
diff --git a/include/drm/drm_property.h b/include/drm/drm_property.h
index 1d5c0b2a8956..c030f6ccab99 100644
--- a/include/drm/drm_property.h
+++ b/include/drm/drm_property.h
@@ -147,10 +147,10 @@ struct drm_property {
* properties are not exposed to legacy userspace.
*
* DRM_MODE_PROP_IMMUTABLE
- * Set for properties where userspace cannot be changed by
+ * Set for properties whose values cannot be changed by
* userspace. The kernel is allowed to update the value of these
* properties. This is generally used to expose probe state to
- * usersapce, e.g. the EDID, or the connector path property on DP
+ * userspace, e.g. the EDID, or the connector path property on DP
* MST sinks.
*/
uint32_t flags;
diff --git a/include/drm/drm_vma_manager.h b/include/drm/drm_vma_manager.h
index 8758df94e9a0..c7987daeaed0 100644
--- a/include/drm/drm_vma_manager.h
+++ b/include/drm/drm_vma_manager.h
@@ -41,6 +41,7 @@ struct drm_vma_offset_node {
rwlock_t vm_lock;
struct drm_mm_node vm_node;
struct rb_root vm_files;
+ bool readonly:1;
};
struct drm_vma_offset_manager {
diff --git a/include/drm/drm_writeback.h b/include/drm/drm_writeback.h
new file mode 100644
index 000000000000..23df9d463003
--- /dev/null
+++ b/include/drm/drm_writeback.h
@@ -0,0 +1,136 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * (C) COPYRIGHT 2016 ARM Limited. All rights reserved.
+ * Author: Brian Starkey <brian.starkey@arm.com>
+ *
+ * This program is free software and is provided to you under the terms of the
+ * GNU General Public License version 2 as published by the Free Software
+ * Foundation, and any use by you of this program is subject to the terms
+ * of such GNU licence.
+ */
+
+#ifndef __DRM_WRITEBACK_H__
+#define __DRM_WRITEBACK_H__
+#include <drm/drm_connector.h>
+#include <drm/drm_encoder.h>
+#include <linux/workqueue.h>
+
+struct drm_writeback_connector {
+ struct drm_connector base;
+
+ /**
+ * @encoder: Internal encoder used by the connector to fulfill
+ * the DRM framework requirements. The users of the
+	 * &drm_writeback_connector control the behaviour of the @encoder
+	 * by passing the @enc_helper_funcs parameter to
+	 * drm_writeback_connector_init().
+ */
+ struct drm_encoder encoder;
+
+ /**
+ * @pixel_formats_blob_ptr:
+ *
+ * DRM blob property data for the pixel formats list on writeback
+ * connectors
+ * See also drm_writeback_connector_init()
+ */
+ struct drm_property_blob *pixel_formats_blob_ptr;
+
+ /** @job_lock: Protects job_queue */
+ spinlock_t job_lock;
+
+ /**
+ * @job_queue:
+ *
+ * Holds a list of a connector's writeback jobs; the last item is the
+ * most recent. The first item may be either waiting for the hardware
+ * to begin writing, or currently being written.
+ *
+ * See also: drm_writeback_queue_job() and
+ * drm_writeback_signal_completion()
+ */
+ struct list_head job_queue;
+
+ /**
+ * @fence_context:
+ *
+ * timeline context used for fence operations.
+ */
+ unsigned int fence_context;
+ /**
+ * @fence_lock:
+ *
+ * spinlock to protect the fences in the fence_context.
+ */
+ spinlock_t fence_lock;
+ /**
+ * @fence_seqno:
+ *
+ * Seqno variable used as monotonic counter for the fences
+ * created on the connector's timeline.
+ */
+ unsigned long fence_seqno;
+ /**
+ * @timeline_name:
+ *
+ * The name of the connector's fence timeline.
+ */
+ char timeline_name[32];
+};
+
+struct drm_writeback_job {
+ /**
+ * @cleanup_work:
+ *
+ * Used to allow drm_writeback_signal_completion to defer dropping the
+ * framebuffer reference to a workqueue
+ */
+ struct work_struct cleanup_work;
+
+ /**
+ * @list_entry:
+ *
+ * List item for the writeback connector's @job_queue
+ */
+ struct list_head list_entry;
+
+ /**
+ * @fb:
+ *
+ * Framebuffer to be written to by the writeback connector. Do not set
+ * directly, use drm_atomic_set_writeback_fb_for_connector()
+ */
+ struct drm_framebuffer *fb;
+
+ /**
+ * @out_fence:
+ *
+ * Fence which will signal once the writeback has completed
+ */
+ struct dma_fence *out_fence;
+};
+
+static inline struct drm_writeback_connector *
+drm_connector_to_writeback(struct drm_connector *connector)
+{
+ return container_of(connector, struct drm_writeback_connector, base);
+}
+
+int drm_writeback_connector_init(struct drm_device *dev,
+ struct drm_writeback_connector *wb_connector,
+ const struct drm_connector_funcs *con_funcs,
+ const struct drm_encoder_helper_funcs *enc_helper_funcs,
+ const u32 *formats, int n_formats);
+
+void drm_writeback_queue_job(struct drm_writeback_connector *wb_connector,
+ struct drm_writeback_job *job);
+
+void drm_writeback_cleanup_job(struct drm_writeback_job *job);
+
+void
+drm_writeback_signal_completion(struct drm_writeback_connector *wb_connector,
+ int status);
+
+struct dma_fence *
+drm_writeback_get_out_fence(struct drm_writeback_connector *wb_connector);
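+
+/*
+ * Typical flow (illustrative sketch; driver-specific names are hypothetical):
+ *
+ *	// in the &drm_connector_helper_funcs.atomic_commit hook
+ *	struct drm_writeback_job *job = conn_state->writeback_job;
+ *
+ *	drm_writeback_queue_job(wb_conn, job);
+ *	my_hw_start_writeback(job->fb);		// hypothetical hardware kick
+ *
+ *	// later, from the write-complete interrupt handler
+ *	drm_writeback_signal_completion(wb_conn, 0);
+ */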
+#endif
diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
index dec655894d08..21c648b0b2a1 100644
--- a/include/drm/gpu_scheduler.h
+++ b/include/drm/gpu_scheduler.h
@@ -27,6 +27,8 @@
#include <drm/spsc_queue.h>
#include <linux/dma-fence.h>
+#define MAX_WAIT_SCHED_ENTITY_Q_EMPTY msecs_to_jiffies(1000)
+
struct drm_gpu_scheduler;
struct drm_sched_rq;
@@ -43,18 +45,37 @@ enum drm_sched_priority {
};
/**
- * drm_sched_entity - A wrapper around a job queue (typically attached
- * to the DRM file_priv).
+ * struct drm_sched_entity - A wrapper around a job queue (typically
+ * attached to the DRM file_priv).
+ *
+ * @list: used to append this struct to the list of entities in the
+ * runqueue.
+ * @rq: runqueue to which this entity belongs.
+ * @rq_lock: lock to modify the runqueue to which this entity belongs.
+ * @job_queue: the list of jobs of this entity.
+ * @fence_seq: a linearly increasing seqno incremented with each
+ * new &drm_sched_fence which is part of the entity.
+ * @fence_context: a unique context for all the fences which belong
+ * to this entity.
+ * The &drm_sched_fence.scheduled uses the
+ * fence_context but &drm_sched_fence.finished uses
+ * fence_context + 1.
+ * @dependency: the dependency fence of the job which is on the top
+ * of the job queue.
+ * @cb: callback for the dependency fence above.
+ * @guilty: points to ctx's guilty.
+ * @last_scheduled: points to the finished fence of the last scheduled job.
+ * @last_user: last group leader pushing a job into the entity.
*
* Entities will emit jobs in order to their corresponding hardware
* ring, and the scheduler will alternate between entities based on
* scheduling policy.
-*/
+ */
struct drm_sched_entity {
struct list_head list;
struct drm_sched_rq *rq;
spinlock_t rq_lock;
- struct drm_gpu_scheduler *sched;
struct spsc_queue job_queue;
@@ -63,47 +84,98 @@ struct drm_sched_entity {
struct dma_fence *dependency;
struct dma_fence_cb cb;
- atomic_t *guilty; /* points to ctx's guilty */
- int fini_status;
- struct dma_fence *last_scheduled;
+ atomic_t *guilty;
+ struct dma_fence *last_scheduled;
+ struct task_struct *last_user;
};
/**
+ * struct drm_sched_rq - queue of entities to be scheduled.
+ *
+ * @lock: to modify the entities list.
+ * @sched: the scheduler to which this rq belongs to.
+ * @entities: list of the entities to be scheduled.
+ * @current_entity: the entity which is to be scheduled.
+ *
* Run queue is a set of entities scheduling command submissions for
* one specific ring. It implements the scheduling policy that selects
* the next entity to emit commands from.
-*/
+ */
struct drm_sched_rq {
spinlock_t lock;
+ struct drm_gpu_scheduler *sched;
struct list_head entities;
struct drm_sched_entity *current_entity;
};
+/**
+ * struct drm_sched_fence - fences corresponding to the scheduling of a job.
+ */
struct drm_sched_fence {
+ /**
+ * @scheduled: this fence is what will be signaled by the scheduler
+ * when the job is scheduled.
+ */
struct dma_fence scheduled;
- /* This fence is what will be signaled by the scheduler when
- * the job is completed.
- *
- * When setting up an out fence for the job, you should use
- * this, since it's available immediately upon
- * drm_sched_job_init(), and the fence returned by the driver
- * from run_job() won't be created until the dependencies have
- * resolved.
- */
+ /**
+ * @finished: this fence is what will be signaled by the scheduler
+ * when the job is completed.
+ *
+ * When setting up an out fence for the job, you should use
+ * this, since it's available immediately upon
+ * drm_sched_job_init(), and the fence returned by the driver
+ * from run_job() won't be created until the dependencies have
+ * resolved.
+ */
struct dma_fence finished;
+ /**
+ * @cb: the callback for the parent fence below.
+ */
struct dma_fence_cb cb;
+ /**
+ * @parent: the fence returned by &drm_sched_backend_ops.run_job
+ * when scheduling the job on hardware. We signal the
+ * &drm_sched_fence.finished fence once parent is signalled.
+ */
struct dma_fence *parent;
+ /**
+ * @sched: the scheduler instance to which the job having this struct
+ * belongs to.
+ */
struct drm_gpu_scheduler *sched;
+ /**
+ * @lock: the lock used by the scheduled and the finished fences.
+ */
spinlock_t lock;
+ /**
+ * @owner: job owner for debugging
+ */
void *owner;
};
struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f);
/**
- * drm_sched_job - A job to be run by an entity.
+ * struct drm_sched_job - A job to be run by an entity.
+ *
+ * @queue_node: used to append this struct to the queue of jobs in an entity.
+ * @sched: the scheduler instance on which this job is scheduled.
+ * @s_fence: contains the fences for the scheduling of job.
+ * @finish_cb: the callback for the finished fence.
+ * @finish_work: schedules the function drm_sched_job_finish() once the job has
+ *               finished to remove the job from the
+ *               &drm_gpu_scheduler.ring_mirror_list.
+ * @node: used to append this struct to the &drm_gpu_scheduler.ring_mirror_list.
+ * @work_tdr: schedules a delayed call to drm_sched_job_timedout() after the timeout
+ * interval is over.
+ * @id: a unique id assigned to each job scheduled on the scheduler.
+ * @karma: increment on every hang caused by this job. If this exceeds the hang
+ * limit of the scheduler then the job is marked guilty and will not
+ * be scheduled further.
+ * @s_priority: the priority of the job.
+ * @entity: the entity to which this job belongs.
*
* A job is created by the driver using drm_sched_job_init(), and
* should call drm_sched_entity_push_job() once it wants the scheduler
@@ -130,38 +202,64 @@ static inline bool drm_sched_invalidate_job(struct drm_sched_job *s_job,
}
/**
+ * struct drm_sched_backend_ops
+ *
* Define the backend operations called by the scheduler,
- * these functions should be implemented in driver side
-*/
+ * these functions should be implemented in driver side.
+ */
struct drm_sched_backend_ops {
- /* Called when the scheduler is considering scheduling this
- * job next, to get another struct dma_fence for this job to
+ /**
+ * @dependency: Called when the scheduler is considering scheduling
+ * this job next, to get another struct dma_fence for this job to
* block on. Once it returns NULL, run_job() may be called.
*/
struct dma_fence *(*dependency)(struct drm_sched_job *sched_job,
struct drm_sched_entity *s_entity);
- /* Called to execute the job once all of the dependencies have
- * been resolved. This may be called multiple times, if
+ /**
+ * @run_job: Called to execute the job once all of the dependencies
+ * have been resolved. This may be called multiple times, if
* timedout_job() has happened and drm_sched_job_recovery()
* decides to try it again.
*/
struct dma_fence *(*run_job)(struct drm_sched_job *sched_job);
- /* Called when a job has taken too long to execute, to trigger
- * GPU recovery.
+ /**
+ * @timedout_job: Called when a job has taken too long to execute,
+ * to trigger GPU recovery.
*/
void (*timedout_job)(struct drm_sched_job *sched_job);
- /* Called once the job's finished fence has been signaled and
- * it's time to clean it up.
+ /**
+ * @free_job: Called once the job's finished fence has been signaled
+ * and it's time to clean it up.
*/
void (*free_job)(struct drm_sched_job *sched_job);
};
/**
- * One scheduler is implemented for each hardware ring
-*/
+ * struct drm_gpu_scheduler
+ *
+ * @ops: backend operations provided by the driver.
+ * @hw_submission_limit: the max size of the hardware queue.
+ * @timeout: the time after which a job is removed from the scheduler.
+ * @name: name of the ring for which this scheduler is being used.
+ * @sched_rq: priority wise array of run queues.
+ * @wake_up_worker: the wait queue on which the scheduler sleeps until a job
+ * is ready to be scheduled.
+ * @job_scheduled: once drm_sched_entity_flush() is called the scheduler
+ * waits on this wait queue until all the scheduled jobs are
+ * finished.
+ * @hw_rq_count: the number of jobs currently in the hardware queue.
+ * @job_id_count: used to assign a unique id to each job.
+ * @thread: the kthread on which the scheduler runs.
+ * @ring_mirror_list: the list of jobs which are currently in the job queue.
+ * @job_list_lock: lock to protect the ring_mirror_list.
+ * @hang_limit: once the number of hangs caused by a job crosses this limit,
+ *              the job is marked guilty and will not be considered for
+ *              scheduling further.
+ *
+ * One scheduler is implemented for each hardware ring.
+ */
struct drm_gpu_scheduler {
const struct drm_sched_backend_ops *ops;
uint32_t hw_submission_limit;
@@ -184,16 +282,13 @@ int drm_sched_init(struct drm_gpu_scheduler *sched,
const char *name);
void drm_sched_fini(struct drm_gpu_scheduler *sched);
-int drm_sched_entity_init(struct drm_gpu_scheduler *sched,
- struct drm_sched_entity *entity,
- struct drm_sched_rq *rq,
+int drm_sched_entity_init(struct drm_sched_entity *entity,
+ struct drm_sched_rq **rq_list,
+ unsigned int num_rq_list,
atomic_t *guilty);
-void drm_sched_entity_do_release(struct drm_gpu_scheduler *sched,
- struct drm_sched_entity *entity);
-void drm_sched_entity_cleanup(struct drm_gpu_scheduler *sched,
- struct drm_sched_entity *entity);
-void drm_sched_entity_fini(struct drm_gpu_scheduler *sched,
- struct drm_sched_entity *entity);
+long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout);
+void drm_sched_entity_fini(struct drm_sched_entity *entity);
+void drm_sched_entity_destroy(struct drm_sched_entity *entity);
void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
struct drm_sched_entity *entity);
void drm_sched_entity_set_rq(struct drm_sched_entity *entity,
@@ -204,7 +299,6 @@ struct drm_sched_fence *drm_sched_fence_create(
void drm_sched_fence_scheduled(struct drm_sched_fence *fence);
void drm_sched_fence_finished(struct drm_sched_fence *fence);
int drm_sched_job_init(struct drm_sched_job *job,
- struct drm_gpu_scheduler *sched,
struct drm_sched_entity *entity,
void *owner);
void drm_sched_hw_job_reset(struct drm_gpu_scheduler *sched,
diff --git a/include/drm/i915_component.h b/include/drm/i915_component.h
index 346b1f5cb180..fca22d463e1b 100644
--- a/include/drm/i915_component.h
+++ b/include/drm/i915_component.h
@@ -24,101 +24,26 @@
#ifndef _I915_COMPONENT_H_
#define _I915_COMPONENT_H_
+#include "drm_audio_component.h"
+
/* MAX_PORTS is the number of ports
 * It must be kept in sync with I915_MAX_PORTS defined in i915_drv.h
*/
#define MAX_PORTS 6
/**
- * struct i915_audio_component_ops - Ops implemented by i915 driver, called by hda driver
- */
-struct i915_audio_component_ops {
- /**
- * @owner: i915 module
- */
- struct module *owner;
- /**
- * @get_power: get the POWER_DOMAIN_AUDIO power well
- *
- * Request the power well to be turned on.
- */
- void (*get_power)(struct device *);
- /**
- * @put_power: put the POWER_DOMAIN_AUDIO power well
- *
- * Allow the power well to be turned off.
- */
- void (*put_power)(struct device *);
- /**
- * @codec_wake_override: Enable/disable codec wake signal
- */
- void (*codec_wake_override)(struct device *, bool enable);
- /**
- * @get_cdclk_freq: Get the Core Display Clock in kHz
- */
- int (*get_cdclk_freq)(struct device *);
- /**
- * @sync_audio_rate: set n/cts based on the sample rate
- *
- * Called from audio driver. After audio driver sets the
- * sample rate, it will call this function to set n/cts
- */
- int (*sync_audio_rate)(struct device *, int port, int pipe, int rate);
- /**
- * @get_eld: fill the audio state and ELD bytes for the given port
- *
- * Called from audio driver to get the HDMI/DP audio state of the given
- * digital port, and also fetch ELD bytes to the given pointer.
- *
- * It returns the byte size of the original ELD (not the actually
- * copied size), zero for an invalid ELD, or a negative error code.
- *
- * Note that the returned size may be over @max_bytes. Then it
- * implies that only a part of ELD has been copied to the buffer.
- */
- int (*get_eld)(struct device *, int port, int pipe, bool *enabled,
- unsigned char *buf, int max_bytes);
-};
-
-/**
- * struct i915_audio_component_audio_ops - Ops implemented by hda driver, called by i915 driver
- */
-struct i915_audio_component_audio_ops {
- /**
- * @audio_ptr: Pointer to be used in call to pin_eld_notify
- */
- void *audio_ptr;
- /**
- * @pin_eld_notify: Notify the HDA driver that pin sense and/or ELD information has changed
- *
- * Called when the i915 driver has set up audio pipeline or has just
- * begun to tear it down. This allows the HDA driver to update its
- * status accordingly (even when the HDA controller is in power save
- * mode).
- */
- void (*pin_eld_notify)(void *audio_ptr, int port, int pipe);
-};
-
-/**
* struct i915_audio_component - Used for direct communication between i915 and hda drivers
*/
struct i915_audio_component {
/**
- * @dev: i915 device, used as parameter for ops
+ * @base: the drm_audio_component base class
*/
- struct device *dev;
+ struct drm_audio_component base;
+
/**
* @aud_sample_rate: the array of audio sample rate per port
*/
int aud_sample_rate[MAX_PORTS];
- /**
- * @ops: Ops implemented by i915 driver, called by hda driver
- */
- const struct i915_audio_component_ops *ops;
- /**
- * @audio_ops: Ops implemented by hda driver, called by i915 driver
- */
- const struct i915_audio_component_audio_ops *audio_ops;
};
#endif /* _I915_COMPONENT_H_ */
diff --git a/include/drm/i915_drm.h b/include/drm/i915_drm.h
index c9e5a6621b95..c44703f471b3 100644
--- a/include/drm/i915_drm.h
+++ b/include/drm/i915_drm.h
@@ -95,7 +95,9 @@ extern struct resource intel_graphics_stolen_res;
#define I845_TSEG_SIZE_512K (2 << 1)
#define I845_TSEG_SIZE_1M (3 << 1)
-#define INTEL_BSM 0x5c
+#define INTEL_BSM 0x5c
+#define INTEL_GEN11_BSM_DW0 0xc0
+#define INTEL_GEN11_BSM_DW1 0xc4
#define INTEL_BSM_MASK (-(1u << 20))
#endif /* _I915_DRM_H_ */
diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h
index bab70ff6e78b..fbf5cfc9b352 100644
--- a/include/drm/i915_pciids.h
+++ b/include/drm/i915_pciids.h
@@ -349,7 +349,6 @@
#define INTEL_KBL_GT2_IDS(info) \
INTEL_VGA_DEVICE(0x5916, info), /* ULT GT2 */ \
INTEL_VGA_DEVICE(0x5917, info), /* Mobile GT2 */ \
- INTEL_VGA_DEVICE(0x591C, info), /* ULX GT2 */ \
INTEL_VGA_DEVICE(0x5921, info), /* ULT GT2F */ \
INTEL_VGA_DEVICE(0x591E, info), /* ULX GT2 */ \
INTEL_VGA_DEVICE(0x5912, info), /* DT GT2 */ \
@@ -365,11 +364,17 @@
#define INTEL_KBL_GT4_IDS(info) \
INTEL_VGA_DEVICE(0x593B, info) /* Halo GT4 */
+/* AML/KBL Y GT2 */
+#define INTEL_AML_GT2_IDS(info) \
+ INTEL_VGA_DEVICE(0x591C, info), /* ULX GT2 */ \
+ INTEL_VGA_DEVICE(0x87C0, info) /* ULX GT2 */
+
#define INTEL_KBL_IDS(info) \
INTEL_KBL_GT1_IDS(info), \
INTEL_KBL_GT2_IDS(info), \
INTEL_KBL_GT3_IDS(info), \
- INTEL_KBL_GT4_IDS(info)
+ INTEL_KBL_GT4_IDS(info), \
+ INTEL_AML_GT2_IDS(info)
/* CFL S */
#define INTEL_CFL_S_GT1_IDS(info) \
@@ -388,32 +393,40 @@
INTEL_VGA_DEVICE(0x3E9B, info), /* Halo GT2 */ \
INTEL_VGA_DEVICE(0x3E94, info) /* Halo GT2 */
-/* CFL U GT1 */
-#define INTEL_CFL_U_GT1_IDS(info) \
- INTEL_VGA_DEVICE(0x3EA1, info), \
- INTEL_VGA_DEVICE(0x3EA4, info)
-
/* CFL U GT2 */
#define INTEL_CFL_U_GT2_IDS(info) \
- INTEL_VGA_DEVICE(0x3EA0, info), \
- INTEL_VGA_DEVICE(0x3EA3, info), \
INTEL_VGA_DEVICE(0x3EA9, info)
/* CFL U GT3 */
#define INTEL_CFL_U_GT3_IDS(info) \
- INTEL_VGA_DEVICE(0x3EA2, info), /* ULT GT3 */ \
INTEL_VGA_DEVICE(0x3EA5, info), /* ULT GT3 */ \
INTEL_VGA_DEVICE(0x3EA6, info), /* ULT GT3 */ \
INTEL_VGA_DEVICE(0x3EA7, info), /* ULT GT3 */ \
INTEL_VGA_DEVICE(0x3EA8, info) /* ULT GT3 */
+/* WHL/CFL U GT1 */
+#define INTEL_WHL_U_GT1_IDS(info) \
+ INTEL_VGA_DEVICE(0x3EA1, info)
+
+/* WHL/CFL U GT2 */
+#define INTEL_WHL_U_GT2_IDS(info) \
+ INTEL_VGA_DEVICE(0x3EA0, info)
+
+/* WHL/CFL U GT3 */
+#define INTEL_WHL_U_GT3_IDS(info) \
+ INTEL_VGA_DEVICE(0x3EA2, info), \
+ INTEL_VGA_DEVICE(0x3EA3, info), \
+ INTEL_VGA_DEVICE(0x3EA4, info)
+
#define INTEL_CFL_IDS(info) \
INTEL_CFL_S_GT1_IDS(info), \
INTEL_CFL_S_GT2_IDS(info), \
INTEL_CFL_H_GT2_IDS(info), \
- INTEL_CFL_U_GT1_IDS(info), \
INTEL_CFL_U_GT2_IDS(info), \
- INTEL_CFL_U_GT3_IDS(info)
+ INTEL_CFL_U_GT3_IDS(info), \
+ INTEL_WHL_U_GT1_IDS(info), \
+ INTEL_WHL_U_GT2_IDS(info), \
+ INTEL_WHL_U_GT3_IDS(info)
/* CNL */
#define INTEL_CNL_IDS(info) \
diff --git a/include/drm/tinydrm/tinydrm.h b/include/drm/tinydrm/tinydrm.h
index 56e4a916b5e8..fe9827d0ca8a 100644
--- a/include/drm/tinydrm/tinydrm.h
+++ b/include/drm/tinydrm/tinydrm.h
@@ -16,16 +16,31 @@
/**
* struct tinydrm_device - tinydrm device
- * @drm: DRM device
- * @pipe: Display pipe structure
- * @dirty_lock: Serializes framebuffer flushing
- * @fb_funcs: Framebuffer functions used when creating framebuffers
*/
struct tinydrm_device {
+ /**
+ * @drm: DRM device
+ */
struct drm_device *drm;
+
+ /**
+ * @pipe: Display pipe structure
+ */
struct drm_simple_display_pipe pipe;
+
+ /**
+ * @dirty_lock: Serializes framebuffer flushing
+ */
struct mutex dirty_lock;
+
+ /**
+ * @fb_funcs: Framebuffer functions used when creating framebuffers
+ */
const struct drm_framebuffer_funcs *fb_funcs;
+
+ /**
+ * @fb_dirty: Framebuffer dirty callback
+ */
int (*fb_dirty)(struct drm_framebuffer *framebuffer,
struct drm_file *file_priv, unsigned flags,
unsigned color, struct drm_clip_rect *clips,
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index c67977aa1a0e..a01ba2032f0e 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -284,17 +284,29 @@ struct ttm_operation_ctx {
#define TTM_OPT_FLAG_FORCE_ALLOC 0x2
/**
+ * ttm_bo_get - reference a struct ttm_buffer_object
+ *
+ * @bo: The buffer object.
+ */
+static inline void ttm_bo_get(struct ttm_buffer_object *bo)
+{
+ kref_get(&bo->kref);
+}
+
+/**
* ttm_bo_reference - reference a struct ttm_buffer_object
*
* @bo: The buffer object.
*
* Returns a refcounted pointer to a buffer object.
+ *
+ * This function is deprecated. Use @ttm_bo_get instead.
*/
static inline struct ttm_buffer_object *
ttm_bo_reference(struct ttm_buffer_object *bo)
{
- kref_get(&bo->kref);
+ ttm_bo_get(bo);
return bo;
}
@@ -346,11 +358,22 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
struct ttm_operation_ctx *ctx);
/**
+ * ttm_bo_put
+ *
+ * @bo: The buffer object.
+ *
+ * Unreference a buffer object.
+ */
+void ttm_bo_put(struct ttm_buffer_object *bo);
+
+/**
* ttm_bo_unref
*
* @bo: The buffer object.
*
* Unreference and clear a pointer to a buffer object.
+ *
+ * This function is deprecated. Use @ttm_bo_put instead.
*/
void ttm_bo_unref(struct ttm_buffer_object **bo);
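
For context, the hunk above pairs the deprecated reference/unref helpers with the new ttm_bo_get()/ttm_bo_put() names. A minimal, hypothetical driver snippet showing the migration (the example_* functions are invented for this sketch, not part of the patch):

#include <drm/ttm/ttm_bo_api.h>

static void example_hold_bo(struct ttm_buffer_object *bo)
{
	/* previously: bo = ttm_bo_reference(bo); */
	ttm_bo_get(bo);
}

static void example_release_bo(struct ttm_buffer_object *bo)
{
	/* previously: ttm_bo_unref(&bo); which also cleared the caller's pointer */
	ttm_bo_put(bo);
}
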
diff --git a/include/drm/ttm/ttm_set_memory.h b/include/drm/ttm/ttm_set_memory.h
new file mode 100644
index 000000000000..7c492b49e38c
--- /dev/null
+++ b/include/drm/ttm/ttm_set_memory.h
@@ -0,0 +1,150 @@
+/**************************************************************************
+ *
+ * Copyright (c) 2018 Advanced Micro Devices, Inc.
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors: Huang Rui <ray.huang@amd.com>
+ */
+
+#ifndef TTM_SET_MEMORY
+#define TTM_SET_MEMORY
+
+#include <linux/mm.h>
+
+#ifdef CONFIG_X86
+
+#include <asm/set_memory.h>
+
+static inline int ttm_set_pages_array_wb(struct page **pages, int addrinarray)
+{
+ return set_pages_array_wb(pages, addrinarray);
+}
+
+static inline int ttm_set_pages_array_wc(struct page **pages, int addrinarray)
+{
+ return set_pages_array_wc(pages, addrinarray);
+}
+
+static inline int ttm_set_pages_array_uc(struct page **pages, int addrinarray)
+{
+ return set_pages_array_uc(pages, addrinarray);
+}
+
+static inline int ttm_set_pages_wb(struct page *page, int numpages)
+{
+ return set_pages_wb(page, numpages);
+}
+
+static inline int ttm_set_pages_wc(struct page *page, int numpages)
+{
+ unsigned long addr = (unsigned long)page_address(page);
+
+ return set_memory_wc(addr, numpages);
+}
+
+static inline int ttm_set_pages_uc(struct page *page, int numpages)
+{
+ return set_pages_uc(page, numpages);
+}
+
+#else /* for CONFIG_X86 */
+
+#if IS_ENABLED(CONFIG_AGP)
+
+#include <asm/agp.h>
+
+static inline int ttm_set_pages_array_wb(struct page **pages, int addrinarray)
+{
+ int i;
+
+ for (i = 0; i < addrinarray; i++)
+ unmap_page_from_agp(pages[i]);
+ return 0;
+}
+
+static inline int ttm_set_pages_array_wc(struct page **pages, int addrinarray)
+{
+ int i;
+
+ for (i = 0; i < addrinarray; i++)
+ map_page_into_agp(pages[i]);
+ return 0;
+}
+
+static inline int ttm_set_pages_array_uc(struct page **pages, int addrinarray)
+{
+ int i;
+
+ for (i = 0; i < addrinarray; i++)
+ map_page_into_agp(pages[i]);
+ return 0;
+}
+
+static inline int ttm_set_pages_wb(struct page *page, int numpages)
+{
+ int i;
+
+ for (i = 0; i < numpages; i++)
+ unmap_page_from_agp(page++);
+ return 0;
+}
+
+#else /* for CONFIG_AGP */
+
+static inline int ttm_set_pages_array_wb(struct page **pages, int addrinarray)
+{
+ return 0;
+}
+
+static inline int ttm_set_pages_array_wc(struct page **pages, int addrinarray)
+{
+ return 0;
+}
+
+static inline int ttm_set_pages_array_uc(struct page **pages, int addrinarray)
+{
+ return 0;
+}
+
+static inline int ttm_set_pages_wb(struct page *page, int numpages)
+{
+ return 0;
+}
+
+#endif /* for CONFIG_AGP */
+
+static inline int ttm_set_pages_wc(struct page *page, int numpages)
+{
+ return 0;
+}
+
+static inline int ttm_set_pages_uc(struct page *page, int numpages)
+{
+ return 0;
+}
+
+#endif /* for CONFIG_X86 */
+
+#endif
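
The new header keeps callers free of CONFIG_X86/CONFIG_AGP conditionals: on x86 the ttm_set_pages_*() helpers forward to set_pages_*()/set_memory_wc(), while other configurations fall back to AGP mapping or no-ops. A sketch of a caller, assuming a page array allocated elsewhere (example_make_pages_wc() is hypothetical):

#include <drm/ttm/ttm_set_memory.h>

static int example_make_pages_wc(struct page **pages, int npages)
{
	int ret;

	ret = ttm_set_pages_array_wc(pages, npages);
	if (ret)
		return ret;
	/* ... use the write-combined pages ... */
	return ttm_set_pages_array_wb(pages, npages);
}
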
diff --git a/include/dt-bindings/bus/ti-sysc.h b/include/dt-bindings/bus/ti-sysc.h
index 2c005376ac0e..7138384e2ef9 100644
--- a/include/dt-bindings/bus/ti-sysc.h
+++ b/include/dt-bindings/bus/ti-sysc.h
@@ -15,6 +15,8 @@
/* SmartReflex sysc found on 36xx and later */
#define SYSC_OMAP3_SR_ENAWAKEUP (1 << 26)
+#define SYSC_DRA7_MCAN_ENAWAKEUP (1 << 4)
+
/* SYSCONFIG STANDBYMODE/MIDLEMODE/SIDLEMODE supported by hardware */
#define SYSC_IDLE_FORCE 0
#define SYSC_IDLE_NO 1
diff --git a/include/dt-bindings/clock/actions,s700-cmu.h b/include/dt-bindings/clock/actions,s700-cmu.h
new file mode 100644
index 000000000000..3e1942996724
--- /dev/null
+++ b/include/dt-bindings/clock/actions,s700-cmu.h
@@ -0,0 +1,118 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Device Tree binding constants for Actions Semi S700 Clock Management Unit
+ *
+ * Copyright (c) 2014 Actions Semi Inc.
+ * Author: David Liu <liuwei@actions-semi.com>
+ *
+ * Author: Pathiban Nallathambi <pn@denx.de>
+ * Author: Saravanan Sekar <sravanhome@gmail.com>
+ */
+
+#ifndef __DT_BINDINGS_CLOCK_S700_H
+#define __DT_BINDINGS_CLOCK_S700_H
+
+#define CLK_NONE 0
+
+/* pll clocks */
+#define CLK_CORE_PLL 1
+#define CLK_DEV_PLL 2
+#define CLK_DDR_PLL 3
+#define CLK_NAND_PLL 4
+#define CLK_DISPLAY_PLL 5
+#define CLK_TVOUT_PLL 6
+#define CLK_CVBS_PLL 7
+#define CLK_AUDIO_PLL 8
+#define CLK_ETHERNET_PLL 9
+
+/* system clock */
+#define CLK_CPU 10
+#define CLK_DEV 11
+#define CLK_AHB 12
+#define CLK_APB 13
+#define CLK_DMAC 14
+#define CLK_NOC0_CLK_MUX 15
+#define CLK_NOC1_CLK_MUX 16
+#define CLK_HP_CLK_MUX 17
+#define CLK_HP_CLK_DIV 18
+#define CLK_NOC1_CLK_DIV 19
+#define CLK_NOC0 20
+#define CLK_NOC1 21
+#define CLK_SENOR_SRC 22
+
+/* peripheral device clock */
+#define CLK_GPIO 23
+#define CLK_TIMER 24
+#define CLK_DSI 25
+#define CLK_CSI 26
+#define CLK_SI 27
+#define CLK_DE 28
+#define CLK_HDE 29
+#define CLK_VDE 30
+#define CLK_VCE 31
+#define CLK_NAND 32
+#define CLK_SD0 33
+#define CLK_SD1 34
+#define CLK_SD2 35
+
+#define CLK_UART0 36
+#define CLK_UART1 37
+#define CLK_UART2 38
+#define CLK_UART3 39
+#define CLK_UART4 40
+#define CLK_UART5 41
+#define CLK_UART6 42
+
+#define CLK_PWM0 43
+#define CLK_PWM1 44
+#define CLK_PWM2 45
+#define CLK_PWM3 46
+#define CLK_PWM4 47
+#define CLK_PWM5 48
+#define CLK_GPU3D 49
+
+#define CLK_I2C0 50
+#define CLK_I2C1 51
+#define CLK_I2C2 52
+#define CLK_I2C3 53
+
+#define CLK_SPI0 54
+#define CLK_SPI1 55
+#define CLK_SPI2 56
+#define CLK_SPI3 57
+
+#define CLK_USB3_480MPLL0 58
+#define CLK_USB3_480MPHY0 59
+#define CLK_USB3_5GPHY 60
+#define CLK_USB3_CCE 61
+#define CLK_USB3_MAC 62
+
+#define CLK_LCD 63
+#define CLK_HDMI_AUDIO 64
+#define CLK_I2SRX 65
+#define CLK_I2STX 66
+
+#define CLK_SENSOR0 67
+#define CLK_SENSOR1 68
+
+#define CLK_HDMI_DEV 69
+
+#define CLK_ETHERNET 70
+#define CLK_RMII_REF 71
+
+#define CLK_USB2H0_PLLEN 72
+#define CLK_USB2H0_PHY 73
+#define CLK_USB2H0_CCE 74
+#define CLK_USB2H1_PLLEN 75
+#define CLK_USB2H1_PHY 76
+#define CLK_USB2H1_CCE 77
+
+#define CLK_TVOUT 78
+
+#define CLK_THERMAL_SENSOR 79
+
+#define CLK_IRC_SWITCH 80
+#define CLK_PCM1 81
+#define CLK_NR_CLKS (CLK_PCM1 + 1)
+
+#endif /* __DT_BINDINGS_CLOCK_S700_H */
diff --git a/include/dt-bindings/clock/aspeed-clock.h b/include/dt-bindings/clock/aspeed-clock.h
index 44761849fcbe..f43738607d77 100644
--- a/include/dt-bindings/clock/aspeed-clock.h
+++ b/include/dt-bindings/clock/aspeed-clock.h
@@ -25,7 +25,7 @@
#define ASPEED_CLK_GATE_RSACLK 19
#define ASPEED_CLK_GATE_UART3CLK 20
#define ASPEED_CLK_GATE_UART4CLK 21
-#define ASPEED_CLK_GATE_SDCLKCLK 22
+#define ASPEED_CLK_GATE_SDCLK 22
#define ASPEED_CLK_GATE_LHCCLK 23
#define ASPEED_CLK_HPLL 24
#define ASPEED_CLK_AHB 25
diff --git a/include/dt-bindings/clock/axg-audio-clkc.h b/include/dt-bindings/clock/axg-audio-clkc.h
new file mode 100644
index 000000000000..fd9c362099d9
--- /dev/null
+++ b/include/dt-bindings/clock/axg-audio-clkc.h
@@ -0,0 +1,94 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/*
+ * Copyright (c) 2018 Baylibre SAS.
+ * Author: Jerome Brunet <jbrunet@baylibre.com>
+ */
+
+#ifndef __AXG_AUDIO_CLKC_BINDINGS_H
+#define __AXG_AUDIO_CLKC_BINDINGS_H
+
+#define AUD_CLKID_SLV_SCLK0 9
+#define AUD_CLKID_SLV_SCLK1 10
+#define AUD_CLKID_SLV_SCLK2 11
+#define AUD_CLKID_SLV_SCLK3 12
+#define AUD_CLKID_SLV_SCLK4 13
+#define AUD_CLKID_SLV_SCLK5 14
+#define AUD_CLKID_SLV_SCLK6 15
+#define AUD_CLKID_SLV_SCLK7 16
+#define AUD_CLKID_SLV_SCLK8 17
+#define AUD_CLKID_SLV_SCLK9 18
+#define AUD_CLKID_SLV_LRCLK0 19
+#define AUD_CLKID_SLV_LRCLK1 20
+#define AUD_CLKID_SLV_LRCLK2 21
+#define AUD_CLKID_SLV_LRCLK3 22
+#define AUD_CLKID_SLV_LRCLK4 23
+#define AUD_CLKID_SLV_LRCLK5 24
+#define AUD_CLKID_SLV_LRCLK6 25
+#define AUD_CLKID_SLV_LRCLK7 26
+#define AUD_CLKID_SLV_LRCLK8 27
+#define AUD_CLKID_SLV_LRCLK9 28
+#define AUD_CLKID_DDR_ARB 29
+#define AUD_CLKID_PDM 30
+#define AUD_CLKID_TDMIN_A 31
+#define AUD_CLKID_TDMIN_B 32
+#define AUD_CLKID_TDMIN_C 33
+#define AUD_CLKID_TDMIN_LB 34
+#define AUD_CLKID_TDMOUT_A 35
+#define AUD_CLKID_TDMOUT_B 36
+#define AUD_CLKID_TDMOUT_C 37
+#define AUD_CLKID_FRDDR_A 38
+#define AUD_CLKID_FRDDR_B 39
+#define AUD_CLKID_FRDDR_C 40
+#define AUD_CLKID_TODDR_A 41
+#define AUD_CLKID_TODDR_B 42
+#define AUD_CLKID_TODDR_C 43
+#define AUD_CLKID_LOOPBACK 44
+#define AUD_CLKID_SPDIFIN 45
+#define AUD_CLKID_SPDIFOUT 46
+#define AUD_CLKID_RESAMPLE 47
+#define AUD_CLKID_POWER_DETECT 48
+#define AUD_CLKID_MST_A_MCLK 49
+#define AUD_CLKID_MST_B_MCLK 50
+#define AUD_CLKID_MST_C_MCLK 51
+#define AUD_CLKID_MST_D_MCLK 52
+#define AUD_CLKID_MST_E_MCLK 53
+#define AUD_CLKID_MST_F_MCLK 54
+#define AUD_CLKID_SPDIFOUT_CLK 55
+#define AUD_CLKID_SPDIFIN_CLK 56
+#define AUD_CLKID_PDM_DCLK 57
+#define AUD_CLKID_PDM_SYSCLK 58
+#define AUD_CLKID_MST_A_SCLK 79
+#define AUD_CLKID_MST_B_SCLK 80
+#define AUD_CLKID_MST_C_SCLK 81
+#define AUD_CLKID_MST_D_SCLK 82
+#define AUD_CLKID_MST_E_SCLK 83
+#define AUD_CLKID_MST_F_SCLK 84
+#define AUD_CLKID_MST_A_LRCLK 86
+#define AUD_CLKID_MST_B_LRCLK 87
+#define AUD_CLKID_MST_C_LRCLK 88
+#define AUD_CLKID_MST_D_LRCLK 89
+#define AUD_CLKID_MST_E_LRCLK 90
+#define AUD_CLKID_MST_F_LRCLK 91
+#define AUD_CLKID_TDMIN_A_SCLK_SEL 116
+#define AUD_CLKID_TDMIN_B_SCLK_SEL 117
+#define AUD_CLKID_TDMIN_C_SCLK_SEL 118
+#define AUD_CLKID_TDMIN_LB_SCLK_SEL 119
+#define AUD_CLKID_TDMOUT_A_SCLK_SEL 120
+#define AUD_CLKID_TDMOUT_B_SCLK_SEL 121
+#define AUD_CLKID_TDMOUT_C_SCLK_SEL 122
+#define AUD_CLKID_TDMIN_A_SCLK 123
+#define AUD_CLKID_TDMIN_B_SCLK 124
+#define AUD_CLKID_TDMIN_C_SCLK 125
+#define AUD_CLKID_TDMIN_LB_SCLK 126
+#define AUD_CLKID_TDMOUT_A_SCLK 127
+#define AUD_CLKID_TDMOUT_B_SCLK 128
+#define AUD_CLKID_TDMOUT_C_SCLK 129
+#define AUD_CLKID_TDMIN_A_LRCLK 130
+#define AUD_CLKID_TDMIN_B_LRCLK 131
+#define AUD_CLKID_TDMIN_C_LRCLK 132
+#define AUD_CLKID_TDMIN_LB_LRCLK 133
+#define AUD_CLKID_TDMOUT_A_LRCLK 134
+#define AUD_CLKID_TDMOUT_B_LRCLK 135
+#define AUD_CLKID_TDMOUT_C_LRCLK 136
+
+#endif /* __AXG_AUDIO_CLKC_BINDINGS_H */
diff --git a/include/dt-bindings/clock/axg-clkc.h b/include/dt-bindings/clock/axg-clkc.h
index 555937a25504..fd1f938c38d1 100644
--- a/include/dt-bindings/clock/axg-clkc.h
+++ b/include/dt-bindings/clock/axg-clkc.h
@@ -68,5 +68,9 @@
#define CLKID_SD_EMMC_B_CLK0 59
#define CLKID_SD_EMMC_C_CLK0 60
#define CLKID_HIFI_PLL 69
+#define CLKID_PCIE_CML_EN0 79
+#define CLKID_PCIE_CML_EN1 80
+#define CLKID_MIPI_ENABLE 81
+#define CLKID_GEN_CLK 84
#endif /* __AXG_CLKC_H */
diff --git a/include/dt-bindings/clock/dra7.h b/include/dt-bindings/clock/dra7.h
index 5e1061b15aed..d7549c57cac3 100644
--- a/include/dt-bindings/clock/dra7.h
+++ b/include/dt-bindings/clock/dra7.h
@@ -168,5 +168,6 @@
#define DRA7_COUNTER_32K_CLKCTRL DRA7_CLKCTRL_INDEX(0x50)
#define DRA7_UART10_CLKCTRL DRA7_CLKCTRL_INDEX(0x80)
#define DRA7_DCAN1_CLKCTRL DRA7_CLKCTRL_INDEX(0x88)
+#define DRA7_ADC_CLKCTRL DRA7_CLKCTRL_INDEX(0xa0)
#endif
diff --git a/include/dt-bindings/clock/exynos5440.h b/include/dt-bindings/clock/exynos5440.h
deleted file mode 100644
index 842cdc0adff1..000000000000
--- a/include/dt-bindings/clock/exynos5440.h
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
- * Copyright (c) 2013 Samsung Electronics Co., Ltd.
- * Author: Andrzej Hajda <a.hajda@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * Device Tree binding constants for Exynos5440 clock controller.
-*/
-
-#ifndef _DT_BINDINGS_CLOCK_EXYNOS_5440_H
-#define _DT_BINDINGS_CLOCK_EXYNOS_5440_H
-
-#define CLK_XTAL 1
-#define CLK_ARM_CLK 2
-#define CLK_CPLLA 3
-#define CLK_CPLLB 4
-#define CLK_SPI_BAUD 16
-#define CLK_PB0_250 17
-#define CLK_PR0_250 18
-#define CLK_PR1_250 19
-#define CLK_B_250 20
-#define CLK_B_125 21
-#define CLK_B_200 22
-#define CLK_SATA 23
-#define CLK_USB 24
-#define CLK_GMAC0 25
-#define CLK_CS250 26
-#define CLK_PB0_250_O 27
-#define CLK_PR0_250_O 28
-#define CLK_PR1_250_O 29
-#define CLK_B_250_O 30
-#define CLK_B_125_O 31
-#define CLK_B_200_O 32
-#define CLK_SATA_O 33
-#define CLK_USB_O 34
-#define CLK_GMAC0_O 35
-#define CLK_CS250_O 36
-
-/* must be greater than maximal clock id */
-#define CLK_NR_CLKS 37
-
-#endif /* _DT_BINDINGS_CLOCK_EXYNOS_5440_H */
diff --git a/include/dt-bindings/clock/gxbb-clkc.h b/include/dt-bindings/clock/gxbb-clkc.h
index 7a892be90549..3979d48c025f 100644
--- a/include/dt-bindings/clock/gxbb-clkc.h
+++ b/include/dt-bindings/clock/gxbb-clkc.h
@@ -127,5 +127,6 @@
#define CLKID_VAPB 140
#define CLKID_VDEC_1 153
#define CLKID_VDEC_HEVC 156
+#define CLKID_GEN_CLK 159
#endif /* __GXBB_CLKC_H */
diff --git a/include/dt-bindings/clock/imx6sll-clock.h b/include/dt-bindings/clock/imx6sll-clock.h
index 151111e68f4f..1036475f997d 100644
--- a/include/dt-bindings/clock/imx6sll-clock.h
+++ b/include/dt-bindings/clock/imx6sll-clock.h
@@ -197,6 +197,13 @@
#define IMX6SLL_CLK_EXTERN_AUDIO_PODF 171
#define IMX6SLL_CLK_EXTERN_AUDIO 172
-#define IMX6SLL_CLK_END 173
+#define IMX6SLL_CLK_GPIO1 173
+#define IMX6SLL_CLK_GPIO2 174
+#define IMX6SLL_CLK_GPIO3 175
+#define IMX6SLL_CLK_GPIO4 176
+#define IMX6SLL_CLK_GPIO5 177
+#define IMX6SLL_CLK_GPIO6 178
+
+#define IMX6SLL_CLK_END 179
#endif /* __DT_BINDINGS_CLOCK_IMX6SLL_H */
diff --git a/include/dt-bindings/clock/imx6ul-clock.h b/include/dt-bindings/clock/imx6ul-clock.h
index 9564597cbfac..f8e0476a3a0e 100644
--- a/include/dt-bindings/clock/imx6ul-clock.h
+++ b/include/dt-bindings/clock/imx6ul-clock.h
@@ -235,27 +235,31 @@
#define IMX6UL_CLK_CSI_PODF 222
#define IMX6UL_CLK_PLL3_120M 223
#define IMX6UL_CLK_KPP 224
-#define IMX6UL_CLK_CKO1_SEL 225
-#define IMX6UL_CLK_CKO1_PODF 226
-#define IMX6UL_CLK_CKO1 227
-#define IMX6UL_CLK_CKO2_SEL 228
-#define IMX6UL_CLK_CKO2_PODF 229
-#define IMX6UL_CLK_CKO2 230
-#define IMX6UL_CLK_CKO 231
+#define IMX6ULL_CLK_ESAI_PRED 225
+#define IMX6ULL_CLK_ESAI_PODF 226
+#define IMX6ULL_CLK_ESAI_EXTAL 227
+#define IMX6ULL_CLK_ESAI_MEM 228
+#define IMX6ULL_CLK_ESAI_IPG 229
+#define IMX6ULL_CLK_DCP_CLK 230
+#define IMX6ULL_CLK_EPDC_PRE_SEL 231
+#define IMX6ULL_CLK_EPDC_SEL 232
+#define IMX6ULL_CLK_EPDC_PODF 233
+#define IMX6ULL_CLK_EPDC_ACLK 234
+#define IMX6ULL_CLK_EPDC_PIX 235
+#define IMX6ULL_CLK_ESAI_SEL 236
+#define IMX6UL_CLK_CKO1_SEL 237
+#define IMX6UL_CLK_CKO1_PODF 238
+#define IMX6UL_CLK_CKO1 239
+#define IMX6UL_CLK_CKO2_SEL 240
+#define IMX6UL_CLK_CKO2_PODF 241
+#define IMX6UL_CLK_CKO2 242
+#define IMX6UL_CLK_CKO 243
+#define IMX6UL_CLK_GPIO1 244
+#define IMX6UL_CLK_GPIO2 245
+#define IMX6UL_CLK_GPIO3 246
+#define IMX6UL_CLK_GPIO4 247
+#define IMX6UL_CLK_GPIO5 248
-/* For i.MX6ULL */
-#define IMX6ULL_CLK_ESAI_PRED 232
-#define IMX6ULL_CLK_ESAI_PODF 233
-#define IMX6ULL_CLK_ESAI_EXTAL 234
-#define IMX6ULL_CLK_ESAI_MEM 235
-#define IMX6ULL_CLK_ESAI_IPG 236
-#define IMX6ULL_CLK_DCP_CLK 237
-#define IMX6ULL_CLK_EPDC_PRE_SEL 238
-#define IMX6ULL_CLK_EPDC_SEL 239
-#define IMX6ULL_CLK_EPDC_PODF 240
-#define IMX6ULL_CLK_EPDC_ACLK 241
-#define IMX6ULL_CLK_EPDC_PIX 242
-#define IMX6ULL_CLK_ESAI_SEL 243
-#define IMX6UL_CLK_END 244
+#define IMX6UL_CLK_END 249
#endif /* __DT_BINDINGS_CLOCK_IMX6UL_H */
diff --git a/include/dt-bindings/clock/maxim,max9485.h b/include/dt-bindings/clock/maxim,max9485.h
new file mode 100644
index 000000000000..185b09ce1869
--- /dev/null
+++ b/include/dt-bindings/clock/maxim,max9485.h
@@ -0,0 +1,18 @@
+/*
+ * Copyright (C) 2018 Daniel Mack
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef __DT_BINDINGS_MAX9485_CLK_H
+#define __DT_BINDINGS_MAX9485_CLK_H
+
+#define MAX9485_MCLKOUT 0
+#define MAX9485_CLKOUT 1
+#define MAX9485_CLKOUT1 2
+#define MAX9485_CLKOUT2 3
+
+#endif /* __DT_BINDINGS_MAX9485_CLK_H */
diff --git a/include/dt-bindings/clock/px30-cru.h b/include/dt-bindings/clock/px30-cru.h
new file mode 100644
index 000000000000..00101479f7c4
--- /dev/null
+++ b/include/dt-bindings/clock/px30-cru.h
@@ -0,0 +1,389 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _DT_BINDINGS_CLK_ROCKCHIP_PX30_H
+#define _DT_BINDINGS_CLK_ROCKCHIP_PX30_H
+
+/* core clocks */
+#define PLL_APLL 1
+#define PLL_DPLL 2
+#define PLL_CPLL 3
+#define PLL_NPLL 4
+#define APLL_BOOST_H 5
+#define APLL_BOOST_L 6
+#define ARMCLK 7
+
+/* sclk gates (special clocks) */
+#define USB480M 14
+#define SCLK_PDM 15
+#define SCLK_I2S0_TX 16
+#define SCLK_I2S0_TX_OUT 17
+#define SCLK_I2S0_RX 18
+#define SCLK_I2S0_RX_OUT 19
+#define SCLK_I2S1 20
+#define SCLK_I2S1_OUT 21
+#define SCLK_I2S2 22
+#define SCLK_I2S2_OUT 23
+#define SCLK_UART1 24
+#define SCLK_UART2 25
+#define SCLK_UART3 26
+#define SCLK_UART4 27
+#define SCLK_UART5 28
+#define SCLK_I2C0 29
+#define SCLK_I2C1 30
+#define SCLK_I2C2 31
+#define SCLK_I2C3 32
+#define SCLK_I2C4 33
+#define SCLK_PWM0 34
+#define SCLK_PWM1 35
+#define SCLK_SPI0 36
+#define SCLK_SPI1 37
+#define SCLK_TIMER0 38
+#define SCLK_TIMER1 39
+#define SCLK_TIMER2 40
+#define SCLK_TIMER3 41
+#define SCLK_TIMER4 42
+#define SCLK_TIMER5 43
+#define SCLK_TSADC 44
+#define SCLK_SARADC 45
+#define SCLK_OTP 46
+#define SCLK_OTP_USR 47
+#define SCLK_CRYPTO 48
+#define SCLK_CRYPTO_APK 49
+#define SCLK_DDRC 50
+#define SCLK_ISP 51
+#define SCLK_CIF_OUT 52
+#define SCLK_RGA_CORE 53
+#define SCLK_VOPB_PWM 54
+#define SCLK_NANDC 55
+#define SCLK_SDIO 56
+#define SCLK_EMMC 57
+#define SCLK_SFC 58
+#define SCLK_SDMMC 59
+#define SCLK_OTG_ADP 60
+#define SCLK_GMAC_SRC 61
+#define SCLK_GMAC 62
+#define SCLK_GMAC_RX_TX 63
+#define SCLK_MAC_REF 64
+#define SCLK_MAC_REFOUT 65
+#define SCLK_MAC_OUT 66
+#define SCLK_SDMMC_DRV 67
+#define SCLK_SDMMC_SAMPLE 68
+#define SCLK_SDIO_DRV 69
+#define SCLK_SDIO_SAMPLE 70
+#define SCLK_EMMC_DRV 71
+#define SCLK_EMMC_SAMPLE 72
+#define SCLK_GPU 73
+#define SCLK_PVTM 74
+#define SCLK_CORE_VPU 75
+#define SCLK_GMAC_RMII 76
+#define SCLK_UART2_SRC 77
+#define SCLK_NANDC_DIV 78
+#define SCLK_NANDC_DIV50 79
+#define SCLK_SDIO_DIV 80
+#define SCLK_SDIO_DIV50 81
+#define SCLK_EMMC_DIV 82
+#define SCLK_EMMC_DIV50 83
+#define SCLK_DDRCLK 84
+#define SCLK_UART1_SRC 85
+
+/* dclk gates */
+#define DCLK_VOPB 150
+#define DCLK_VOPL 151
+
+/* aclk gates */
+#define ACLK_GPU 170
+#define ACLK_BUS_PRE 171
+#define ACLK_CRYPTO 172
+#define ACLK_VI_PRE 173
+#define ACLK_VO_PRE 174
+#define ACLK_VPU 175
+#define ACLK_PERI_PRE 176
+#define ACLK_GMAC 178
+#define ACLK_CIF 179
+#define ACLK_ISP 180
+#define ACLK_VOPB 181
+#define ACLK_VOPL 182
+#define ACLK_RGA 183
+#define ACLK_GIC 184
+#define ACLK_DCF 186
+#define ACLK_DMAC 187
+#define ACLK_BUS_SRC 188
+#define ACLK_PERI_SRC 189
+
+/* hclk gates */
+#define HCLK_BUS_PRE 240
+#define HCLK_CRYPTO 241
+#define HCLK_VI_PRE 242
+#define HCLK_VO_PRE 243
+#define HCLK_VPU 244
+#define HCLK_PERI_PRE 245
+#define HCLK_MMC_NAND 246
+#define HCLK_SDMMC 247
+#define HCLK_USB 248
+#define HCLK_CIF 249
+#define HCLK_ISP 250
+#define HCLK_VOPB 251
+#define HCLK_VOPL 252
+#define HCLK_RGA 253
+#define HCLK_NANDC 254
+#define HCLK_SDIO 255
+#define HCLK_EMMC 256
+#define HCLK_SFC 257
+#define HCLK_OTG 258
+#define HCLK_HOST 259
+#define HCLK_HOST_ARB 260
+#define HCLK_PDM 261
+#define HCLK_I2S0 262
+#define HCLK_I2S1 263
+#define HCLK_I2S2 264
+
+/* pclk gates */
+#define PCLK_BUS_PRE 320
+#define PCLK_DDR 321
+#define PCLK_VO_PRE 322
+#define PCLK_GMAC 323
+#define PCLK_MIPI_DSI 324
+#define PCLK_MIPIDSIPHY 325
+#define PCLK_MIPICSIPHY 326
+#define PCLK_USB_GRF 327
+#define PCLK_DCF 328
+#define PCLK_UART1 329
+#define PCLK_UART2 330
+#define PCLK_UART3 331
+#define PCLK_UART4 332
+#define PCLK_UART5 333
+#define PCLK_I2C0 334
+#define PCLK_I2C1 335
+#define PCLK_I2C2 336
+#define PCLK_I2C3 337
+#define PCLK_I2C4 338
+#define PCLK_PWM0 339
+#define PCLK_PWM1 340
+#define PCLK_SPI0 341
+#define PCLK_SPI1 342
+#define PCLK_SARADC 343
+#define PCLK_TSADC 344
+#define PCLK_TIMER 345
+#define PCLK_OTP_NS 346
+#define PCLK_WDT_NS 347
+#define PCLK_GPIO1 348
+#define PCLK_GPIO2 349
+#define PCLK_GPIO3 350
+#define PCLK_ISP 351
+#define PCLK_CIF 352
+#define PCLK_OTP_PHY 353
+
+#define CLK_NR_CLKS (PCLK_OTP_PHY + 1)
+
+/* pmu-clocks indices */
+
+#define PLL_GPLL 1
+
+#define SCLK_RTC32K_PMU 4
+#define SCLK_WIFI_PMU 5
+#define SCLK_UART0_PMU 6
+#define SCLK_PVTM_PMU 7
+#define PCLK_PMU_PRE 8
+#define SCLK_REF24M_PMU 9
+#define SCLK_USBPHY_REF 10
+#define SCLK_MIPIDSIPHY_REF 11
+
+#define XIN24M_DIV 12
+
+#define PCLK_GPIO0_PMU 20
+#define PCLK_UART0_PMU 21
+
+#define CLKPMU_NR_CLKS (PCLK_UART0_PMU + 1)
+
+/* soft-reset indices */
+#define SRST_CORE0_PO 0
+#define SRST_CORE1_PO 1
+#define SRST_CORE2_PO 2
+#define SRST_CORE3_PO 3
+#define SRST_CORE0 4
+#define SRST_CORE1 5
+#define SRST_CORE2 6
+#define SRST_CORE3 7
+#define SRST_CORE0_DBG 8
+#define SRST_CORE1_DBG 9
+#define SRST_CORE2_DBG 10
+#define SRST_CORE3_DBG 11
+#define SRST_TOPDBG 12
+#define SRST_CORE_NOC 13
+#define SRST_STRC_A 14
+#define SRST_L2C 15
+
+#define SRST_DAP 16
+#define SRST_CORE_PVTM 17
+#define SRST_GPU 18
+#define SRST_GPU_NIU 19
+#define SRST_UPCTL2 20
+#define SRST_UPCTL2_A 21
+#define SRST_UPCTL2_P 22
+#define SRST_MSCH 23
+#define SRST_MSCH_P 24
+#define SRST_DDRMON_P 25
+#define SRST_DDRSTDBY_P 26
+#define SRST_DDRSTDBY 27
+#define SRST_DDRGRF_p 28
+#define SRST_AXI_SPLIT_A 29
+#define SRST_AXI_CMD_A 30
+#define SRST_AXI_CMD_P 31
+
+#define SRST_DDRPHY 32
+#define SRST_DDRPHYDIV 33
+#define SRST_DDRPHY_P 34
+#define SRST_VPU_A 36
+#define SRST_VPU_NIU_A 37
+#define SRST_VPU_H 38
+#define SRST_VPU_NIU_H 39
+#define SRST_VI_NIU_A 40
+#define SRST_VI_NIU_H 41
+#define SRST_ISP_H 42
+#define SRST_ISP 43
+#define SRST_CIF_A 44
+#define SRST_CIF_H 45
+#define SRST_CIF_PCLKIN 46
+#define SRST_MIPICSIPHY_P 47
+
+#define SRST_VO_NIU_A 48
+#define SRST_VO_NIU_H 49
+#define SRST_VO_NIU_P 50
+#define SRST_VOPB_A 51
+#define SRST_VOPB_H 52
+#define SRST_VOPB 53
+#define SRST_PWM_VOPB 54
+#define SRST_VOPL_A 55
+#define SRST_VOPL_H 56
+#define SRST_VOPL 57
+#define SRST_RGA_A 58
+#define SRST_RGA_H 59
+#define SRST_RGA 60
+#define SRST_MIPIDSI_HOST_P 61
+#define SRST_MIPIDSIPHY_P 62
+#define SRST_VPU_CORE 63
+
+#define SRST_PERI_NIU_A 64
+#define SRST_USB_NIU_H 65
+#define SRST_USB2OTG_H 66
+#define SRST_USB2OTG 67
+#define SRST_USB2OTG_ADP 68
+#define SRST_USB2HOST_H 69
+#define SRST_USB2HOST_ARB_H 70
+#define SRST_USB2HOST_AUX_H 71
+#define SRST_USB2HOST_EHCI 72
+#define SRST_USB2HOST 73
+#define SRST_USBPHYPOR 74
+#define SRST_USBPHY_OTG_PORT 75
+#define SRST_USBPHY_HOST_PORT 76
+#define SRST_USBPHY_GRF 77
+#define SRST_CPU_BOOST_P 78
+#define SRST_CPU_BOOST 79
+
+#define SRST_MMC_NAND_NIU_H 80
+#define SRST_SDIO_H 81
+#define SRST_EMMC_H 82
+#define SRST_SFC_H 83
+#define SRST_SFC 84
+#define SRST_SDCARD_NIU_H 85
+#define SRST_SDMMC_H 86
+#define SRST_NANDC_H 89
+#define SRST_NANDC 90
+#define SRST_GMAC_NIU_A 92
+#define SRST_GMAC_NIU_P 93
+#define SRST_GMAC_A 94
+
+#define SRST_PMU_NIU_P 96
+#define SRST_PMU_SGRF_P 97
+#define SRST_PMU_GRF_P 98
+#define SRST_PMU 99
+#define SRST_PMU_MEM_P 100
+#define SRST_PMU_GPIO0_P 101
+#define SRST_PMU_UART0_P 102
+#define SRST_PMU_CRU_P 103
+#define SRST_PMU_PVTM 104
+#define SRST_PMU_UART 105
+#define SRST_PMU_NIU_H 106
+#define SRST_PMU_DDR_FAIL_SAVE 107
+#define SRST_PMU_CORE_PERF_A 108
+#define SRST_PMU_CORE_GRF_P 109
+#define SRST_PMU_GPU_PERF_A 110
+#define SRST_PMU_GPU_GRF_P 111
+
+#define SRST_CRYPTO_NIU_A 112
+#define SRST_CRYPTO_NIU_H 113
+#define SRST_CRYPTO_A 114
+#define SRST_CRYPTO_H 115
+#define SRST_CRYPTO 116
+#define SRST_CRYPTO_APK 117
+#define SRST_BUS_NIU_H 120
+#define SRST_USB_NIU_P 121
+#define SRST_BUS_TOP_NIU_P 122
+#define SRST_INTMEM_A 123
+#define SRST_GIC_A 124
+#define SRST_ROM_H 126
+#define SRST_DCF_A 127
+
+#define SRST_DCF_P 128
+#define SRST_PDM_H 129
+#define SRST_PDM 130
+#define SRST_I2S0_H 131
+#define SRST_I2S0_TX 132
+#define SRST_I2S1_H 133
+#define SRST_I2S1 134
+#define SRST_I2S2_H 135
+#define SRST_I2S2 136
+#define SRST_UART1_P 137
+#define SRST_UART1 138
+#define SRST_UART2_P 139
+#define SRST_UART2 140
+#define SRST_UART3_P 141
+#define SRST_UART3 142
+#define SRST_UART4_P 143
+
+#define SRST_UART4 144
+#define SRST_UART5_P 145
+#define SRST_UART5 146
+#define SRST_I2C0_P 147
+#define SRST_I2C0 148
+#define SRST_I2C1_P 149
+#define SRST_I2C1 150
+#define SRST_I2C2_P 151
+#define SRST_I2C2 152
+#define SRST_I2C3_P 153
+#define SRST_I2C3 154
+#define SRST_PWM0_P 157
+#define SRST_PWM0 158
+#define SRST_PWM1_P 159
+
+#define SRST_PWM1 160
+#define SRST_SPI0_P 161
+#define SRST_SPI0 162
+#define SRST_SPI1_P 163
+#define SRST_SPI1 164
+#define SRST_SARADC_P 165
+#define SRST_SARADC 166
+#define SRST_TSADC_P 167
+#define SRST_TSADC 168
+#define SRST_TIMER_P 169
+#define SRST_TIMER0 170
+#define SRST_TIMER1 171
+#define SRST_TIMER2 172
+#define SRST_TIMER3 173
+#define SRST_TIMER4 174
+#define SRST_TIMER5 175
+
+#define SRST_OTP_NS_P 176
+#define SRST_OTP_NS_SBPI 177
+#define SRST_OTP_NS_USR 178
+#define SRST_OTP_PHY_P 179
+#define SRST_OTP_PHY 180
+#define SRST_WDT_NS_P 181
+#define SRST_GPIO1_P 182
+#define SRST_GPIO2_P 183
+#define SRST_GPIO3_P 184
+#define SRST_SGRF_P 185
+#define SRST_GRF_P 186
+#define SRST_I2S0_RX 191
+
+#endif
diff --git a/include/dt-bindings/clock/pxa-clock.h b/include/dt-bindings/clock/pxa-clock.h
index e65803b1dc7e..0b0fd2b01538 100644
--- a/include/dt-bindings/clock/pxa-clock.h
+++ b/include/dt-bindings/clock/pxa-clock.h
@@ -72,6 +72,7 @@
#define CLK_USIM 58
#define CLK_USIM1 59
#define CLK_USMI0 60
-#define CLK_MAX 61
+#define CLK_OSC32k768 61
+#define CLK_MAX 62
#endif
diff --git a/include/dt-bindings/clock/qcom,dispcc-sdm845.h b/include/dt-bindings/clock/qcom,dispcc-sdm845.h
new file mode 100644
index 000000000000..11eed4bc9646
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,dispcc-sdm845.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_SDM_DISP_CC_SDM845_H
+#define _DT_BINDINGS_CLK_SDM_DISP_CC_SDM845_H
+
+/* DISP_CC clock registers */
+#define DISP_CC_MDSS_AHB_CLK 0
+#define DISP_CC_MDSS_AXI_CLK 1
+#define DISP_CC_MDSS_BYTE0_CLK 2
+#define DISP_CC_MDSS_BYTE0_CLK_SRC 3
+#define DISP_CC_MDSS_BYTE0_INTF_CLK 4
+#define DISP_CC_MDSS_BYTE1_CLK 5
+#define DISP_CC_MDSS_BYTE1_CLK_SRC 6
+#define DISP_CC_MDSS_BYTE1_INTF_CLK 7
+#define DISP_CC_MDSS_ESC0_CLK 8
+#define DISP_CC_MDSS_ESC0_CLK_SRC 9
+#define DISP_CC_MDSS_ESC1_CLK 10
+#define DISP_CC_MDSS_ESC1_CLK_SRC 11
+#define DISP_CC_MDSS_MDP_CLK 12
+#define DISP_CC_MDSS_MDP_CLK_SRC 13
+#define DISP_CC_MDSS_MDP_LUT_CLK 14
+#define DISP_CC_MDSS_PCLK0_CLK 15
+#define DISP_CC_MDSS_PCLK0_CLK_SRC 16
+#define DISP_CC_MDSS_PCLK1_CLK 17
+#define DISP_CC_MDSS_PCLK1_CLK_SRC 18
+#define DISP_CC_MDSS_ROT_CLK 19
+#define DISP_CC_MDSS_ROT_CLK_SRC 20
+#define DISP_CC_MDSS_RSCC_AHB_CLK 21
+#define DISP_CC_MDSS_RSCC_VSYNC_CLK 22
+#define DISP_CC_MDSS_VSYNC_CLK 23
+#define DISP_CC_MDSS_VSYNC_CLK_SRC 24
+#define DISP_CC_PLL0 25
+#define DISP_CC_MDSS_BYTE0_DIV_CLK_SRC 26
+#define DISP_CC_MDSS_BYTE1_DIV_CLK_SRC 27
+
+/* DISP_CC Reset */
+#define DISP_CC_MDSS_RSCC_BCR 0
+
+/* DISP_CC GDSCR */
+#define MDSS_GDSC 0
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,gcc-sdm845.h b/include/dt-bindings/clock/qcom,gcc-sdm845.h
index aca61264f12c..f96fc2dbf60e 100644
--- a/include/dt-bindings/clock/qcom,gcc-sdm845.h
+++ b/include/dt-bindings/clock/qcom,gcc-sdm845.h
@@ -192,6 +192,8 @@
#define GCC_VS_CTRL_CLK_SRC 182
#define GCC_VSENSOR_CLK_SRC 183
#define GPLL4 184
+#define GCC_CPUSS_DVM_BUS_CLK 185
+#define GCC_CPUSS_GNOC_CLK 186
/* GCC Resets */
#define GCC_MMSS_BCR 0
diff --git a/include/dt-bindings/clock/r9a06g032-sysctrl.h b/include/dt-bindings/clock/r9a06g032-sysctrl.h
new file mode 100644
index 000000000000..90c0f3dc1ba1
--- /dev/null
+++ b/include/dt-bindings/clock/r9a06g032-sysctrl.h
@@ -0,0 +1,148 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * R9A06G032 sysctrl IDs
+ *
+ * Copyright (C) 2018 Renesas Electronics Europe Limited
+ *
+ * Michel Pollet <michel.pollet@bp.renesas.com>, <buserror@gmail.com>
+ */
+
+#ifndef __DT_BINDINGS_R9A06G032_SYSCTRL_H__
+#define __DT_BINDINGS_R9A06G032_SYSCTRL_H__
+
+#define R9A06G032_CLK_PLL_USB 1
+#define R9A06G032_CLK_48 1 /* AKA CLK_PLL_USB */
+#define R9A06G032_MSEBIS_CLK 3 /* AKA CLKOUT_D16 */
+#define R9A06G032_MSEBIM_CLK 3 /* AKA CLKOUT_D16 */
+#define R9A06G032_CLK_DDRPHY_PLLCLK 5 /* AKA CLKOUT_D1OR2 */
+#define R9A06G032_CLK50 6 /* AKA CLKOUT_D20 */
+#define R9A06G032_CLK25 7 /* AKA CLKOUT_D40 */
+#define R9A06G032_CLK125 9 /* AKA CLKOUT_D8 */
+#define R9A06G032_CLK_P5_PG1 17 /* AKA DIV_P5_PG */
+#define R9A06G032_CLK_REF_SYNC 21 /* AKA DIV_REF_SYNC */
+#define R9A06G032_CLK_25_PG4 26
+#define R9A06G032_CLK_25_PG5 27
+#define R9A06G032_CLK_25_PG6 28
+#define R9A06G032_CLK_25_PG7 29
+#define R9A06G032_CLK_25_PG8 30
+#define R9A06G032_CLK_ADC 31
+#define R9A06G032_CLK_ECAT100 32
+#define R9A06G032_CLK_HSR100 33
+#define R9A06G032_CLK_I2C0 34
+#define R9A06G032_CLK_I2C1 35
+#define R9A06G032_CLK_MII_REF 36
+#define R9A06G032_CLK_NAND 37
+#define R9A06G032_CLK_NOUSBP2_PG6 38
+#define R9A06G032_CLK_P1_PG2 39
+#define R9A06G032_CLK_P1_PG3 40
+#define R9A06G032_CLK_P1_PG4 41
+#define R9A06G032_CLK_P4_PG3 42
+#define R9A06G032_CLK_P4_PG4 43
+#define R9A06G032_CLK_P6_PG1 44
+#define R9A06G032_CLK_P6_PG2 45
+#define R9A06G032_CLK_P6_PG3 46
+#define R9A06G032_CLK_P6_PG4 47
+#define R9A06G032_CLK_PCI_USB 48
+#define R9A06G032_CLK_QSPI0 49
+#define R9A06G032_CLK_QSPI1 50
+#define R9A06G032_CLK_RGMII_REF 51
+#define R9A06G032_CLK_RMII_REF 52
+#define R9A06G032_CLK_SDIO0 53
+#define R9A06G032_CLK_SDIO1 54
+#define R9A06G032_CLK_SERCOS100 55
+#define R9A06G032_CLK_SLCD 56
+#define R9A06G032_CLK_SPI0 57
+#define R9A06G032_CLK_SPI1 58
+#define R9A06G032_CLK_SPI2 59
+#define R9A06G032_CLK_SPI3 60
+#define R9A06G032_CLK_SPI4 61
+#define R9A06G032_CLK_SPI5 62
+#define R9A06G032_CLK_SWITCH 63
+#define R9A06G032_HCLK_ECAT125 65
+#define R9A06G032_HCLK_PINCONFIG 66
+#define R9A06G032_HCLK_SERCOS 67
+#define R9A06G032_HCLK_SGPIO2 68
+#define R9A06G032_HCLK_SGPIO3 69
+#define R9A06G032_HCLK_SGPIO4 70
+#define R9A06G032_HCLK_TIMER0 71
+#define R9A06G032_HCLK_TIMER1 72
+#define R9A06G032_HCLK_USBF 73
+#define R9A06G032_HCLK_USBH 74
+#define R9A06G032_HCLK_USBPM 75
+#define R9A06G032_CLK_48_PG_F 76
+#define R9A06G032_CLK_48_PG4 77
+#define R9A06G032_CLK_DDRPHY_PCLK 81 /* AKA CLK_REF_SYNC_D4 */
+#define R9A06G032_CLK_FW 81 /* AKA CLK_REF_SYNC_D4 */
+#define R9A06G032_CLK_CRYPTO 81 /* AKA CLK_REF_SYNC_D4 */
+#define R9A06G032_CLK_A7MP 84 /* AKA DIV_CA7 */
+#define R9A06G032_HCLK_CAN0 85
+#define R9A06G032_HCLK_CAN1 86
+#define R9A06G032_HCLK_DELTASIGMA 87
+#define R9A06G032_HCLK_PWMPTO 88
+#define R9A06G032_HCLK_RSV 89
+#define R9A06G032_HCLK_SGPIO0 90
+#define R9A06G032_HCLK_SGPIO1 91
+#define R9A06G032_RTOS_MDC 92
+#define R9A06G032_CLK_CM3 93
+#define R9A06G032_CLK_DDRC 94
+#define R9A06G032_CLK_ECAT25 95
+#define R9A06G032_CLK_HSR50 96
+#define R9A06G032_CLK_HW_RTOS 97
+#define R9A06G032_CLK_SERCOS50 98
+#define R9A06G032_HCLK_ADC 99
+#define R9A06G032_HCLK_CM3 100
+#define R9A06G032_HCLK_CRYPTO_EIP150 101
+#define R9A06G032_HCLK_CRYPTO_EIP93 102
+#define R9A06G032_HCLK_DDRC 103
+#define R9A06G032_HCLK_DMA0 104
+#define R9A06G032_HCLK_DMA1 105
+#define R9A06G032_HCLK_GMAC0 106
+#define R9A06G032_HCLK_GMAC1 107
+#define R9A06G032_HCLK_GPIO0 108
+#define R9A06G032_HCLK_GPIO1 109
+#define R9A06G032_HCLK_GPIO2 110
+#define R9A06G032_HCLK_HSR 111
+#define R9A06G032_HCLK_I2C0 112
+#define R9A06G032_HCLK_I2C1 113
+#define R9A06G032_HCLK_LCD 114
+#define R9A06G032_HCLK_MSEBI_M 115
+#define R9A06G032_HCLK_MSEBI_S 116
+#define R9A06G032_HCLK_NAND 117
+#define R9A06G032_HCLK_PG_I 118
+#define R9A06G032_HCLK_PG19 119
+#define R9A06G032_HCLK_PG20 120
+#define R9A06G032_HCLK_PG3 121
+#define R9A06G032_HCLK_PG4 122
+#define R9A06G032_HCLK_QSPI0 123
+#define R9A06G032_HCLK_QSPI1 124
+#define R9A06G032_HCLK_ROM 125
+#define R9A06G032_HCLK_RTC 126
+#define R9A06G032_HCLK_SDIO0 127
+#define R9A06G032_HCLK_SDIO1 128
+#define R9A06G032_HCLK_SEMAP 129
+#define R9A06G032_HCLK_SPI0 130
+#define R9A06G032_HCLK_SPI1 131
+#define R9A06G032_HCLK_SPI2 132
+#define R9A06G032_HCLK_SPI3 133
+#define R9A06G032_HCLK_SPI4 134
+#define R9A06G032_HCLK_SPI5 135
+#define R9A06G032_HCLK_SWITCH 136
+#define R9A06G032_HCLK_SWITCH_RG 137
+#define R9A06G032_HCLK_UART0 138
+#define R9A06G032_HCLK_UART1 139
+#define R9A06G032_HCLK_UART2 140
+#define R9A06G032_HCLK_UART3 141
+#define R9A06G032_HCLK_UART4 142
+#define R9A06G032_HCLK_UART5 143
+#define R9A06G032_HCLK_UART6 144
+#define R9A06G032_HCLK_UART7 145
+#define R9A06G032_CLK_UART0 146
+#define R9A06G032_CLK_UART1 147
+#define R9A06G032_CLK_UART2 148
+#define R9A06G032_CLK_UART3 149
+#define R9A06G032_CLK_UART4 150
+#define R9A06G032_CLK_UART5 151
+#define R9A06G032_CLK_UART6 152
+#define R9A06G032_CLK_UART7 153
+
+#endif /* __DT_BINDINGS_R9A06G032_SYSCTRL_H__ */
diff --git a/include/dt-bindings/clock/rk3399-ddr.h b/include/dt-bindings/clock/rk3399-ddr.h
new file mode 100644
index 000000000000..ed2280844963
--- /dev/null
+++ b/include/dt-bindings/clock/rk3399-ddr.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
+
+#ifndef DT_BINDINGS_DDR_H
+#define DT_BINDINGS_DDR_H
+
+/*
+ * DDR3 SDRAM Standard Speed Bins include tCK, tRCD, tRP, tRAS and tRC for
+ * each corresponding bin.
+ */
+
+/* DDR3-800 (5-5-5) */
+#define DDR3_800D 0
+/* DDR3-800 (6-6-6) */
+#define DDR3_800E 1
+/* DDR3-1066 (6-6-6) */
+#define DDR3_1066E 2
+/* DDR3-1066 (7-7-7) */
+#define DDR3_1066F 3
+/* DDR3-1066 (8-8-8) */
+#define DDR3_1066G 4
+/* DDR3-1333 (7-7-7) */
+#define DDR3_1333F 5
+/* DDR3-1333 (8-8-8) */
+#define DDR3_1333G 6
+/* DDR3-1333 (9-9-9) */
+#define DDR3_1333H 7
+/* DDR3-1333 (10-10-10) */
+#define DDR3_1333J 8
+/* DDR3-1600 (8-8-8) */
+#define DDR3_1600G 9
+/* DDR3-1600 (9-9-9) */
+#define DDR3_1600H 10
+/* DDR3-1600 (10-10-10) */
+#define DDR3_1600J 11
+/* DDR3-1600 (11-11-11) */
+#define DDR3_1600K 12
+/* DDR3-1600 (10-10-10) */
+#define DDR3_1866J 13
+/* DDR3-1866 (11-11-11) */
+#define DDR3_1866K 14
+/* DDR3-1866 (12-12-12) */
+#define DDR3_1866L 15
+/* DDR3-1866 (13-13-13) */
+#define DDR3_1866M 16
+/* DDR3-2133 (11-11-11) */
+#define DDR3_2133K 17
+/* DDR3-2133 (12-12-12) */
+#define DDR3_2133L 18
+/* DDR3-2133 (13-13-13) */
+#define DDR3_2133M 19
+/* DDR3-2133 (14-14-14) */
+#define DDR3_2133N 20
+/* DDR3 ATF default */
+#define DDR3_DEFAULT 21
+
+#endif
diff --git a/include/dt-bindings/clock/sun8i-r40-ccu.h b/include/dt-bindings/clock/sun8i-r40-ccu.h
index 4fa5f69fc297..f9e15a235626 100644
--- a/include/dt-bindings/clock/sun8i-r40-ccu.h
+++ b/include/dt-bindings/clock/sun8i-r40-ccu.h
@@ -43,6 +43,10 @@
#ifndef _DT_BINDINGS_CLK_SUN8I_R40_H_
#define _DT_BINDINGS_CLK_SUN8I_R40_H_
+#define CLK_PLL_VIDEO0 7
+
+#define CLK_PLL_VIDEO1 16
+
#define CLK_CPU 24
#define CLK_BUS_MIPI_DSI 29
diff --git a/include/dt-bindings/clock/sun8i-tcon-top.h b/include/dt-bindings/clock/sun8i-tcon-top.h
new file mode 100644
index 000000000000..25164d767835
--- /dev/null
+++ b/include/dt-bindings/clock/sun8i-tcon-top.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
+/* Copyright (C) 2018 Jernej Skrabec <jernej.skrabec@siol.net> */
+
+#ifndef _DT_BINDINGS_CLOCK_SUN8I_TCON_TOP_H_
+#define _DT_BINDINGS_CLOCK_SUN8I_TCON_TOP_H_
+
+#define CLK_TCON_TOP_TV0 0
+#define CLK_TCON_TOP_TV1 1
+#define CLK_TCON_TOP_DSI 2
+
+#endif /* _DT_BINDINGS_CLOCK_SUN8I_TCON_TOP_H_ */
diff --git a/include/dt-bindings/gce/mt8173-gce.h b/include/dt-bindings/gce/mt8173-gce.h
new file mode 100644
index 000000000000..ffcf94ba96c6
--- /dev/null
+++ b/include/dt-bindings/gce/mt8173-gce.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2018 MediaTek Inc.
+ * Author: Houlong Wei <houlong.wei@mediatek.com>
+ *
+ */
+
+#ifndef _DT_BINDINGS_GCE_MT8173_H
+#define _DT_BINDINGS_GCE_MT8173_H
+
+/* GCE HW thread priority */
+#define CMDQ_THR_PRIO_LOWEST 0
+#define CMDQ_THR_PRIO_HIGHEST 1
+
+/* GCE SUBSYS */
+#define SUBSYS_1400XXXX 1
+#define SUBSYS_1401XXXX 2
+#define SUBSYS_1402XXXX 3
+
+/* GCE HW EVENT */
+#define CMDQ_EVENT_DISP_OVL0_SOF 11
+#define CMDQ_EVENT_DISP_OVL1_SOF 12
+#define CMDQ_EVENT_DISP_RDMA0_SOF 13
+#define CMDQ_EVENT_DISP_RDMA1_SOF 14
+#define CMDQ_EVENT_DISP_RDMA2_SOF 15
+#define CMDQ_EVENT_DISP_WDMA0_SOF 16
+#define CMDQ_EVENT_DISP_WDMA1_SOF 17
+#define CMDQ_EVENT_DISP_OVL0_EOF 39
+#define CMDQ_EVENT_DISP_OVL1_EOF 40
+#define CMDQ_EVENT_DISP_RDMA0_EOF 41
+#define CMDQ_EVENT_DISP_RDMA1_EOF 42
+#define CMDQ_EVENT_DISP_RDMA2_EOF 43
+#define CMDQ_EVENT_DISP_WDMA0_EOF 44
+#define CMDQ_EVENT_DISP_WDMA1_EOF 45
+#define CMDQ_EVENT_MUTEX0_STREAM_EOF 53
+#define CMDQ_EVENT_MUTEX1_STREAM_EOF 54
+#define CMDQ_EVENT_MUTEX2_STREAM_EOF 55
+#define CMDQ_EVENT_MUTEX3_STREAM_EOF 56
+#define CMDQ_EVENT_MUTEX4_STREAM_EOF 57
+#define CMDQ_EVENT_DISP_RDMA0_UNDERRUN 63
+#define CMDQ_EVENT_DISP_RDMA1_UNDERRUN 64
+#define CMDQ_EVENT_DISP_RDMA2_UNDERRUN 65
+
+#endif
diff --git a/include/dt-bindings/iio/adc/at91-sama5d2_adc.h b/include/dt-bindings/iio/adc/at91-sama5d2_adc.h
new file mode 100644
index 000000000000..70f99dbdbb42
--- /dev/null
+++ b/include/dt-bindings/iio/adc/at91-sama5d2_adc.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * This header provides constants for configuring the AT91 SAMA5D2 ADC
+ */
+
+#ifndef _DT_BINDINGS_IIO_ADC_AT91_SAMA5D2_ADC_H
+#define _DT_BINDINGS_IIO_ADC_AT91_SAMA5D2_ADC_H
+
+/* X relative position channel index */
+#define AT91_SAMA5D2_ADC_X_CHANNEL 24
+/* Y relative position channel index */
+#define AT91_SAMA5D2_ADC_Y_CHANNEL 25
+/* pressure channel index */
+#define AT91_SAMA5D2_ADC_P_CHANNEL 26
+
+#endif
diff --git a/include/dt-bindings/memory/mt2712-larb-port.h b/include/dt-bindings/memory/mt2712-larb-port.h
new file mode 100644
index 000000000000..6f9aa7349cef
--- /dev/null
+++ b/include/dt-bindings/memory/mt2712-larb-port.h
@@ -0,0 +1,95 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017 MediaTek Inc.
+ * Author: Yong Wu <yong.wu@mediatek.com>
+ */
+#ifndef __DTS_IOMMU_PORT_MT2712_H
+#define __DTS_IOMMU_PORT_MT2712_H
+
+#define MTK_M4U_ID(larb, port) (((larb) << 5) | (port))
+
+#define M4U_LARB0_ID 0
+#define M4U_LARB1_ID 1
+#define M4U_LARB2_ID 2
+#define M4U_LARB3_ID 3
+#define M4U_LARB4_ID 4
+#define M4U_LARB5_ID 5
+#define M4U_LARB6_ID 6
+#define M4U_LARB7_ID 7
+#define M4U_LARB8_ID 8
+#define M4U_LARB9_ID 9
+
+/* larb0 */
+#define M4U_PORT_DISP_OVL0 MTK_M4U_ID(M4U_LARB0_ID, 0)
+#define M4U_PORT_DISP_RDMA0 MTK_M4U_ID(M4U_LARB0_ID, 1)
+#define M4U_PORT_DISP_WDMA0 MTK_M4U_ID(M4U_LARB0_ID, 2)
+#define M4U_PORT_DISP_OD_R MTK_M4U_ID(M4U_LARB0_ID, 3)
+#define M4U_PORT_DISP_OD_W MTK_M4U_ID(M4U_LARB0_ID, 4)
+#define M4U_PORT_MDP_RDMA0 MTK_M4U_ID(M4U_LARB0_ID, 5)
+#define M4U_PORT_MDP_WDMA MTK_M4U_ID(M4U_LARB0_ID, 6)
+#define M4U_PORT_DISP_RDMA2 MTK_M4U_ID(M4U_LARB0_ID, 7)
+
+/* larb1 */
+#define M4U_PORT_HW_VDEC_MC_EXT MTK_M4U_ID(M4U_LARB1_ID, 0)
+#define M4U_PORT_HW_VDEC_PP_EXT MTK_M4U_ID(M4U_LARB1_ID, 1)
+#define M4U_PORT_HW_VDEC_UFO_EXT MTK_M4U_ID(M4U_LARB1_ID, 2)
+#define M4U_PORT_HW_VDEC_VLD_EXT MTK_M4U_ID(M4U_LARB1_ID, 3)
+#define M4U_PORT_HW_VDEC_VLD2_EXT MTK_M4U_ID(M4U_LARB1_ID, 4)
+#define M4U_PORT_HW_VDEC_AVC_MV_EXT MTK_M4U_ID(M4U_LARB1_ID, 5)
+#define M4U_PORT_HW_VDEC_PRED_RD_EXT MTK_M4U_ID(M4U_LARB1_ID, 6)
+#define M4U_PORT_HW_VDEC_PRED_WR_EXT MTK_M4U_ID(M4U_LARB1_ID, 7)
+#define M4U_PORT_HW_VDEC_PPWRAP_EXT MTK_M4U_ID(M4U_LARB1_ID, 8)
+#define M4U_PORT_HW_VDEC_TILE MTK_M4U_ID(M4U_LARB1_ID, 9)
+#define M4U_PORT_HW_IMG_RESZ_EXT MTK_M4U_ID(M4U_LARB1_ID, 10)
+
+/* larb2 */
+#define M4U_PORT_CAM_DMA0 MTK_M4U_ID(M4U_LARB2_ID, 0)
+#define M4U_PORT_CAM_DMA1 MTK_M4U_ID(M4U_LARB2_ID, 1)
+#define M4U_PORT_CAM_DMA2 MTK_M4U_ID(M4U_LARB2_ID, 2)
+
+/* larb3 */
+#define M4U_PORT_VENC_RCPU MTK_M4U_ID(M4U_LARB3_ID, 0)
+#define M4U_PORT_VENC_REC MTK_M4U_ID(M4U_LARB3_ID, 1)
+#define M4U_PORT_VENC_BSDMA MTK_M4U_ID(M4U_LARB3_ID, 2)
+#define M4U_PORT_VENC_SV_COMV MTK_M4U_ID(M4U_LARB3_ID, 3)
+#define M4U_PORT_VENC_RD_COMV MTK_M4U_ID(M4U_LARB3_ID, 4)
+#define M4U_PORT_VENC_CUR_CHROMA MTK_M4U_ID(M4U_LARB3_ID, 5)
+#define M4U_PORT_VENC_REF_CHROMA MTK_M4U_ID(M4U_LARB3_ID, 6)
+#define M4U_PORT_VENC_CUR_LUMA MTK_M4U_ID(M4U_LARB3_ID, 7)
+#define M4U_PORT_VENC_REF_LUMA MTK_M4U_ID(M4U_LARB3_ID, 8)
+
+/* larb4 */
+#define M4U_PORT_DISP_OVL1 MTK_M4U_ID(M4U_LARB4_ID, 0)
+#define M4U_PORT_DISP_RDMA1 MTK_M4U_ID(M4U_LARB4_ID, 1)
+#define M4U_PORT_DISP_WDMA1 MTK_M4U_ID(M4U_LARB4_ID, 2)
+#define M4U_PORT_DISP_OD1_R MTK_M4U_ID(M4U_LARB4_ID, 3)
+#define M4U_PORT_DISP_OD1_W MTK_M4U_ID(M4U_LARB4_ID, 4)
+#define M4U_PORT_MDP_RDMA1 MTK_M4U_ID(M4U_LARB4_ID, 5)
+#define M4U_PORT_MDP_WROT1 MTK_M4U_ID(M4U_LARB4_ID, 6)
+
+/* larb5 */
+#define M4U_PORT_DISP_OVL2 MTK_M4U_ID(M4U_LARB5_ID, 0)
+#define M4U_PORT_DISP_WDMA2 MTK_M4U_ID(M4U_LARB5_ID, 1)
+#define M4U_PORT_MDP_RDMA2 MTK_M4U_ID(M4U_LARB5_ID, 2)
+#define M4U_PORT_MDP_WROT0 MTK_M4U_ID(M4U_LARB5_ID, 3)
+
+/* larb6 */
+#define M4U_PORT_JPGDEC_WDMA_0 MTK_M4U_ID(M4U_LARB6_ID, 0)
+#define M4U_PORT_JPGDEC_WDMA_1 MTK_M4U_ID(M4U_LARB6_ID, 1)
+#define M4U_PORT_JPGDEC_BSDMA_0 MTK_M4U_ID(M4U_LARB6_ID, 2)
+#define M4U_PORT_JPGDEC_BSDMA_1 MTK_M4U_ID(M4U_LARB6_ID, 3)
+
+/* larb7 */
+#define M4U_PORT_MDP_RDMA3 MTK_M4U_ID(M4U_LARB7_ID, 0)
+#define M4U_PORT_MDP_WROT2 MTK_M4U_ID(M4U_LARB7_ID, 1)
+
+/* larb8 */
+#define M4U_PORT_VDO MTK_M4U_ID(M4U_LARB8_ID, 0)
+#define M4U_PORT_NR MTK_M4U_ID(M4U_LARB8_ID, 1)
+#define M4U_PORT_WR_CHANNEL0 MTK_M4U_ID(M4U_LARB8_ID, 2)
+
+/* larb9 */
+#define M4U_PORT_TVD MTK_M4U_ID(M4U_LARB9_ID, 0)
+#define M4U_PORT_WR_CHANNEL1 MTK_M4U_ID(M4U_LARB9_ID, 1)
+
+#endif
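
MTK_M4U_ID() packs the local arbiter (larb) index into bits 5 and up and the port number into the low five bits, so each M4U_PORT_* constant is a single ID. Illustration only, with hypothetical helper macros for the inverse mapping:

/* M4U_PORT_HW_VDEC_PP_EXT = MTK_M4U_ID(M4U_LARB1_ID, 1) = (1 << 5) | 1 = 33 */
#define EXAMPLE_M4U_TO_LARB(id)	((id) >> 5)
#define EXAMPLE_M4U_TO_PORT(id)	((id) & 0x1f)
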
diff --git a/include/dt-bindings/pinctrl/at91.h b/include/dt-bindings/pinctrl/at91.h
index 2732d6c0fb39..eb81867eac77 100644
--- a/include/dt-bindings/pinctrl/at91.h
+++ b/include/dt-bindings/pinctrl/at91.h
@@ -39,4 +39,8 @@
#define AT91_PERIPH_C 3
#define AT91_PERIPH_D 4
+#define ATMEL_PIO_DRVSTR_LO 1
+#define ATMEL_PIO_DRVSTR_ME 2
+#define ATMEL_PIO_DRVSTR_HI 3
+
#endif /* __DT_BINDINGS_AT91_PINCTRL_H__ */
diff --git a/include/dt-bindings/pinctrl/samsung.h b/include/dt-bindings/pinctrl/samsung.h
index ceb672305f59..b1832506b923 100644
--- a/include/dt-bindings/pinctrl/samsung.h
+++ b/include/dt-bindings/pinctrl/samsung.h
@@ -1,14 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Samsung's Exynos pinctrl bindings
*
* Copyright (c) 2016 Samsung Electronics Co., Ltd.
* http://www.samsung.com
* Author: Krzysztof Kozlowski <krzk@kernel.org>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
-*/
+ */
#ifndef __DT_BINDINGS_PINCTRL_SAMSUNG_H__
#define __DT_BINDINGS_PINCTRL_SAMSUNG_H__
diff --git a/include/dt-bindings/regulator/maxim,max77802.h b/include/dt-bindings/regulator/maxim,max77802.h
index cf28631d7109..d0baba1973d4 100644
--- a/include/dt-bindings/regulator/maxim,max77802.h
+++ b/include/dt-bindings/regulator/maxim,max77802.h
@@ -1,10 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Copyright (C) 2014 Google, Inc
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
* Device Tree binding constants for the Maxim 77802 PMIC regulators
*/
diff --git a/include/dt-bindings/regulator/qcom,rpmh-regulator.h b/include/dt-bindings/regulator/qcom,rpmh-regulator.h
new file mode 100644
index 000000000000..86713dcf9e02
--- /dev/null
+++ b/include/dt-bindings/regulator/qcom,rpmh-regulator.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2018, The Linux Foundation. All rights reserved. */
+
+#ifndef __QCOM_RPMH_REGULATOR_H
+#define __QCOM_RPMH_REGULATOR_H
+
+/*
+ * These mode constants may be used to specify modes for various RPMh regulator
+ * device tree properties (e.g. regulator-initial-mode). Each type of regulator
+ * supports a subset of the possible modes.
+ *
+ * %RPMH_REGULATOR_MODE_RET: Retention mode in which only an extremely small
+ * load current is allowed. This mode is supported
+ * by LDO and SMPS type regulators.
+ * %RPMH_REGULATOR_MODE_LPM: Low power mode in which a small load current is
+ * allowed. This mode corresponds to PFM for SMPS
+ * and BOB type regulators. This mode is supported
+ * by LDO, HFSMPS, BOB, and PMIC4 FTSMPS type
+ * regulators.
+ * %RPMH_REGULATOR_MODE_AUTO: Auto mode in which the regulator hardware
+ * automatically switches between LPM and HPM based
+ * upon the real-time load current. This mode is
+ * supported by HFSMPS, BOB, and PMIC4 FTSMPS type
+ * regulators.
+ * %RPMH_REGULATOR_MODE_HPM: High power mode in which the full rated current
+ * of the regulator is allowed. This mode
+ * corresponds to PWM for SMPS and BOB type
+ * regulators. This mode is supported by all types
+ * of regulators.
+ */
+#define RPMH_REGULATOR_MODE_RET 0
+#define RPMH_REGULATOR_MODE_LPM 1
+#define RPMH_REGULATOR_MODE_AUTO 2
+#define RPMH_REGULATOR_MODE_HPM 3
+
+#endif
diff --git a/include/dt-bindings/reset/qcom,sdm845-aoss.h b/include/dt-bindings/reset/qcom,sdm845-aoss.h
new file mode 100644
index 000000000000..476c5fc873b6
--- /dev/null
+++ b/include/dt-bindings/reset/qcom,sdm845-aoss.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_RESET_AOSS_SDM_845_H
+#define _DT_BINDINGS_RESET_AOSS_SDM_845_H
+
+#define AOSS_CC_MSS_RESTART 0
+#define AOSS_CC_CAMSS_RESTART 1
+#define AOSS_CC_VENUS_RESTART 2
+#define AOSS_CC_GPU_RESTART 3
+#define AOSS_CC_DISPSS_RESTART 4
+#define AOSS_CC_WCSS_RESTART 5
+#define AOSS_CC_LPASS_RESTART 6
+
+#endif
diff --git a/include/dt-bindings/soc/qcom,rpmh-rsc.h b/include/dt-bindings/soc/qcom,rpmh-rsc.h
new file mode 100644
index 000000000000..868f998ea998
--- /dev/null
+++ b/include/dt-bindings/soc/qcom,rpmh-rsc.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __DT_QCOM_RPMH_RSC_H__
+#define __DT_QCOM_RPMH_RSC_H__
+
+#define SLEEP_TCS 0
+#define WAKE_TCS 1
+#define ACTIVE_TCS 2
+#define CONTROL_TCS 3
+
+#endif /* __DT_QCOM_RPMH_RSC_H__ */
diff --git a/include/dt-bindings/usb/pd.h b/include/dt-bindings/usb/pd.h
new file mode 100644
index 000000000000..7b7a92fefa0a
--- /dev/null
+++ b/include/dt-bindings/usb/pd.h
@@ -0,0 +1,62 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __DT_POWER_DELIVERY_H
+#define __DT_POWER_DELIVERY_H
+
+/* Power delivery Power Data Object definitions */
+#define PDO_TYPE_FIXED 0
+#define PDO_TYPE_BATT 1
+#define PDO_TYPE_VAR 2
+#define PDO_TYPE_APDO 3
+
+#define PDO_TYPE_SHIFT 30
+#define PDO_TYPE_MASK 0x3
+
+#define PDO_TYPE(t) ((t) << PDO_TYPE_SHIFT)
+
+#define PDO_VOLT_MASK 0x3ff
+#define PDO_CURR_MASK 0x3ff
+#define PDO_PWR_MASK 0x3ff
+
+#define PDO_FIXED_DUAL_ROLE (1 << 29) /* Power role swap supported */
+#define PDO_FIXED_SUSPEND (1 << 28) /* USB Suspend supported (Source) */
+#define PDO_FIXED_HIGHER_CAP (1 << 28) /* Requires more than vSafe5V (Sink) */
+#define PDO_FIXED_EXTPOWER (1 << 27) /* Externally powered */
+#define PDO_FIXED_USB_COMM (1 << 26) /* USB communications capable */
+#define PDO_FIXED_DATA_SWAP (1 << 25) /* Data role swap supported */
+#define PDO_FIXED_VOLT_SHIFT 10 /* 50mV units */
+#define PDO_FIXED_CURR_SHIFT 0 /* 10mA units */
+
+#define PDO_FIXED_VOLT(mv) ((((mv) / 50) & PDO_VOLT_MASK) << PDO_FIXED_VOLT_SHIFT)
+#define PDO_FIXED_CURR(ma) ((((ma) / 10) & PDO_CURR_MASK) << PDO_FIXED_CURR_SHIFT)
+
+#define PDO_FIXED(mv, ma, flags) \
+ (PDO_TYPE(PDO_TYPE_FIXED) | (flags) | \
+ PDO_FIXED_VOLT(mv) | PDO_FIXED_CURR(ma))
+
+#define VSAFE5V 5000 /* mv units */
+
+#define PDO_BATT_MAX_VOLT_SHIFT 20 /* 50mV units */
+#define PDO_BATT_MIN_VOLT_SHIFT 10 /* 50mV units */
+#define PDO_BATT_MAX_PWR_SHIFT 0 /* 250mW units */
+
+#define PDO_BATT_MIN_VOLT(mv) ((((mv) / 50) & PDO_VOLT_MASK) << PDO_BATT_MIN_VOLT_SHIFT)
+#define PDO_BATT_MAX_VOLT(mv) ((((mv) / 50) & PDO_VOLT_MASK) << PDO_BATT_MAX_VOLT_SHIFT)
+#define PDO_BATT_MAX_POWER(mw) ((((mw) / 250) & PDO_PWR_MASK) << PDO_BATT_MAX_PWR_SHIFT)
+
+#define PDO_BATT(min_mv, max_mv, max_mw) \
+ (PDO_TYPE(PDO_TYPE_BATT) | PDO_BATT_MIN_VOLT(min_mv) | \
+ PDO_BATT_MAX_VOLT(max_mv) | PDO_BATT_MAX_POWER(max_mw))
+
+#define PDO_VAR_MAX_VOLT_SHIFT 20 /* 50mV units */
+#define PDO_VAR_MIN_VOLT_SHIFT 10 /* 50mV units */
+#define PDO_VAR_MAX_CURR_SHIFT 0 /* 10mA units */
+
+#define PDO_VAR_MIN_VOLT(mv) ((((mv) / 50) & PDO_VOLT_MASK) << PDO_VAR_MIN_VOLT_SHIFT)
+#define PDO_VAR_MAX_VOLT(mv) ((((mv) / 50) & PDO_VOLT_MASK) << PDO_VAR_MAX_VOLT_SHIFT)
+#define PDO_VAR_MAX_CURR(ma) ((((ma) / 10) & PDO_CURR_MASK) << PDO_VAR_MAX_CURR_SHIFT)
+
+#define PDO_VAR(min_mv, max_mv, max_ma) \
+ (PDO_TYPE(PDO_TYPE_VAR) | PDO_VAR_MIN_VOLT(min_mv) | \
+ PDO_VAR_MAX_VOLT(max_mv) | PDO_VAR_MAX_CURR(max_ma))
+
+ #endif /* __DT_POWER_DELIVERY_H */
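
The PDO_* macros mirror the USB Power Delivery Power Data Object bit layout, so capability words can be composed at compile time (typically for connector properties such as source-pdos/sink-pdos). A worked example, not taken from this patch:

/*
 * A 5 V / 3 A fixed-supply PDO with dual-role power and USB communication:
 *
 *   PDO_FIXED(5000, 3000, PDO_FIXED_DUAL_ROLE | PDO_FIXED_USB_COMM)
 *     = (PDO_TYPE_FIXED << 30) | (1 << 29) | (1 << 26)
 *       | ((5000 / 50) << 10) | (3000 / 10)
 *     = 0x2401912c
 */
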
diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
index cfdd2484cc42..4f31f96bbfab 100644
--- a/include/kvm/arm_vgic.h
+++ b/include/kvm/arm_vgic.h
@@ -133,6 +133,7 @@ struct vgic_irq {
u8 source; /* GICv2 SGIs only */
u8 active_source; /* GICv2 SGIs only */
u8 priority;
+ u8 group; /* 0 == group 0, 1 == group 1 */
enum vgic_irq_config config; /* Level or edge */
/*
@@ -217,6 +218,12 @@ struct vgic_dist {
/* vGIC model the kernel emulates for the guest (GICv2 or GICv3) */
u32 vgic_model;
+ /* Implementation revision as reported in the GICD_IIDR */
+ u32 implementation_rev;
+
+ /* Userspace can write to GICv2 IGROUPR */
+ bool v2_groups_user_writable;
+
/* Do injected MSIs require an additional device ID? */
bool msis_require_devid;
@@ -366,7 +373,7 @@ void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu);
void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu);
void kvm_vgic_reset_mapped_irq(struct kvm_vcpu *vcpu, u32 vintid);
-void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg);
+void vgic_v3_dispatch_sgi(struct kvm_vcpu *vcpu, u64 reg, bool allow_group1);
/**
* kvm_vgic_get_max_vcpus - Get the maximum number of VCPUs allowed by HW
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 4b35a66383f9..de8d3d3fa651 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -443,6 +443,9 @@ int acpi_check_resource_conflict(const struct resource *res);
int acpi_check_region(resource_size_t start, resource_size_t n,
const char *name);
+acpi_status acpi_release_memory(acpi_handle handle, struct resource *res,
+ u32 level);
+
int acpi_resources_are_enforced(void);
#ifdef CONFIG_HIBERNATION
@@ -1055,27 +1058,20 @@ static inline int acpi_dev_gpio_irq_get(struct acpi_device *adev, int index)
/* Device properties */
-#define MAX_ACPI_REFERENCE_ARGS 8
-struct acpi_reference_args {
- struct acpi_device *adev;
- size_t nargs;
- u64 args[MAX_ACPI_REFERENCE_ARGS];
-};
-
#ifdef CONFIG_ACPI
int acpi_dev_get_property(const struct acpi_device *adev, const char *name,
acpi_object_type type, const union acpi_object **obj);
int __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
const char *name, size_t index, size_t num_args,
- struct acpi_reference_args *args);
+ struct fwnode_reference_args *args);
static inline int acpi_node_get_property_reference(
const struct fwnode_handle *fwnode,
const char *name, size_t index,
- struct acpi_reference_args *args)
+ struct fwnode_reference_args *args)
{
return __acpi_node_get_property_reference(fwnode, name, index,
- MAX_ACPI_REFERENCE_ARGS, args);
+ NR_FWNODE_REFERENCE_ARGS, args);
}
int acpi_node_prop_get(const struct fwnode_handle *fwnode, const char *propname,
@@ -1093,14 +1089,6 @@ struct fwnode_handle *acpi_get_next_subnode(const struct fwnode_handle *fwnode,
struct fwnode_handle *child);
struct fwnode_handle *acpi_node_get_parent(const struct fwnode_handle *fwnode);
-struct fwnode_handle *
-acpi_graph_get_next_endpoint(const struct fwnode_handle *fwnode,
- struct fwnode_handle *prev);
-int acpi_graph_get_remote_endpoint(const struct fwnode_handle *fwnode,
- struct fwnode_handle **remote,
- struct fwnode_handle **port,
- struct fwnode_handle **endpoint);
-
struct acpi_probe_entry;
typedef bool (*acpi_probe_entry_validate_subtbl)(struct acpi_subtable_header *,
struct acpi_probe_entry *);
@@ -1166,7 +1154,7 @@ static inline int acpi_dev_get_property(struct acpi_device *adev,
static inline int
__acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
const char *name, size_t index, size_t num_args,
- struct acpi_reference_args *args)
+ struct fwnode_reference_args *args)
{
return -ENXIO;
}
@@ -1174,7 +1162,7 @@ __acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
static inline int
acpi_node_get_property_reference(const struct fwnode_handle *fwnode,
const char *name, size_t index,
- struct acpi_reference_args *args)
+ struct fwnode_reference_args *args)
{
return -ENXIO;
}
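
With struct acpi_reference_args gone, callers of the property-reference API receive the generic struct fwnode_reference_args instead. A hypothetical caller ("example-prop" and example_get_ref() are invented for this sketch):

#include <linux/acpi.h>
#include <linux/property.h>

static int example_get_ref(const struct fwnode_handle *fwnode)
{
	struct fwnode_reference_args args;
	int ret;

	ret = acpi_node_get_property_reference(fwnode, "example-prop", 0, &args);
	if (ret)
		return ret;
	/* args.fwnode plus args.args[0..args.nargs-1] describe the reference */
	fwnode_handle_put(args.fwnode);
	return 0;
}
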
diff --git a/include/linux/ascii85.h b/include/linux/ascii85.h
new file mode 100644
index 000000000000..4cc40201273e
--- /dev/null
+++ b/include/linux/ascii85.h
@@ -0,0 +1,38 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (c) 2008 Intel Corporation
+ * Copyright (c) 2018 The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _ASCII85_H_
+#define _ASCII85_H_
+
+#include <linux/kernel.h>
+
+#define ASCII85_BUFSZ 6
+
+static inline long
+ascii85_encode_len(long len)
+{
+ return DIV_ROUND_UP(len, 4);
+}
+
+static inline const char *
+ascii85_encode(u32 in, char *out)
+{
+ int i;
+
+ if (in == 0)
+ return "z";
+
+ out[5] = '\0';
+ for (i = 5; i--; ) {
+ out[i] = '!' + in % 85;
+ in /= 85;
+ }
+
+ return out;
+}
+
+#endif
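
ascii85_encode() emits one group of up to five printable characters per 32-bit word (with "z" as the shorthand for an all-zero word), and ascii85_encode_len() reports how many 4-byte groups a buffer needs. A minimal sketch, assuming a seq_file consumer and a word-aligned buffer (example_dump_ascii85() is hypothetical):

#include <linux/ascii85.h>
#include <linux/seq_file.h>

static void example_dump_ascii85(struct seq_file *m, const u32 *buf, long nwords)
{
	char out[ASCII85_BUFSZ];
	long i;

	for (i = 0; i < nwords; i++)
		seq_puts(m, ascii85_encode(buf[i], out));
}
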
diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h
index 0c27515d2cf6..8124815eb121 100644
--- a/include/linux/atmdev.h
+++ b/include/linux/atmdev.h
@@ -214,6 +214,7 @@ struct atmphy_ops {
struct atm_skb_data {
struct atm_vcc *vcc; /* ATM VCC */
unsigned long atm_options; /* ATM layer options */
+ unsigned int acct_truesize; /* truesize accounted to vcc */
};
#define VCC_HTABLE_SIZE 32
@@ -241,6 +242,20 @@ void vcc_insert_socket(struct sock *sk);
void atm_dev_release_vccs(struct atm_dev *dev);
+static inline void atm_account_tx(struct atm_vcc *vcc, struct sk_buff *skb)
+{
+ /*
+ * Because ATM skbs may not belong to a sock (and we don't
+ * necessarily want to), skb->truesize may be adjusted,
+ * escaping the hack in pskb_expand_head() which avoids
+ * doing so for some cases. So stash the value of truesize
+ * at the time we accounted it, and atm_pop_raw() can use
+ * that value later, in case it changes.
+ */
+ refcount_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
+ ATM_SKB(skb)->acct_truesize = skb->truesize;
+ ATM_SKB(skb)->atm_options = vcc->atm_options;
+}
static inline void atm_force_charge(struct atm_vcc *vcc,int truesize)
{
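Drivers are expected to call atm_account_tx() on their transmit path instead of open-coding the refcount_add() and atm_options assignment, so the accounted truesize is stashed for the completion side. A hedged driver-side sketch, not part of this patch (mydrv_queue_tx() is hypothetical):

	static int mydrv_send(struct atm_vcc *vcc, struct sk_buff *skb)
	{
		atm_account_tx(vcc, skb);	/* records acct_truesize for the pop path */
		return mydrv_queue_tx(vcc, skb);
	}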
diff --git a/include/linux/atomic.h b/include/linux/atomic.h
index 01ce3997cb42..1e8e88bdaf09 100644
--- a/include/linux/atomic.h
+++ b/include/linux/atomic.h
@@ -2,6 +2,8 @@
/* Atomic operations usable in machine independent code */
#ifndef _LINUX_ATOMIC_H
#define _LINUX_ATOMIC_H
+#include <linux/types.h>
+
#include <asm/atomic.h>
#include <asm/barrier.h>
@@ -36,40 +38,46 @@
* barriers on top of the relaxed variant. In the case where the relaxed
* variant is already fully ordered, no additional barriers are needed.
*
- * Besides, if an arch has a special barrier for acquire/release, it could
- * implement its own __atomic_op_* and use the same framework for building
- * variants
- *
- * If an architecture overrides __atomic_op_acquire() it will probably want
- * to define smp_mb__after_spinlock().
+ * If an architecture overrides __atomic_acquire_fence() it will probably
+ * want to define smp_mb__after_spinlock().
*/
-#ifndef __atomic_op_acquire
+#ifndef __atomic_acquire_fence
+#define __atomic_acquire_fence smp_mb__after_atomic
+#endif
+
+#ifndef __atomic_release_fence
+#define __atomic_release_fence smp_mb__before_atomic
+#endif
+
+#ifndef __atomic_pre_full_fence
+#define __atomic_pre_full_fence smp_mb__before_atomic
+#endif
+
+#ifndef __atomic_post_full_fence
+#define __atomic_post_full_fence smp_mb__after_atomic
+#endif
+
#define __atomic_op_acquire(op, args...) \
({ \
typeof(op##_relaxed(args)) __ret = op##_relaxed(args); \
- smp_mb__after_atomic(); \
+ __atomic_acquire_fence(); \
__ret; \
})
-#endif
-#ifndef __atomic_op_release
#define __atomic_op_release(op, args...) \
({ \
- smp_mb__before_atomic(); \
+ __atomic_release_fence(); \
op##_relaxed(args); \
})
-#endif
-#ifndef __atomic_op_fence
#define __atomic_op_fence(op, args...) \
({ \
typeof(op##_relaxed(args)) __ret; \
- smp_mb__before_atomic(); \
+ __atomic_pre_full_fence(); \
__ret = op##_relaxed(args); \
- smp_mb__after_atomic(); \
+ __atomic_post_full_fence(); \
__ret; \
})
-#endif
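With this rework an architecture no longer overrides the whole __atomic_op_*() wrappers; it only supplies the fence hooks and the generic wrappers are built on top of them. A minimal, hypothetical arch-side sketch (foo_acquire_barrier()/foo_release_barrier() are made-up names, not part of this patch):

	/* in a hypothetical arch/foo/include/asm/atomic.h */
	#define __atomic_acquire_fence()	foo_acquire_barrier()
	#define __atomic_release_fence()	foo_release_barrier()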
/* atomic_add_return_relaxed */
#ifndef atomic_add_return_relaxed
@@ -95,11 +103,23 @@
#endif
#endif /* atomic_add_return_relaxed */
+#ifndef atomic_inc
+#define atomic_inc(v) atomic_add(1, (v))
+#endif
+
/* atomic_inc_return_relaxed */
#ifndef atomic_inc_return_relaxed
+
+#ifndef atomic_inc_return
+#define atomic_inc_return(v) atomic_add_return(1, (v))
+#define atomic_inc_return_relaxed(v) atomic_add_return_relaxed(1, (v))
+#define atomic_inc_return_acquire(v) atomic_add_return_acquire(1, (v))
+#define atomic_inc_return_release(v) atomic_add_return_release(1, (v))
+#else /* atomic_inc_return */
#define atomic_inc_return_relaxed atomic_inc_return
#define atomic_inc_return_acquire atomic_inc_return
#define atomic_inc_return_release atomic_inc_return
+#endif /* atomic_inc_return */
#else /* atomic_inc_return_relaxed */
@@ -143,11 +163,23 @@
#endif
#endif /* atomic_sub_return_relaxed */
+#ifndef atomic_dec
+#define atomic_dec(v) atomic_sub(1, (v))
+#endif
+
/* atomic_dec_return_relaxed */
#ifndef atomic_dec_return_relaxed
+
+#ifndef atomic_dec_return
+#define atomic_dec_return(v) atomic_sub_return(1, (v))
+#define atomic_dec_return_relaxed(v) atomic_sub_return_relaxed(1, (v))
+#define atomic_dec_return_acquire(v) atomic_sub_return_acquire(1, (v))
+#define atomic_dec_return_release(v) atomic_sub_return_release(1, (v))
+#else /* atomic_dec_return */
#define atomic_dec_return_relaxed atomic_dec_return
#define atomic_dec_return_acquire atomic_dec_return
#define atomic_dec_return_release atomic_dec_return
+#endif /* atomic_dec_return */
#else /* atomic_dec_return_relaxed */
@@ -328,12 +360,22 @@
#endif
#endif /* atomic_fetch_and_relaxed */
-#ifdef atomic_andnot
-/* atomic_fetch_andnot_relaxed */
+#ifndef atomic_andnot
+#define atomic_andnot(i, v) atomic_and(~(int)(i), (v))
+#endif
+
#ifndef atomic_fetch_andnot_relaxed
-#define atomic_fetch_andnot_relaxed atomic_fetch_andnot
-#define atomic_fetch_andnot_acquire atomic_fetch_andnot
-#define atomic_fetch_andnot_release atomic_fetch_andnot
+
+#ifndef atomic_fetch_andnot
+#define atomic_fetch_andnot(i, v) atomic_fetch_and(~(int)(i), (v))
+#define atomic_fetch_andnot_relaxed(i, v) atomic_fetch_and_relaxed(~(int)(i), (v))
+#define atomic_fetch_andnot_acquire(i, v) atomic_fetch_and_acquire(~(int)(i), (v))
+#define atomic_fetch_andnot_release(i, v) atomic_fetch_and_release(~(int)(i), (v))
+#else /* atomic_fetch_andnot */
+#define atomic_fetch_andnot_relaxed atomic_fetch_andnot
+#define atomic_fetch_andnot_acquire atomic_fetch_andnot
+#define atomic_fetch_andnot_release atomic_fetch_andnot
+#endif /* atomic_fetch_andnot */
#else /* atomic_fetch_andnot_relaxed */
@@ -352,7 +394,6 @@
__atomic_op_fence(atomic_fetch_andnot, __VA_ARGS__)
#endif
#endif /* atomic_fetch_andnot_relaxed */
-#endif /* atomic_andnot */
/* atomic_fetch_xor_relaxed */
#ifndef atomic_fetch_xor_relaxed
@@ -520,112 +561,140 @@
#endif /* xchg_relaxed */
/**
+ * atomic_fetch_add_unless - add unless the number is already a given value
+ * @v: pointer of type atomic_t
+ * @a: the amount to add to v...
+ * @u: ...unless v is equal to u.
+ *
+ * Atomically adds @a to @v, if @v was not already @u.
+ * Returns the original value of @v.
+ */
+#ifndef atomic_fetch_add_unless
+static inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
+{
+ int c = atomic_read(v);
+
+ do {
+ if (unlikely(c == u))
+ break;
+ } while (!atomic_try_cmpxchg(v, &c, c + a));
+
+ return c;
+}
+#endif
+
+/**
* atomic_add_unless - add unless the number is already a given value
* @v: pointer of type atomic_t
* @a: the amount to add to v...
* @u: ...unless v is equal to u.
*
- * Atomically adds @a to @v, so long as @v was not already @u.
- * Returns non-zero if @v was not @u, and zero otherwise.
+ * Atomically adds @a to @v, if @v was not already @u.
+ * Returns true if the addition was done.
*/
-static inline int atomic_add_unless(atomic_t *v, int a, int u)
+static inline bool atomic_add_unless(atomic_t *v, int a, int u)
{
- return __atomic_add_unless(v, a, u) != u;
+ return atomic_fetch_add_unless(v, a, u) != u;
}
/**
* atomic_inc_not_zero - increment unless the number is zero
* @v: pointer of type atomic_t
*
- * Atomically increments @v by 1, so long as @v is non-zero.
- * Returns non-zero if @v was non-zero, and zero otherwise.
+ * Atomically increments @v by 1, if @v is non-zero.
+ * Returns true if the increment was done.
*/
#ifndef atomic_inc_not_zero
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
#endif
-#ifndef atomic_andnot
-static inline void atomic_andnot(int i, atomic_t *v)
-{
- atomic_and(~i, v);
-}
-
-static inline int atomic_fetch_andnot(int i, atomic_t *v)
-{
- return atomic_fetch_and(~i, v);
-}
-
-static inline int atomic_fetch_andnot_relaxed(int i, atomic_t *v)
+/**
+ * atomic_inc_and_test - increment and test
+ * @v: pointer of type atomic_t
+ *
+ * Atomically increments @v by 1
+ * and returns true if the result is zero, or false for all
+ * other cases.
+ */
+#ifndef atomic_inc_and_test
+static inline bool atomic_inc_and_test(atomic_t *v)
{
- return atomic_fetch_and_relaxed(~i, v);
+ return atomic_inc_return(v) == 0;
}
+#endif
-static inline int atomic_fetch_andnot_acquire(int i, atomic_t *v)
+/**
+ * atomic_dec_and_test - decrement and test
+ * @v: pointer of type atomic_t
+ *
+ * Atomically decrements @v by 1 and
+ * returns true if the result is 0, or false for all other
+ * cases.
+ */
+#ifndef atomic_dec_and_test
+static inline bool atomic_dec_and_test(atomic_t *v)
{
- return atomic_fetch_and_acquire(~i, v);
+ return atomic_dec_return(v) == 0;
}
+#endif
-static inline int atomic_fetch_andnot_release(int i, atomic_t *v)
+/**
+ * atomic_sub_and_test - subtract value from variable and test result
+ * @i: integer value to subtract
+ * @v: pointer of type atomic_t
+ *
+ * Atomically subtracts @i from @v and returns
+ * true if the result is zero, or false for all
+ * other cases.
+ */
+#ifndef atomic_sub_and_test
+static inline bool atomic_sub_and_test(int i, atomic_t *v)
{
- return atomic_fetch_and_release(~i, v);
+ return atomic_sub_return(i, v) == 0;
}
#endif
/**
- * atomic_inc_not_zero_hint - increment if not null
+ * atomic_add_negative - add and test if negative
+ * @i: integer value to add
* @v: pointer of type atomic_t
- * @hint: probable value of the atomic before the increment
- *
- * This version of atomic_inc_not_zero() gives a hint of probable
- * value of the atomic. This helps processor to not read the memory
- * before doing the atomic read/modify/write cycle, lowering
- * number of bus transactions on some arches.
*
- * Returns: 0 if increment was not done, 1 otherwise.
+ * Atomically adds @i to @v and returns true
+ * if the result is negative, or false when the
+ * result is greater than or equal to zero.
*/
-#ifndef atomic_inc_not_zero_hint
-static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
+#ifndef atomic_add_negative
+static inline bool atomic_add_negative(int i, atomic_t *v)
{
- int val, c = hint;
-
- /* sanity test, should be removed by compiler if hint is a constant */
- if (!hint)
- return atomic_inc_not_zero(v);
-
- do {
- val = atomic_cmpxchg(v, c, c + 1);
- if (val == c)
- return 1;
- c = val;
- } while (c);
-
- return 0;
+ return atomic_add_return(i, v) < 0;
}
#endif
#ifndef atomic_inc_unless_negative
-static inline int atomic_inc_unless_negative(atomic_t *p)
+static inline bool atomic_inc_unless_negative(atomic_t *v)
{
- int v, v1;
- for (v = 0; v >= 0; v = v1) {
- v1 = atomic_cmpxchg(p, v, v + 1);
- if (likely(v1 == v))
- return 1;
- }
- return 0;
+ int c = atomic_read(v);
+
+ do {
+ if (unlikely(c < 0))
+ return false;
+ } while (!atomic_try_cmpxchg(v, &c, c + 1));
+
+ return true;
}
#endif
#ifndef atomic_dec_unless_positive
-static inline int atomic_dec_unless_positive(atomic_t *p)
+static inline bool atomic_dec_unless_positive(atomic_t *v)
{
- int v, v1;
- for (v = 0; v <= 0; v = v1) {
- v1 = atomic_cmpxchg(p, v, v - 1);
- if (likely(v1 == v))
- return 1;
- }
- return 0;
+ int c = atomic_read(v);
+
+ do {
+ if (unlikely(c > 0))
+ return false;
+ } while (!atomic_try_cmpxchg(v, &c, c - 1));
+
+ return true;
}
#endif
@@ -639,17 +708,14 @@ static inline int atomic_dec_unless_positive(atomic_t *p)
#ifndef atomic_dec_if_positive
static inline int atomic_dec_if_positive(atomic_t *v)
{
- int c, old, dec;
- c = atomic_read(v);
- for (;;) {
+ int dec, c = atomic_read(v);
+
+ do {
dec = c - 1;
if (unlikely(dec < 0))
break;
- old = atomic_cmpxchg((v), c, dec);
- if (likely(old == c))
- break;
- c = old;
- }
+ } while (!atomic_try_cmpxchg(v, &c, dec));
+
return dec;
}
#endif
@@ -693,11 +759,23 @@ static inline int atomic_dec_if_positive(atomic_t *v)
#endif
#endif /* atomic64_add_return_relaxed */
+#ifndef atomic64_inc
+#define atomic64_inc(v) atomic64_add(1, (v))
+#endif
+
/* atomic64_inc_return_relaxed */
#ifndef atomic64_inc_return_relaxed
+
+#ifndef atomic64_inc_return
+#define atomic64_inc_return(v) atomic64_add_return(1, (v))
+#define atomic64_inc_return_relaxed(v) atomic64_add_return_relaxed(1, (v))
+#define atomic64_inc_return_acquire(v) atomic64_add_return_acquire(1, (v))
+#define atomic64_inc_return_release(v) atomic64_add_return_release(1, (v))
+#else /* atomic64_inc_return */
#define atomic64_inc_return_relaxed atomic64_inc_return
#define atomic64_inc_return_acquire atomic64_inc_return
#define atomic64_inc_return_release atomic64_inc_return
+#endif /* atomic64_inc_return */
#else /* atomic64_inc_return_relaxed */
@@ -742,11 +820,23 @@ static inline int atomic_dec_if_positive(atomic_t *v)
#endif
#endif /* atomic64_sub_return_relaxed */
+#ifndef atomic64_dec
+#define atomic64_dec(v) atomic64_sub(1, (v))
+#endif
+
/* atomic64_dec_return_relaxed */
#ifndef atomic64_dec_return_relaxed
+
+#ifndef atomic64_dec_return
+#define atomic64_dec_return(v) atomic64_sub_return(1, (v))
+#define atomic64_dec_return_relaxed(v) atomic64_sub_return_relaxed(1, (v))
+#define atomic64_dec_return_acquire(v) atomic64_sub_return_acquire(1, (v))
+#define atomic64_dec_return_release(v) atomic64_sub_return_release(1, (v))
+#else /* atomic64_dec_return */
#define atomic64_dec_return_relaxed atomic64_dec_return
#define atomic64_dec_return_acquire atomic64_dec_return
#define atomic64_dec_return_release atomic64_dec_return
+#endif /* atomic64_dec_return */
#else /* atomic64_dec_return_relaxed */
@@ -927,12 +1017,22 @@ static inline int atomic_dec_if_positive(atomic_t *v)
#endif
#endif /* atomic64_fetch_and_relaxed */
-#ifdef atomic64_andnot
-/* atomic64_fetch_andnot_relaxed */
+#ifndef atomic64_andnot
+#define atomic64_andnot(i, v) atomic64_and(~(long long)(i), (v))
+#endif
+
#ifndef atomic64_fetch_andnot_relaxed
-#define atomic64_fetch_andnot_relaxed atomic64_fetch_andnot
-#define atomic64_fetch_andnot_acquire atomic64_fetch_andnot
-#define atomic64_fetch_andnot_release atomic64_fetch_andnot
+
+#ifndef atomic64_fetch_andnot
+#define atomic64_fetch_andnot(i, v) atomic64_fetch_and(~(long long)(i), (v))
+#define atomic64_fetch_andnot_relaxed(i, v) atomic64_fetch_and_relaxed(~(long long)(i), (v))
+#define atomic64_fetch_andnot_acquire(i, v) atomic64_fetch_and_acquire(~(long long)(i), (v))
+#define atomic64_fetch_andnot_release(i, v) atomic64_fetch_and_release(~(long long)(i), (v))
+#else /* atomic64_fetch_andnot */
+#define atomic64_fetch_andnot_relaxed atomic64_fetch_andnot
+#define atomic64_fetch_andnot_acquire atomic64_fetch_andnot
+#define atomic64_fetch_andnot_release atomic64_fetch_andnot
+#endif /* atomic64_fetch_andnot */
#else /* atomic64_fetch_andnot_relaxed */
@@ -951,7 +1051,6 @@ static inline int atomic_dec_if_positive(atomic_t *v)
__atomic_op_fence(atomic64_fetch_andnot, __VA_ARGS__)
#endif
#endif /* atomic64_fetch_andnot_relaxed */
-#endif /* atomic64_andnot */
/* atomic64_fetch_xor_relaxed */
#ifndef atomic64_fetch_xor_relaxed
@@ -1049,30 +1148,164 @@ static inline int atomic_dec_if_positive(atomic_t *v)
#define atomic64_try_cmpxchg_release atomic64_try_cmpxchg
#endif /* atomic64_try_cmpxchg */
-#ifndef atomic64_andnot
-static inline void atomic64_andnot(long long i, atomic64_t *v)
+/**
+ * atomic64_fetch_add_unless - add unless the number is already a given value
+ * @v: pointer of type atomic64_t
+ * @a: the amount to add to v...
+ * @u: ...unless v is equal to u.
+ *
+ * Atomically adds @a to @v, if @v was not already @u.
+ * Returns the original value of @v.
+ */
+#ifndef atomic64_fetch_add_unless
+static inline long long atomic64_fetch_add_unless(atomic64_t *v, long long a,
+ long long u)
{
- atomic64_and(~i, v);
+ long long c = atomic64_read(v);
+
+ do {
+ if (unlikely(c == u))
+ break;
+ } while (!atomic64_try_cmpxchg(v, &c, c + a));
+
+ return c;
}
+#endif
-static inline long long atomic64_fetch_andnot(long long i, atomic64_t *v)
+/**
+ * atomic64_add_unless - add unless the number is already a given value
+ * @v: pointer of type atomic64_t
+ * @a: the amount to add to v...
+ * @u: ...unless v is equal to u.
+ *
+ * Atomically adds @a to @v, if @v was not already @u.
+ * Returns true if the addition was done.
+ */
+static inline bool atomic64_add_unless(atomic64_t *v, long long a, long long u)
{
- return atomic64_fetch_and(~i, v);
+ return atomic64_fetch_add_unless(v, a, u) != u;
}
-static inline long long atomic64_fetch_andnot_relaxed(long long i, atomic64_t *v)
+/**
+ * atomic64_inc_not_zero - increment unless the number is zero
+ * @v: pointer of type atomic64_t
+ *
+ * Atomically increments @v by 1, if @v is non-zero.
+ * Returns true if the increment was done.
+ */
+#ifndef atomic64_inc_not_zero
+#define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
+#endif
+
+/**
+ * atomic64_inc_and_test - increment and test
+ * @v: pointer of type atomic64_t
+ *
+ * Atomically increments @v by 1
+ * and returns true if the result is zero, or false for all
+ * other cases.
+ */
+#ifndef atomic64_inc_and_test
+static inline bool atomic64_inc_and_test(atomic64_t *v)
{
- return atomic64_fetch_and_relaxed(~i, v);
+ return atomic64_inc_return(v) == 0;
}
+#endif
-static inline long long atomic64_fetch_andnot_acquire(long long i, atomic64_t *v)
+/**
+ * atomic64_dec_and_test - decrement and test
+ * @v: pointer of type atomic64_t
+ *
+ * Atomically decrements @v by 1 and
+ * returns true if the result is 0, or false for all other
+ * cases.
+ */
+#ifndef atomic64_dec_and_test
+static inline bool atomic64_dec_and_test(atomic64_t *v)
{
- return atomic64_fetch_and_acquire(~i, v);
+ return atomic64_dec_return(v) == 0;
}
+#endif
-static inline long long atomic64_fetch_andnot_release(long long i, atomic64_t *v)
+/**
+ * atomic64_sub_and_test - subtract value from variable and test result
+ * @i: integer value to subtract
+ * @v: pointer of type atomic64_t
+ *
+ * Atomically subtracts @i from @v and returns
+ * true if the result is zero, or false for all
+ * other cases.
+ */
+#ifndef atomic64_sub_and_test
+static inline bool atomic64_sub_and_test(long long i, atomic64_t *v)
+{
+ return atomic64_sub_return(i, v) == 0;
+}
+#endif
+
+/**
+ * atomic64_add_negative - add and test if negative
+ * @i: integer value to add
+ * @v: pointer of type atomic64_t
+ *
+ * Atomically adds @i to @v and returns true
+ * if the result is negative, or false when the
+ * result is greater than or equal to zero.
+ */
+#ifndef atomic64_add_negative
+static inline bool atomic64_add_negative(long long i, atomic64_t *v)
{
- return atomic64_fetch_and_release(~i, v);
+ return atomic64_add_return(i, v) < 0;
+}
+#endif
+
+#ifndef atomic64_inc_unless_negative
+static inline bool atomic64_inc_unless_negative(atomic64_t *v)
+{
+ long long c = atomic64_read(v);
+
+ do {
+ if (unlikely(c < 0))
+ return false;
+ } while (!atomic64_try_cmpxchg(v, &c, c + 1));
+
+ return true;
+}
+#endif
+
+#ifndef atomic64_dec_unless_positive
+static inline bool atomic64_dec_unless_positive(atomic64_t *v)
+{
+ long long c = atomic64_read(v);
+
+ do {
+ if (unlikely(c > 0))
+ return false;
+ } while (!atomic64_try_cmpxchg(v, &c, c - 1));
+
+ return true;
+}
+#endif
+
+/*
+ * atomic64_dec_if_positive - decrement by 1 if old value positive
+ * @v: pointer of type atomic64_t
+ *
+ * The function returns the old value of @v minus 1, even if
+ * @v was not decremented.
+ */
+#ifndef atomic64_dec_if_positive
+static inline long long atomic64_dec_if_positive(atomic64_t *v)
+{
+ long long dec, c = atomic64_read(v);
+
+ do {
+ dec = c - 1;
+ if (unlikely(dec < 0))
+ break;
+ } while (!atomic64_try_cmpxchg(v, &c, dec));
+
+ return dec;
}
#endif
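The conditional read-modify-write helpers above are typically used in lookup paths that must not resurrect an object whose count has already reached zero. A hedged usage sketch, not part of this patch (struct obj is hypothetical):

	struct obj {
		atomic_t refs;
	};

	static struct obj *obj_tryget(struct obj *o)
	{
		/* take a reference only if the count has not already dropped to zero */
		if (!atomic_inc_not_zero(&o->refs))
			return NULL;
		return o;
	}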
diff --git a/include/linux/audit.h b/include/linux/audit.h
index 69c78477590b..9334fbef7bae 100644
--- a/include/linux/audit.h
+++ b/include/linux/audit.h
@@ -117,6 +117,9 @@ struct filename;
extern void audit_log_session_info(struct audit_buffer *ab);
+#define AUDIT_OFF 0
+#define AUDIT_ON 1
+#define AUDIT_LOCKED 2
#ifdef CONFIG_AUDIT
/* These are defined in audit.c */
/* Public API */
@@ -202,7 +205,7 @@ static inline int audit_log_task_context(struct audit_buffer *ab)
static inline void audit_log_task_info(struct audit_buffer *ab,
struct task_struct *tsk)
{ }
-#define audit_enabled 0
+#define audit_enabled AUDIT_OFF
#endif /* CONFIG_AUDIT */
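The new symbolic states let callers distinguish a merely disabled audit subsystem from a locked configuration. A hedged sketch, not part of this patch (my_feature_set_enabled() is hypothetical):

	static int my_feature_set_enabled(bool on)
	{
		if (audit_enabled == AUDIT_LOCKED)
			return -EPERM;	/* the audit configuration may not be changed */
		return 0;		/* apply the change here */
	}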
#ifdef CONFIG_AUDIT_COMPAT_GENERIC
diff --git a/include/linux/backing-dev-defs.h b/include/linux/backing-dev-defs.h
index 0bd432a4d7bd..9a6bc0951cfa 100644
--- a/include/linux/backing-dev-defs.h
+++ b/include/linux/backing-dev-defs.h
@@ -12,6 +12,7 @@
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/kref.h>
+#include <linux/refcount.h>
struct page;
struct device;
@@ -22,7 +23,6 @@ struct dentry;
*/
enum wb_state {
WB_registered, /* bdi_register() was done */
- WB_shutting_down, /* wb_shutdown() in progress */
WB_writeback_running, /* Writeback is in progress */
WB_has_dirty_io, /* Dirty inodes on ->b_{dirty|io|more_io} */
WB_start_all, /* nr_pages == 0 (all) work pending */
@@ -76,7 +76,7 @@ enum wb_reason {
*/
struct bdi_writeback_congested {
unsigned long state; /* WB_[a]sync_congested flags */
- atomic_t refcnt; /* nr of attached wb's and blkg */
+ refcount_t refcnt; /* nr of attached wb's and blkg */
#ifdef CONFIG_CGROUP_WRITEBACK
struct backing_dev_info *__bdi; /* the associated bdi, set to NULL
@@ -189,6 +189,7 @@ struct backing_dev_info {
#ifdef CONFIG_CGROUP_WRITEBACK
struct radix_tree_root cgwb_tree; /* radix tree of active cgroup wbs */
struct rb_root cgwb_congested_tree; /* their congested states */
+ struct mutex cgwb_release_mutex; /* protect shutdown of wb structs */
#else
struct bdi_writeback_congested *wb_congested;
#endif
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
index 72ca0f3d39f3..c28a47cbe355 100644
--- a/include/linux/backing-dev.h
+++ b/include/linux/backing-dev.h
@@ -404,13 +404,13 @@ static inline bool inode_cgwb_enabled(struct inode *inode)
static inline struct bdi_writeback_congested *
wb_congested_get_create(struct backing_dev_info *bdi, int blkcg_id, gfp_t gfp)
{
- atomic_inc(&bdi->wb_congested->refcnt);
+ refcount_inc(&bdi->wb_congested->refcnt);
return bdi->wb_congested;
}
static inline void wb_congested_put(struct bdi_writeback_congested *congested)
{
- if (atomic_dec_and_test(&congested->refcnt))
+ if (refcount_dec_and_test(&congested->refcnt))
kfree(congested);
}
diff --git a/include/linux/backlight.h b/include/linux/backlight.h
index 7fbf0539e14a..0b5897446dca 100644
--- a/include/linux/backlight.h
+++ b/include/linux/backlight.h
@@ -79,7 +79,6 @@ struct backlight_properties {
/* Backlight type */
enum backlight_type type;
/* Flags used to signal drivers of state changes */
- /* Upper 4 bits are reserved for driver internal use */
unsigned int state;
#define BL_CORE_SUSPENDED (1 << 0) /* backlight is suspended */
diff --git a/include/linux/bio.h b/include/linux/bio.h
index f08f5fe7bd08..51371740d2a8 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -429,7 +429,6 @@ extern void bio_put(struct bio *);
extern void __bio_clone_fast(struct bio *, struct bio *);
extern struct bio *bio_clone_fast(struct bio *, gfp_t, struct bio_set *);
-extern struct bio *bio_clone_bioset(struct bio *, gfp_t, struct bio_set *bs);
extern struct bio_set fs_bio_set;
@@ -443,12 +442,6 @@ static inline struct bio *bio_kmalloc(gfp_t gfp_mask, unsigned int nr_iovecs)
return bio_alloc_bioset(gfp_mask, nr_iovecs, NULL);
}
-static inline struct bio *bio_clone_kmalloc(struct bio *bio, gfp_t gfp_mask)
-{
- return bio_clone_bioset(bio, gfp_mask, NULL);
-
-}
-
extern blk_qc_t submit_bio(struct bio *);
extern void bio_endio(struct bio *);
@@ -496,9 +489,9 @@ extern struct bio *bio_copy_kern(struct request_queue *, void *, unsigned int,
extern void bio_set_pages_dirty(struct bio *bio);
extern void bio_check_pages_dirty(struct bio *bio);
-void generic_start_io_acct(struct request_queue *q, int rw,
+void generic_start_io_acct(struct request_queue *q, int op,
unsigned long sectors, struct hd_struct *part);
-void generic_end_io_acct(struct request_queue *q, int rw,
+void generic_end_io_acct(struct request_queue *q, int op,
struct hd_struct *part,
unsigned long start_time);
@@ -553,8 +546,16 @@ do { \
#define bio_dev(bio) \
disk_devt((bio)->bi_disk)
+#if defined(CONFIG_MEMCG) && defined(CONFIG_BLK_CGROUP)
+int bio_associate_blkcg_from_page(struct bio *bio, struct page *page);
+#else
+static inline int bio_associate_blkcg_from_page(struct bio *bio,
+ struct page *page) { return 0; }
+#endif
+
#ifdef CONFIG_BLK_CGROUP
int bio_associate_blkcg(struct bio *bio, struct cgroup_subsys_state *blkcg_css);
+int bio_associate_blkg(struct bio *bio, struct blkcg_gq *blkg);
void bio_disassociate_task(struct bio *bio);
void bio_clone_blkcg_association(struct bio *dst, struct bio *src);
#else /* CONFIG_BLK_CGROUP */
diff --git a/include/linux/bitfield.h b/include/linux/bitfield.h
index cf2588d81148..3f1ef4450a7c 100644
--- a/include/linux/bitfield.h
+++ b/include/linux/bitfield.h
@@ -53,7 +53,7 @@
({ \
BUILD_BUG_ON_MSG(!__builtin_constant_p(_mask), \
_pfx "mask is not constant"); \
- BUILD_BUG_ON_MSG(!(_mask), _pfx "mask is zero"); \
+ BUILD_BUG_ON_MSG((_mask) == 0, _pfx "mask is zero"); \
BUILD_BUG_ON_MSG(__builtin_constant_p(_val) ? \
~((_mask) >> __bf_shf(_mask)) & (_val) : 0, \
_pfx "value too large for the field"); \
@@ -104,7 +104,7 @@
(typeof(_mask))(((_reg) & (_mask)) >> __bf_shf(_mask)); \
})
-extern void __compiletime_warning("value doesn't fit into mask")
+extern void __compiletime_error("value doesn't fit into mask")
__field_overflow(void);
extern void __compiletime_error("bad bitfield mask")
__bad_mask(void);
@@ -121,8 +121,8 @@ static __always_inline u64 field_mask(u64 field)
#define ____MAKE_OP(type,base,to,from) \
static __always_inline __##type type##_encode_bits(base v, base field) \
{ \
- if (__builtin_constant_p(v) && (v & ~field_multiplier(field))) \
- __field_overflow(); \
+ if (__builtin_constant_p(v) && (v & ~field_mask(field))) \
+ __field_overflow(); \
return to((v & field_mask(field)) * field_multiplier(field)); \
} \
static __always_inline __##type type##_replace_bits(__##type old, \
@@ -143,6 +143,7 @@ static __always_inline base type##_get_bits(__##type v, base field) \
____MAKE_OP(le##size,u##size,cpu_to_le##size,le##size##_to_cpu) \
____MAKE_OP(be##size,u##size,cpu_to_be##size,be##size##_to_cpu) \
____MAKE_OP(u##size,u##size,,)
+____MAKE_OP(u8,u8,,)
__MAKE_OP(16)
__MAKE_OP(32)
__MAKE_OP(64)
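Together with the new u8 variant, the generated helpers now cover u8/u16/u32/u64 plus the endian-annotated types. A hedged usage sketch, not part of this patch (MYHW_CTRL_MODE is a made-up register field):

	#define MYHW_CTRL_MODE	GENMASK(5, 4)

	static u32 myhw_set_mode(u32 ctrl, u8 mode)
	{
		/* insert "mode" into bits 5:4 without disturbing the rest */
		return u32_replace_bits(ctrl, mode, MYHW_CTRL_MODE);
	}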
diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h
index 1ee46f492267..acf5e8df3504 100644
--- a/include/linux/bitmap.h
+++ b/include/linux/bitmap.h
@@ -105,6 +105,14 @@
*/
/*
+ * Allocation and deallocation of bitmap.
+ * Provided in lib/bitmap.c to avoid circular dependency.
+ */
+extern unsigned long *bitmap_alloc(unsigned int nbits, gfp_t flags);
+extern unsigned long *bitmap_zalloc(unsigned int nbits, gfp_t flags);
+extern void bitmap_free(const unsigned long *bitmap);
+
+/*
* lib/bitmap.c provides these functions:
*/
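bitmap_zalloc()/bitmap_free() pair like kzalloc()/kfree() but size the allocation in bits. A hedged usage sketch, not part of this patch:

	static int example_alloc_mask(unsigned int nbits)
	{
		unsigned long *mask = bitmap_zalloc(nbits, GFP_KERNEL);

		if (!mask)
			return -ENOMEM;
		/* ... use the bitmap, e.g. bitmap_set()/test_bit() ... */
		bitmap_free(mask);
		return 0;
	}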
diff --git a/include/linux/bitops.h b/include/linux/bitops.h
index 4cac4e1a72ff..7ddb1349394d 100644
--- a/include/linux/bitops.h
+++ b/include/linux/bitops.h
@@ -2,29 +2,10 @@
#ifndef _LINUX_BITOPS_H
#define _LINUX_BITOPS_H
#include <asm/types.h>
+#include <linux/bits.h>
-#ifdef __KERNEL__
-#define BIT(nr) (1UL << (nr))
-#define BIT_ULL(nr) (1ULL << (nr))
-#define BIT_MASK(nr) (1UL << ((nr) % BITS_PER_LONG))
-#define BIT_WORD(nr) ((nr) / BITS_PER_LONG)
-#define BIT_ULL_MASK(nr) (1ULL << ((nr) % BITS_PER_LONG_LONG))
-#define BIT_ULL_WORD(nr) ((nr) / BITS_PER_LONG_LONG)
-#define BITS_PER_BYTE 8
-#define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long))
-#endif
-
-/*
- * Create a contiguous bitmask starting at bit position @l and ending at
- * position @h. For example
- * GENMASK_ULL(39, 21) gives us the 64bit vector 0x000000ffffe00000.
- */
-#define GENMASK(h, l) \
- (((~0UL) - (1UL << (l)) + 1) & (~0UL >> (BITS_PER_LONG - 1 - (h))))
-
-#define GENMASK_ULL(h, l) \
- (((~0ULL) - (1ULL << (l)) + 1) & \
- (~0ULL >> (BITS_PER_LONG_LONG - 1 - (h))))
+#define BITS_PER_TYPE(type) (sizeof(type) * BITS_PER_BYTE)
+#define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, BITS_PER_TYPE(long))
extern unsigned int __sw_hweight8(unsigned int w);
extern unsigned int __sw_hweight16(unsigned int w);
diff --git a/include/linux/bits.h b/include/linux/bits.h
new file mode 100644
index 000000000000..2b7b532c1d51
--- /dev/null
+++ b/include/linux/bits.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_BITS_H
+#define __LINUX_BITS_H
+#include <asm/bitsperlong.h>
+
+#define BIT(nr) (1UL << (nr))
+#define BIT_ULL(nr) (1ULL << (nr))
+#define BIT_MASK(nr) (1UL << ((nr) % BITS_PER_LONG))
+#define BIT_WORD(nr) ((nr) / BITS_PER_LONG)
+#define BIT_ULL_MASK(nr) (1ULL << ((nr) % BITS_PER_LONG_LONG))
+#define BIT_ULL_WORD(nr) ((nr) / BITS_PER_LONG_LONG)
+#define BITS_PER_BYTE 8
+
+/*
+ * Create a contiguous bitmask starting at bit position @l and ending at
+ * position @h. For example
+ * GENMASK_ULL(39, 21) gives us the 64bit vector 0x000000ffffe00000.
+ */
+#define GENMASK(h, l) \
+ (((~0UL) - (1UL << (l)) + 1) & (~0UL >> (BITS_PER_LONG - 1 - (h))))
+
+#define GENMASK_ULL(h, l) \
+ (((~0ULL) - (1ULL << (l)) + 1) & \
+ (~0ULL >> (BITS_PER_LONG_LONG - 1 - (h))))
+
+#endif /* __LINUX_BITS_H */
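Code that only needs the BIT()/GENMASK() family can now include <linux/bits.h> directly instead of pulling in all of <linux/bitops.h>. A hedged sketch, not part of this patch (the MYDEV_* fields are made up):

	#include <linux/bits.h>

	#define MYDEV_CTRL_ENABLE	BIT(0)
	#define MYDEV_CTRL_IRQ_MASK	GENMASK(7, 4)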
diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h
index 6c666fd7de3c..34aec30e06c7 100644
--- a/include/linux/blk-cgroup.h
+++ b/include/linux/blk-cgroup.h
@@ -35,6 +35,7 @@ enum blkg_rwstat_type {
BLKG_RWSTAT_WRITE,
BLKG_RWSTAT_SYNC,
BLKG_RWSTAT_ASYNC,
+ BLKG_RWSTAT_DISCARD,
BLKG_RWSTAT_NR,
BLKG_RWSTAT_TOTAL = BLKG_RWSTAT_NR,
@@ -136,6 +137,12 @@ struct blkcg_gq {
struct blkg_policy_data *pd[BLKCG_MAX_POLS];
struct rcu_head rcu_head;
+
+ atomic_t use_delay;
+ atomic64_t delay_nsec;
+ atomic64_t delay_start;
+ u64 last_delay;
+ int last_use;
};
typedef struct blkcg_policy_data *(blkcg_pol_alloc_cpd_fn)(gfp_t gfp);
@@ -148,6 +155,8 @@ typedef void (blkcg_pol_online_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_offline_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_free_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_reset_pd_stats_fn)(struct blkg_policy_data *pd);
+typedef size_t (blkcg_pol_stat_pd_fn)(struct blkg_policy_data *pd, char *buf,
+ size_t size);
struct blkcg_policy {
int plid;
@@ -167,6 +176,7 @@ struct blkcg_policy {
blkcg_pol_offline_pd_fn *pd_offline_fn;
blkcg_pol_free_pd_fn *pd_free_fn;
blkcg_pol_reset_pd_stats_fn *pd_reset_stats_fn;
+ blkcg_pol_stat_pd_fn *pd_stat_fn;
};
extern struct blkcg blkcg_root;
@@ -238,6 +248,42 @@ static inline struct blkcg *bio_blkcg(struct bio *bio)
return css_to_blkcg(task_css(current, io_cgrp_id));
}
+static inline bool blk_cgroup_congested(void)
+{
+ struct cgroup_subsys_state *css;
+ bool ret = false;
+
+ rcu_read_lock();
+ css = kthread_blkcg();
+ if (!css)
+ css = task_css(current, io_cgrp_id);
+ while (css) {
+ if (atomic_read(&css->cgroup->congestion_count)) {
+ ret = true;
+ break;
+ }
+ css = css->parent;
+ }
+ rcu_read_unlock();
+ return ret;
+}
+
+/**
+ * bio_issue_as_root_blkg - see if this bio needs to be issued as root blkg
+ * @bio: bio of interest
+ *
+ * Return: true if this bio needs to be submitted with the root blkg context.
+ *
+ * In order to avoid priority inversions we sometimes need to issue a bio as if
+ * it were attached to the root blkg, and then backcharge to the actual owning
+ * blkg. The idea is we do bio_blkcg() to look up the actual context for the
+ * bio and attach the appropriate blkg to the bio. Then we call this helper and
+ * if it is true run with the root blkg for that queue and then do any
+ * backcharging to the originating cgroup once the io is complete.
+ */
+static inline bool bio_issue_as_root_blkg(struct bio *bio)
+{
+ return (bio->bi_opf & (REQ_META | REQ_SWAP)) != 0;
+}
+
/**
* blkcg_parent - get the parent of a blkcg
* @blkcg: blkcg of interest
@@ -296,6 +342,17 @@ static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg,
}
/**
+ * blk_queue_root_blkg - return blkg for the (blkcg_root, @q) pair
+ * @q: request_queue of interest
+ *
+ * Lookup blkg for @q at the root level. See also blkg_lookup().
+ */
+static inline struct blkcg_gq *blk_queue_root_blkg(struct request_queue *q)
+{
+ return q->root_blkg;
+}
+
+/**
* blkg_to_pdata - get policy private data
* @blkg: blkg of interest
* @pol: policy of interest
@@ -355,6 +412,21 @@ static inline void blkg_get(struct blkcg_gq *blkg)
atomic_inc(&blkg->refcnt);
}
+/**
+ * blkg_try_get - try and get a blkg reference
+ * @blkg: blkg to get
+ *
+ * This is for use when doing an RCU lookup of the blkg. We may be in the midst
+ * of freeing this blkg, so we can only use it if the refcnt is not zero.
+ */
+static inline struct blkcg_gq *blkg_try_get(struct blkcg_gq *blkg)
+{
+ if (atomic_inc_not_zero(&blkg->refcnt))
+ return blkg;
+ return NULL;
+}
+
+
void __blkg_release_rcu(struct rcu_head *rcu);
/**
@@ -589,7 +661,9 @@ static inline void blkg_rwstat_add(struct blkg_rwstat *rwstat,
{
struct percpu_counter *cnt;
- if (op_is_write(op))
+ if (op_is_discard(op))
+ cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_DISCARD];
+ else if (op_is_write(op))
cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_WRITE];
else
cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_READ];
@@ -706,8 +780,14 @@ static inline bool blkcg_bio_issue_check(struct request_queue *q,
if (!throtl) {
blkg = blkg ?: q->root_blkg;
- blkg_rwstat_add(&blkg->stat_bytes, bio->bi_opf,
- bio->bi_iter.bi_size);
+ /*
+ * If the bio is flagged with BIO_QUEUE_ENTERED it means this
+ * is a split bio and we would have already accounted for the
+ * size of the bio.
+ */
+ if (!bio_flagged(bio, BIO_QUEUE_ENTERED))
+ blkg_rwstat_add(&blkg->stat_bytes, bio->bi_opf,
+ bio->bi_iter.bi_size);
blkg_rwstat_add(&blkg->stat_ios, bio->bi_opf, 1);
}
@@ -715,6 +795,59 @@ static inline bool blkcg_bio_issue_check(struct request_queue *q,
return !throtl;
}
+static inline void blkcg_use_delay(struct blkcg_gq *blkg)
+{
+ if (atomic_add_return(1, &blkg->use_delay) == 1)
+ atomic_inc(&blkg->blkcg->css.cgroup->congestion_count);
+}
+
+static inline int blkcg_unuse_delay(struct blkcg_gq *blkg)
+{
+ int old = atomic_read(&blkg->use_delay);
+
+ if (old == 0)
+ return 0;
+
+ /*
+ * We do this song and dance because we can race with somebody else
+ * adding or removing delay. If we just did an atomic_dec we'd end up
+ * negative and we'd already be in trouble. We need to subtract 1 and
+ * then check to see if we were the last delay so we can drop the
+ * congestion count on the cgroup.
+ */
+ while (old) {
+ int cur = atomic_cmpxchg(&blkg->use_delay, old, old - 1);
+ if (cur == old)
+ break;
+ old = cur;
+ }
+
+ if (old == 0)
+ return 0;
+ if (old == 1)
+ atomic_dec(&blkg->blkcg->css.cgroup->congestion_count);
+ return 1;
+}
+
+static inline void blkcg_clear_delay(struct blkcg_gq *blkg)
+{
+ int old = atomic_read(&blkg->use_delay);
+ if (!old)
+ return;
+ /* We only want 1 person clearing the congestion count for this blkg. */
+ while (old) {
+ int cur = atomic_cmpxchg(&blkg->use_delay, old, 0);
+ if (cur == old) {
+ atomic_dec(&blkg->blkcg->css.cgroup->congestion_count);
+ break;
+ }
+ old = cur;
+ }
+}
+
+void blkcg_add_delay(struct blkcg_gq *blkg, u64 now, u64 delta);
+void blkcg_schedule_throttle(struct request_queue *q, bool use_memdelay);
+void blkcg_maybe_throttle_current(void);
#else /* CONFIG_BLK_CGROUP */
struct blkcg {
@@ -734,9 +867,16 @@ struct blkcg_policy {
#define blkcg_root_css ((struct cgroup_subsys_state *)ERR_PTR(-EINVAL))
+static inline void blkcg_maybe_throttle_current(void) { }
+static inline bool blk_cgroup_congested(void) { return false; }
+
#ifdef CONFIG_BLOCK
+static inline void blkcg_schedule_throttle(struct request_queue *q, bool use_memdelay) { }
+
static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, void *key) { return NULL; }
+static inline struct blkcg_gq *blk_queue_root_blkg(struct request_queue *q)
+{ return NULL; }
static inline int blkcg_init_queue(struct request_queue *q) { return 0; }
static inline void blkcg_drain_queue(struct request_queue *q) { }
static inline void blkcg_exit_queue(struct request_queue *q) { }
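blkg_try_get() is meant for RCU lookups where the blkg may already be on its way to being freed. A hedged usage sketch, not part of this patch:

	static struct blkcg_gq *example_get_blkg(struct blkcg *blkcg,
						 struct request_queue *q)
	{
		struct blkcg_gq *blkg;

		rcu_read_lock();
		blkg = blkg_lookup(blkcg, q);
		if (blkg)
			blkg = blkg_try_get(blkg);	/* NULL if the refcnt already hit zero */
		rcu_read_unlock();

		return blkg;
	}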
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index e3147eb74222..1da59c16f637 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -35,10 +35,12 @@ struct blk_mq_hw_ctx {
struct sbitmap ctx_map;
struct blk_mq_ctx *dispatch_from;
+ unsigned int dispatch_busy;
- struct blk_mq_ctx **ctxs;
unsigned int nr_ctx;
+ struct blk_mq_ctx **ctxs;
+ spinlock_t dispatch_wait_lock;
wait_queue_entry_t dispatch_wait;
atomic_t wait_index;
@@ -287,6 +289,20 @@ void blk_mq_update_nr_hw_queues(struct blk_mq_tag_set *set, int nr_hw_queues);
void blk_mq_quiesce_queue_nowait(struct request_queue *q);
+/**
+ * blk_mq_mark_complete() - Set request state to complete
+ * @rq: request to set to complete state
+ *
+ * Returns true if request state was successfully set to complete. If
+ * successful, the caller is responsible for ensuring this request is ended, as
+ * blk_mq_complete_request will not work again.
+ */
+static inline bool blk_mq_mark_complete(struct request *rq)
+{
+ return cmpxchg(&rq->state, MQ_RQ_IN_FLIGHT, MQ_RQ_COMPLETE) ==
+ MQ_RQ_IN_FLIGHT;
+}
+
/*
* Driver command data is immediately after the request. So subtract request
* size to get back to the original request, add request size to get the PDU.
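blk_mq_mark_complete() lets a timeout handler claim a request atomically so that only one of the timeout path and the normal completion path ends it. A hedged driver-side sketch, not part of this patch (mydrv_end_request() is hypothetical):

	static enum blk_eh_timer_return mydrv_timeout(struct request *rq, bool reserved)
	{
		if (blk_mq_mark_complete(rq))
			mydrv_end_request(rq, BLK_STS_TIMEOUT);	/* we now own completion */

		/* otherwise the normal completion path won the race */
		return BLK_EH_DONE;
	}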
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index 3c4f390aea4b..f6dfb30737d8 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -179,11 +179,9 @@ struct bio {
*/
struct io_context *bi_ioc;
struct cgroup_subsys_state *bi_css;
-#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
- void *bi_cg_private;
+ struct blkcg_gq *bi_blkg;
struct bio_issue bi_issue;
#endif
-#endif
union {
#if defined(CONFIG_BLK_DEV_INTEGRITY)
struct bio_integrity_payload *bi_integrity; /* data integrity */
@@ -329,7 +327,7 @@ enum req_flag_bits {
/* for driver use */
__REQ_DRV,
-
+ __REQ_SWAP, /* swapping request. */
__REQ_NR_BITS, /* stops here */
};
@@ -351,6 +349,7 @@ enum req_flag_bits {
#define REQ_NOUNMAP (1ULL << __REQ_NOUNMAP)
#define REQ_DRV (1ULL << __REQ_DRV)
+#define REQ_SWAP (1ULL << __REQ_SWAP)
#define REQ_FAILFAST_MASK \
(REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER)
@@ -358,6 +357,14 @@ enum req_flag_bits {
#define REQ_NOMERGE_FLAGS \
(REQ_NOMERGE | REQ_PREFLUSH | REQ_FUA)
+enum stat_group {
+ STAT_READ,
+ STAT_WRITE,
+ STAT_DISCARD,
+
+ NR_STAT_GROUPS
+};
+
#define bio_op(bio) \
((bio)->bi_opf & REQ_OP_MASK)
#define req_op(req) \
@@ -395,6 +402,18 @@ static inline bool op_is_sync(unsigned int op)
(op & (REQ_SYNC | REQ_FUA | REQ_PREFLUSH));
}
+static inline bool op_is_discard(unsigned int op)
+{
+ return (op & REQ_OP_MASK) == REQ_OP_DISCARD;
+}
+
+static inline int op_stat_group(unsigned int op)
+{
+ if (op_is_discard(op))
+ return STAT_DISCARD;
+ return op_is_write(op);
+}
+
typedef unsigned int blk_qc_t;
#define BLK_QC_T_NONE -1U
#define BLK_QC_T_SHIFT 16
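op_stat_group() folds reads, writes and discards into the new stat_group indices so accounting code can index per-group arrays directly. A hedged usage sketch, not part of this patch (the part_stat_* fields shown are illustrative only):

	static void example_account(int cpu, struct hd_struct *part, struct bio *bio)
	{
		const int sgrp = op_stat_group(bio_op(bio));	/* STAT_READ/WRITE/DISCARD */

		part_stat_inc(cpu, part, ios[sgrp]);
		part_stat_add(cpu, part, sectors[sgrp], bio_sectors(bio));
	}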
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 9154570edf29..d6869e0e2b64 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -27,8 +27,6 @@
#include <linux/percpu-refcount.h>
#include <linux/scatterlist.h>
#include <linux/blkzoned.h>
-#include <linux/seqlock.h>
-#include <linux/u64_stats_sync.h>
struct module;
struct scsi_ioctl_command;
@@ -42,7 +40,7 @@ struct bsg_job;
struct blkcg_gq;
struct blk_flush_queue;
struct pr_ops;
-struct rq_wb;
+struct rq_qos;
struct blk_queue_stats;
struct blk_stat_callback;
@@ -442,10 +440,8 @@ struct request_queue {
int nr_rqs[2]; /* # allocated [a]sync rqs */
int nr_rqs_elvpriv; /* # allocated rqs w/ elvpriv */
- atomic_t shared_hctx_restart;
-
struct blk_queue_stats *stats;
- struct rq_wb *rq_wb;
+ struct rq_qos *rq_qos;
/*
* If blkcg is not used, @q->root_rl serves all requests. If blkcg
@@ -592,6 +588,7 @@ struct request_queue {
struct queue_limits limits;
+#ifdef CONFIG_BLK_DEV_ZONED
/*
* Zoned block device information for request dispatch control.
* nr_zones is the total number of zones of the device. This is always
@@ -612,6 +609,7 @@ struct request_queue {
unsigned int nr_zones;
unsigned long *seq_zones_bitmap;
unsigned long *seq_zones_wlock;
+#endif /* CONFIG_BLK_DEV_ZONED */
/*
* sg stuff
@@ -800,11 +798,7 @@ static inline unsigned int blk_queue_zone_sectors(struct request_queue *q)
return blk_queue_is_zoned(q) ? q->limits.chunk_sectors : 0;
}
-static inline unsigned int blk_queue_nr_zones(struct request_queue *q)
-{
- return q->nr_zones;
-}
-
+#ifdef CONFIG_BLK_DEV_ZONED
static inline unsigned int blk_queue_zone_no(struct request_queue *q,
sector_t sector)
{
@@ -820,6 +814,7 @@ static inline bool blk_queue_zone_is_seq(struct request_queue *q,
return false;
return test_bit(blk_queue_zone_no(q, sector), q->seq_zones_bitmap);
}
+#endif /* CONFIG_BLK_DEV_ZONED */
static inline bool rq_is_sync(struct request *rq)
{
@@ -1070,6 +1065,7 @@ static inline unsigned int blk_rq_cur_sectors(const struct request *rq)
return blk_rq_cur_bytes(rq) >> SECTOR_SHIFT;
}
+#ifdef CONFIG_BLK_DEV_ZONED
static inline unsigned int blk_rq_zone_no(struct request *rq)
{
return blk_queue_zone_no(rq->q, blk_rq_pos(rq));
@@ -1079,6 +1075,7 @@ static inline unsigned int blk_rq_zone_is_seq(struct request *rq)
{
return blk_queue_zone_is_seq(rq->q, blk_rq_pos(rq));
}
+#endif /* CONFIG_BLK_DEV_ZONED */
/*
* Some commands like WRITE SAME have a payload or data transfer size which
@@ -1119,8 +1116,8 @@ static inline unsigned int blk_max_size_offset(struct request_queue *q,
if (!q->limits.chunk_sectors)
return q->limits.max_sectors;
- return q->limits.chunk_sectors -
- (offset & (q->limits.chunk_sectors - 1));
+ return min(q->limits.max_sectors, (unsigned int)(q->limits.chunk_sectors -
+ (offset & (q->limits.chunk_sectors - 1))));
}
static inline unsigned int blk_rq_get_max_sectors(struct request *rq,
@@ -1437,8 +1434,6 @@ enum blk_default_limits {
BLK_SEG_BOUNDARY_MASK = 0xFFFFFFFFUL,
};
-#define blkdev_entry_to_request(entry) list_entry((entry), struct request, queuelist)
-
static inline unsigned long queue_segment_boundary(struct request_queue *q)
{
return q->limits.seg_boundary_mask;
@@ -1639,15 +1634,6 @@ static inline unsigned int bdev_zone_sectors(struct block_device *bdev)
return 0;
}
-static inline unsigned int bdev_nr_zones(struct block_device *bdev)
-{
- struct request_queue *q = bdev_get_queue(bdev);
-
- if (q)
- return blk_queue_nr_zones(q);
- return 0;
-}
-
static inline int queue_dma_alignment(struct request_queue *q)
{
return q ? q->dma_alignment : 511;
@@ -1877,6 +1863,28 @@ static inline bool integrity_req_gap_front_merge(struct request *req,
bip_next->bip_vec[0].bv_offset);
}
+/**
+ * bio_integrity_intervals - Return number of integrity intervals for a bio
+ * @bi: blk_integrity profile for device
+ * @sectors: Size of the bio in 512-byte sectors
+ *
+ * Description: The block layer calculates everything in 512 byte
+ * sectors but integrity metadata is done in terms of the data integrity
+ * interval size of the storage device. Convert the block layer sectors
+ * to the appropriate number of integrity intervals.
+ */
+static inline unsigned int bio_integrity_intervals(struct blk_integrity *bi,
+ unsigned int sectors)
+{
+ return sectors >> (bi->interval_exp - 9);
+}
+
+static inline unsigned int bio_integrity_bytes(struct blk_integrity *bi,
+ unsigned int sectors)
+{
+ return bio_integrity_intervals(bi, sectors) * bi->tuple_size;
+}
+
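A hedged worked example, not part of this patch: for a device with a 4096-byte protection interval, bi->interval_exp is 12, so a 32-sector (16 KiB) bio spans 32 >> (12 - 9) = 4 integrity intervals, and bio_integrity_bytes() comes to 4 * bi->tuple_size.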
#else /* CONFIG_BLK_DEV_INTEGRITY */
struct bio;
@@ -1950,12 +1958,24 @@ static inline bool integrity_req_gap_front_merge(struct request *req,
return false;
}
+static inline unsigned int bio_integrity_intervals(struct blk_integrity *bi,
+ unsigned int sectors)
+{
+ return 0;
+}
+
+static inline unsigned int bio_integrity_bytes(struct blk_integrity *bi,
+ unsigned int sectors)
+{
+ return 0;
+}
+
#endif /* CONFIG_BLK_DEV_INTEGRITY */
struct block_device_operations {
int (*open) (struct block_device *, fmode_t);
void (*release) (struct gendisk *, fmode_t);
- int (*rw_page)(struct block_device *, sector_t, struct page *, bool);
+ int (*rw_page)(struct block_device *, sector_t, struct page *, unsigned int);
int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
unsigned int (*check_events) (struct gendisk *disk,
diff --git a/include/linux/bootmem.h b/include/linux/bootmem.h
index 7942a96b1a9d..42515195d7d8 100644
--- a/include/linux/bootmem.h
+++ b/include/linux/bootmem.h
@@ -27,9 +27,20 @@ extern unsigned long max_pfn;
extern unsigned long long max_possible_pfn;
#ifndef CONFIG_NO_BOOTMEM
-/*
- * node_bootmem_map is a map pointer - the bits represent all physical
- * memory pages (including holes) on the node.
+/**
+ * struct bootmem_data - per-node information used by the bootmem allocator
+ * @node_min_pfn: the starting physical address of the node's memory
+ * @node_low_pfn: the end physical address of the directly addressable memory
+ * @node_bootmem_map: is a bitmap pointer - the bits represent all physical
+ * memory pages (including holes) on the node.
+ * @last_end_off: the offset within the page of the end of the last allocation;
+ * if 0, the page used is full
+ * @hint_idx: the PFN of the page used with the last allocation;
+ * together with using this with the @last_end_offset field,
+ * a test can be made to see if allocations can be merged
+ * with the page used for the last allocation rather than
+ * using up a full new page.
+ * @list: list entry in the linked list ordered by the memory addresses
*/
typedef struct bootmem_data {
unsigned long node_min_pfn;
diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h
index 975fb4cf1bb7..f91b0f8ff3a9 100644
--- a/include/linux/bpf-cgroup.h
+++ b/include/linux/bpf-cgroup.h
@@ -2,23 +2,48 @@
#ifndef _BPF_CGROUP_H
#define _BPF_CGROUP_H
+#include <linux/errno.h>
#include <linux/jump_label.h>
+#include <linux/percpu.h>
+#include <linux/rbtree.h>
#include <uapi/linux/bpf.h>
struct sock;
struct sockaddr;
struct cgroup;
struct sk_buff;
+struct bpf_map;
+struct bpf_prog;
struct bpf_sock_ops_kern;
+struct bpf_cgroup_storage;
#ifdef CONFIG_CGROUP_BPF
extern struct static_key_false cgroup_bpf_enabled_key;
#define cgroup_bpf_enabled static_branch_unlikely(&cgroup_bpf_enabled_key)
+DECLARE_PER_CPU(void*, bpf_cgroup_storage);
+
+struct bpf_cgroup_storage_map;
+
+struct bpf_storage_buffer {
+ struct rcu_head rcu;
+ char data[0];
+};
+
+struct bpf_cgroup_storage {
+ struct bpf_storage_buffer *buf;
+ struct bpf_cgroup_storage_map *map;
+ struct bpf_cgroup_storage_key key;
+ struct list_head list;
+ struct rb_node node;
+ struct rcu_head rcu;
+};
+
struct bpf_prog_list {
struct list_head node;
struct bpf_prog *prog;
+ struct bpf_cgroup_storage *storage;
};
struct bpf_prog_array;
@@ -76,6 +101,26 @@ int __cgroup_bpf_run_filter_sock_ops(struct sock *sk,
int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor,
short access, enum bpf_attach_type type);
+static inline void bpf_cgroup_storage_set(struct bpf_cgroup_storage *storage)
+{
+ struct bpf_storage_buffer *buf;
+
+ if (!storage)
+ return;
+
+ buf = READ_ONCE(storage->buf);
+ this_cpu_write(bpf_cgroup_storage, &buf->data[0]);
+}
+
+struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(struct bpf_prog *prog);
+void bpf_cgroup_storage_free(struct bpf_cgroup_storage *storage);
+void bpf_cgroup_storage_link(struct bpf_cgroup_storage *storage,
+ struct cgroup *cgroup,
+ enum bpf_attach_type type);
+void bpf_cgroup_storage_unlink(struct bpf_cgroup_storage *storage);
+int bpf_cgroup_storage_assign(struct bpf_prog *prog, struct bpf_map *map);
+void bpf_cgroup_storage_release(struct bpf_prog *prog, struct bpf_map *map);
+
/* Wrappers for __cgroup_bpf_run_filter_skb() guarded by cgroup_bpf_enabled. */
#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk, skb) \
({ \
@@ -188,12 +233,48 @@ int __cgroup_bpf_check_dev_permission(short dev_type, u32 major, u32 minor,
\
__ret; \
})
+int cgroup_bpf_prog_attach(const union bpf_attr *attr,
+ enum bpf_prog_type ptype, struct bpf_prog *prog);
+int cgroup_bpf_prog_detach(const union bpf_attr *attr,
+ enum bpf_prog_type ptype);
+int cgroup_bpf_prog_query(const union bpf_attr *attr,
+ union bpf_attr __user *uattr);
#else
+struct bpf_prog;
struct cgroup_bpf {};
static inline void cgroup_bpf_put(struct cgroup *cgrp) {}
static inline int cgroup_bpf_inherit(struct cgroup *cgrp) { return 0; }
+static inline int cgroup_bpf_prog_attach(const union bpf_attr *attr,
+ enum bpf_prog_type ptype,
+ struct bpf_prog *prog)
+{
+ return -EINVAL;
+}
+
+static inline int cgroup_bpf_prog_detach(const union bpf_attr *attr,
+ enum bpf_prog_type ptype)
+{
+ return -EINVAL;
+}
+
+static inline int cgroup_bpf_prog_query(const union bpf_attr *attr,
+ union bpf_attr __user *uattr)
+{
+ return -EINVAL;
+}
+
+static inline void bpf_cgroup_storage_set(struct bpf_cgroup_storage *storage) {}
+static inline int bpf_cgroup_storage_assign(struct bpf_prog *prog,
+ struct bpf_map *map) { return 0; }
+static inline void bpf_cgroup_storage_release(struct bpf_prog *prog,
+ struct bpf_map *map) {}
+static inline struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(
+ struct bpf_prog *prog) { return 0; }
+static inline void bpf_cgroup_storage_free(
+ struct bpf_cgroup_storage *storage) {}
+
#define cgroup_bpf_enabled (0)
#define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (0)
#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk,skb) ({ 0; })
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 995c3b1e59bf..523481a3471b 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -23,7 +23,7 @@ struct bpf_prog;
struct bpf_map;
struct sock;
struct seq_file;
-struct btf;
+struct btf_type;
/* map is generic key/value storage optionally accesible by eBPF programs */
struct bpf_map_ops {
@@ -48,8 +48,9 @@ struct bpf_map_ops {
u32 (*map_fd_sys_lookup_elem)(void *ptr);
void (*map_seq_show_elem)(struct bpf_map *map, void *key,
struct seq_file *m);
- int (*map_check_btf)(const struct bpf_map *map, const struct btf *btf,
- u32 key_type_id, u32 value_type_id);
+ int (*map_check_btf)(const struct bpf_map *map,
+ const struct btf_type *key_type,
+ const struct btf_type *value_type);
};
struct bpf_map {
@@ -85,6 +86,7 @@ struct bpf_map {
char name[BPF_OBJ_NAME_LEN];
};
+struct bpf_offload_dev;
struct bpf_offloaded_map;
struct bpf_map_dev_ops {
@@ -117,9 +119,13 @@ static inline bool bpf_map_offload_neutral(const struct bpf_map *map)
static inline bool bpf_map_support_seq_show(const struct bpf_map *map)
{
- return map->ops->map_seq_show_elem && map->ops->map_check_btf;
+ return map->btf && map->ops->map_seq_show_elem;
}
+int map_check_no_btf(const struct bpf_map *map,
+ const struct btf_type *key_type,
+ const struct btf_type *value_type);
+
extern const struct bpf_map_ops bpf_map_offload_ops;
/* function argument constraints */
@@ -154,6 +160,7 @@ enum bpf_arg_type {
enum bpf_return_type {
RET_INTEGER, /* function returns integer */
RET_VOID, /* function doesn't return anything */
+ RET_PTR_TO_MAP_VALUE, /* returns a pointer to map elem value */
RET_PTR_TO_MAP_VALUE_OR_NULL, /* returns a pointer to map elem value or NULL */
};
@@ -281,6 +288,7 @@ struct bpf_prog_aux {
struct bpf_prog *prog;
struct user_struct *user;
u64 load_time; /* ns since boottime */
+ struct bpf_map *cgroup_storage;
char name[BPF_OBJ_NAME_LEN];
#ifdef CONFIG_SECURITY
void *security;
@@ -347,12 +355,17 @@ int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
* The 'struct bpf_prog_array *' should only be replaced with xchg()
* since other cpus are walking the array of pointers in parallel.
*/
+struct bpf_prog_array_item {
+ struct bpf_prog *prog;
+ struct bpf_cgroup_storage *cgroup_storage;
+};
+
struct bpf_prog_array {
struct rcu_head rcu;
- struct bpf_prog *progs[0];
+ struct bpf_prog_array_item items[0];
};
-struct bpf_prog_array __rcu *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags);
+struct bpf_prog_array *bpf_prog_array_alloc(u32 prog_cnt, gfp_t flags);
void bpf_prog_array_free(struct bpf_prog_array __rcu *progs);
int bpf_prog_array_length(struct bpf_prog_array __rcu *progs);
int bpf_prog_array_copy_to_user(struct bpf_prog_array __rcu *progs,
@@ -370,7 +383,8 @@ int bpf_prog_array_copy(struct bpf_prog_array __rcu *old_array,
#define __BPF_PROG_RUN_ARRAY(array, ctx, func, check_non_null) \
({ \
- struct bpf_prog **_prog, *__prog; \
+ struct bpf_prog_array_item *_item; \
+ struct bpf_prog *_prog; \
struct bpf_prog_array *_array; \
u32 _ret = 1; \
preempt_disable(); \
@@ -378,10 +392,11 @@ int bpf_prog_array_copy(struct bpf_prog_array __rcu *old_array,
_array = rcu_dereference(array); \
if (unlikely(check_non_null && !_array))\
goto _out; \
- _prog = _array->progs; \
- while ((__prog = READ_ONCE(*_prog))) { \
- _ret &= func(__prog, ctx); \
- _prog++; \
+ _item = &_array->items[0]; \
+ while ((_prog = READ_ONCE(_item->prog))) { \
+ bpf_cgroup_storage_set(_item->cgroup_storage); \
+ _ret &= func(_prog, ctx); \
+ _item++; \
} \
_out: \
rcu_read_unlock(); \
@@ -434,6 +449,8 @@ struct bpf_map * __must_check bpf_map_inc(struct bpf_map *map, bool uref);
void bpf_map_put_with_uref(struct bpf_map *map);
void bpf_map_put(struct bpf_map *map);
int bpf_map_precharge_memlock(u32 pages);
+int bpf_map_charge_memlock(struct bpf_map *map, u32 pages);
+void bpf_map_uncharge_memlock(struct bpf_map *map, u32 pages);
void *bpf_map_area_alloc(size_t size, int numa_node);
void bpf_map_area_free(void *base);
void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr);
@@ -488,12 +505,15 @@ void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth);
/* Map specifics */
struct xdp_buff;
+struct sk_buff;
struct bpf_dtab_netdev *__dev_map_lookup_elem(struct bpf_map *map, u32 key);
void __dev_map_insert_ctx(struct bpf_map *map, u32 index);
void __dev_map_flush(struct bpf_map *map);
int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
struct net_device *dev_rx);
+int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb,
+ struct bpf_prog *xdp_prog);
struct bpf_cpu_map_entry *__cpu_map_lookup_elem(struct bpf_map *map, u32 key);
void __cpu_map_insert_ctx(struct bpf_map *map, u32 index);
@@ -509,6 +529,7 @@ static inline int bpf_map_attr_numa_node(const union bpf_attr *attr)
}
struct bpf_prog *bpf_prog_get_type_path(const char *name, enum bpf_prog_type type);
+int array_map_alloc_check(union bpf_attr *attr);
#else /* !CONFIG_BPF_SYSCALL */
static inline struct bpf_prog *bpf_prog_get(u32 ufd)
@@ -586,6 +607,15 @@ int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
return 0;
}
+struct sk_buff;
+
+static inline int dev_map_generic_redirect(struct bpf_dtab_netdev *dst,
+ struct sk_buff *skb,
+ struct bpf_prog *xdp_prog)
+{
+ return 0;
+}
+
static inline
struct bpf_cpu_map_entry *__cpu_map_lookup_elem(struct bpf_map *map, u32 key)
{
@@ -636,7 +666,15 @@ int bpf_map_offload_delete_elem(struct bpf_map *map, void *key);
int bpf_map_offload_get_next_key(struct bpf_map *map,
void *key, void *next_key);
-bool bpf_offload_dev_match(struct bpf_prog *prog, struct bpf_map *map);
+bool bpf_offload_prog_map_match(struct bpf_prog *prog, struct bpf_map *map);
+
+struct bpf_offload_dev *bpf_offload_dev_create(void);
+void bpf_offload_dev_destroy(struct bpf_offload_dev *offdev);
+int bpf_offload_dev_netdev_register(struct bpf_offload_dev *offdev,
+ struct net_device *netdev);
+void bpf_offload_dev_netdev_unregister(struct bpf_offload_dev *offdev,
+ struct net_device *netdev);
+bool bpf_offload_dev_match(struct bpf_prog *prog, struct net_device *netdev);
#if defined(CONFIG_NET) && defined(CONFIG_BPF_SYSCALL)
int bpf_prog_offload_init(struct bpf_prog *prog, union bpf_attr *attr);
@@ -684,6 +722,8 @@ static inline void bpf_map_offload_map_free(struct bpf_map *map)
struct sock *__sock_map_lookup_elem(struct bpf_map *map, u32 key);
struct sock *__sock_hash_lookup_elem(struct bpf_map *map, void *key);
int sock_map_prog(struct bpf_map *map, struct bpf_prog *prog, u32 type);
+int sockmap_get_from_fd(const union bpf_attr *attr, int type,
+ struct bpf_prog *prog);
#else
static inline struct sock *__sock_map_lookup_elem(struct bpf_map *map, u32 key)
{
@@ -702,6 +742,12 @@ static inline int sock_map_prog(struct bpf_map *map,
{
return -EOPNOTSUPP;
}
+
+static inline int sockmap_get_from_fd(const union bpf_attr *attr, int type,
+ struct bpf_prog *prog)
+{
+ return -EINVAL;
+}
#endif
#if defined(CONFIG_XDP_SOCKETS)
@@ -729,6 +775,33 @@ static inline void __xsk_map_flush(struct bpf_map *map)
}
#endif
+#if defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL)
+void bpf_sk_reuseport_detach(struct sock *sk);
+int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map, void *key,
+ void *value);
+int bpf_fd_reuseport_array_update_elem(struct bpf_map *map, void *key,
+ void *value, u64 map_flags);
+#else
+static inline void bpf_sk_reuseport_detach(struct sock *sk)
+{
+}
+
+#ifdef CONFIG_BPF_SYSCALL
+static inline int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map,
+ void *key, void *value)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int bpf_fd_reuseport_array_update_elem(struct bpf_map *map,
+ void *key, void *value,
+ u64 map_flags)
+{
+ return -EOPNOTSUPP;
+}
+#endif /* CONFIG_BPF_SYSCALL */
+#endif /* defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL) */
+
/* verifier prototypes for helper functions called from eBPF programs */
extern const struct bpf_func_proto bpf_map_lookup_elem_proto;
extern const struct bpf_func_proto bpf_map_update_elem_proto;
@@ -748,6 +821,8 @@ extern const struct bpf_func_proto bpf_sock_map_update_proto;
extern const struct bpf_func_proto bpf_sock_hash_update_proto;
extern const struct bpf_func_proto bpf_get_current_cgroup_id_proto;
+extern const struct bpf_func_proto bpf_get_local_storage_proto;
+
/* Shared helpers among cBPF and eBPF. */
void bpf_user_rnd_init_once(void);
u64 bpf_user_rnd_u32(u64 r1, u64 r2, u64 r3, u64 r4, u64 r5);
diff --git a/include/linux/bpf_lirc.h b/include/linux/bpf_lirc.h
index 5f8a4283092d..9d9ff755ec29 100644
--- a/include/linux/bpf_lirc.h
+++ b/include/linux/bpf_lirc.h
@@ -5,11 +5,12 @@
#include <uapi/linux/bpf.h>
#ifdef CONFIG_BPF_LIRC_MODE2
-int lirc_prog_attach(const union bpf_attr *attr);
+int lirc_prog_attach(const union bpf_attr *attr, struct bpf_prog *prog);
int lirc_prog_detach(const union bpf_attr *attr);
int lirc_prog_query(const union bpf_attr *attr, union bpf_attr __user *uattr);
#else
-static inline int lirc_prog_attach(const union bpf_attr *attr)
+static inline int lirc_prog_attach(const union bpf_attr *attr,
+ struct bpf_prog *prog)
{
return -EINVAL;
}
diff --git a/include/linux/bpf_types.h b/include/linux/bpf_types.h
index c5700c2d5549..cd26c090e7c0 100644
--- a/include/linux/bpf_types.h
+++ b/include/linux/bpf_types.h
@@ -29,6 +29,9 @@ BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_DEVICE, cg_dev)
#ifdef CONFIG_BPF_LIRC_MODE2
BPF_PROG_TYPE(BPF_PROG_TYPE_LIRC_MODE2, lirc_mode2)
#endif
+#ifdef CONFIG_INET
+BPF_PROG_TYPE(BPF_PROG_TYPE_SK_REUSEPORT, sk_reuseport)
+#endif
BPF_MAP_TYPE(BPF_MAP_TYPE_ARRAY, array_map_ops)
BPF_MAP_TYPE(BPF_MAP_TYPE_PERCPU_ARRAY, percpu_array_map_ops)
@@ -37,6 +40,9 @@ BPF_MAP_TYPE(BPF_MAP_TYPE_PERF_EVENT_ARRAY, perf_event_array_map_ops)
#ifdef CONFIG_CGROUPS
BPF_MAP_TYPE(BPF_MAP_TYPE_CGROUP_ARRAY, cgroup_array_map_ops)
#endif
+#ifdef CONFIG_CGROUP_BPF
+BPF_MAP_TYPE(BPF_MAP_TYPE_CGROUP_STORAGE, cgroup_storage_map_ops)
+#endif
BPF_MAP_TYPE(BPF_MAP_TYPE_HASH, htab_map_ops)
BPF_MAP_TYPE(BPF_MAP_TYPE_PERCPU_HASH, htab_percpu_map_ops)
BPF_MAP_TYPE(BPF_MAP_TYPE_LRU_HASH, htab_lru_map_ops)
@@ -57,4 +63,7 @@ BPF_MAP_TYPE(BPF_MAP_TYPE_CPUMAP, cpu_map_ops)
#if defined(CONFIG_XDP_SOCKETS)
BPF_MAP_TYPE(BPF_MAP_TYPE_XSKMAP, xsk_map_ops)
#endif
+#ifdef CONFIG_INET
+BPF_MAP_TYPE(BPF_MAP_TYPE_REUSEPORT_SOCKARRAY, reuseport_array_ops)
+#endif
#endif
diff --git a/include/linux/bpfilter.h b/include/linux/bpfilter.h
index 687b1760bb9f..f02cee0225d4 100644
--- a/include/linux/bpfilter.h
+++ b/include/linux/bpfilter.h
@@ -5,10 +5,10 @@
#include <uapi/linux/bpfilter.h>
struct sock;
-int bpfilter_ip_set_sockopt(struct sock *sk, int optname, char *optval,
+int bpfilter_ip_set_sockopt(struct sock *sk, int optname, char __user *optval,
unsigned int optlen);
-int bpfilter_ip_get_sockopt(struct sock *sk, int optname, char *optval,
- int *optlen);
+int bpfilter_ip_get_sockopt(struct sock *sk, int optname, char __user *optval,
+ int __user *optlen);
extern int (*bpfilter_process_sockopt)(struct sock *sk, int optname,
char __user *optval,
unsigned int optlen, bool is_set);
diff --git a/include/linux/brcmphy.h b/include/linux/brcmphy.h
index daa9234a9baf..949e9af8d9d6 100644
--- a/include/linux/brcmphy.h
+++ b/include/linux/brcmphy.h
@@ -45,6 +45,7 @@
#define PHY_ID_BCM7445 0x600d8510
#define PHY_ID_BCM_CYGNUS 0xae025200
+#define PHY_ID_BCM_OMEGA 0xae025100
#define PHY_BCM_OUI_MASK 0xfffffc00
#define PHY_BCM_OUI_1 0x00206000
diff --git a/include/linux/build-salt.h b/include/linux/build-salt.h
new file mode 100644
index 000000000000..bb007bd05e7a
--- /dev/null
+++ b/include/linux/build-salt.h
@@ -0,0 +1,20 @@
+#ifndef __BUILD_SALT_H
+#define __BUILD_SALT_H
+
+#include <linux/elfnote.h>
+
+#define LINUX_ELFNOTE_BUILD_SALT 0x100
+
+#ifdef __ASSEMBLER__
+
+#define BUILD_SALT \
+ ELFNOTE(Linux, LINUX_ELFNOTE_BUILD_SALT, .asciz CONFIG_BUILD_SALT)
+
+#else
+
+#define BUILD_SALT \
+ ELFNOTE32("Linux", LINUX_ELFNOTE_BUILD_SALT, CONFIG_BUILD_SALT)
+
+#endif
+
+#endif /* __BUILD_SALT_H */
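As a rough illustration of how the new BUILD_SALT macro might be used, the sketch below drops the note into an ordinary C object. The placement (a version/vDSO-style object) is an assumption of this sketch rather than something the header dictates; CONFIG_BUILD_SALT is expected to come from Kconfig.

    #include <linux/build-salt.h>

    /* Emits a "Linux" ELF note of type 0x100 carrying CONFIG_BUILD_SALT,
     * so two otherwise identical builds can be told apart by their notes. */
    BUILD_SALT;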
diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h
index 055aaf5ed9af..a83e1f632eb7 100644
--- a/include/linux/can/dev.h
+++ b/include/linux/can/dev.h
@@ -143,7 +143,12 @@ u8 can_dlc2len(u8 can_dlc);
/* map the sanitized data length to an appropriate data length code */
u8 can_len2dlc(u8 len);
-struct net_device *alloc_candev(int sizeof_priv, unsigned int echo_skb_max);
+struct net_device *alloc_candev_mqs(int sizeof_priv, unsigned int echo_skb_max,
+ unsigned int txqs, unsigned int rxqs);
+#define alloc_candev(sizeof_priv, echo_skb_max) \
+ alloc_candev_mqs(sizeof_priv, echo_skb_max, 1, 1)
+#define alloc_candev_mq(sizeof_priv, echo_skb_max, count) \
+ alloc_candev_mqs(sizeof_priv, echo_skb_max, count, count)
void free_candev(struct net_device *dev);
/* a candev safe wrapper around netdev_priv */
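A minimal sketch of a driver picking up the new multi-queue allocator; the foo_can names, the queue count parameter and the echo_skb_max value of 16 are invented for illustration.

    struct foo_can_priv {
    	struct can_priv can;	/* must stay first for the candev helpers */
    	void __iomem *regs;
    };

    static struct net_device *foo_can_alloc(unsigned int nqueues)
    {
    	/* one tx and one rx queue per hardware channel, 16 echo skbs */
    	return alloc_candev_mq(sizeof(struct foo_can_priv), 16, nqueues);
    }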
diff --git a/include/linux/cdrom.h b/include/linux/cdrom.h
index e75dfd1f1dec..528271c60018 100644
--- a/include/linux/cdrom.h
+++ b/include/linux/cdrom.h
@@ -13,6 +13,7 @@
#include <linux/fs.h> /* not really needed, later.. */
#include <linux/list.h>
+#include <scsi/scsi_common.h>
#include <uapi/linux/cdrom.h>
struct packet_command
@@ -21,7 +22,7 @@ struct packet_command
unsigned char *buffer;
unsigned int buflen;
int stat;
- struct request_sense *sense;
+ struct scsi_sense_hdr *sshdr;
unsigned char data_direction;
int quiet;
int timeout;
diff --git a/include/linux/ceph/auth.h b/include/linux/ceph/auth.h
index e931da8424a4..6728c2ee0205 100644
--- a/include/linux/ceph/auth.h
+++ b/include/linux/ceph/auth.h
@@ -64,6 +64,10 @@ struct ceph_auth_client_ops {
/* ensure that an existing authorizer is up to date */
int (*update_authorizer)(struct ceph_auth_client *ac, int peer_type,
struct ceph_auth_handshake *auth);
+ int (*add_authorizer_challenge)(struct ceph_auth_client *ac,
+ struct ceph_authorizer *a,
+ void *challenge_buf,
+ int challenge_buf_len);
int (*verify_authorizer_reply)(struct ceph_auth_client *ac,
struct ceph_authorizer *a);
void (*invalidate_authorizer)(struct ceph_auth_client *ac,
@@ -118,6 +122,10 @@ void ceph_auth_destroy_authorizer(struct ceph_authorizer *a);
extern int ceph_auth_update_authorizer(struct ceph_auth_client *ac,
int peer_type,
struct ceph_auth_handshake *a);
+int ceph_auth_add_authorizer_challenge(struct ceph_auth_client *ac,
+ struct ceph_authorizer *a,
+ void *challenge_buf,
+ int challenge_buf_len);
extern int ceph_auth_verify_authorizer_reply(struct ceph_auth_client *ac,
struct ceph_authorizer *a);
extern void ceph_auth_invalidate_authorizer(struct ceph_auth_client *ac,
diff --git a/include/linux/ceph/ceph_features.h b/include/linux/ceph/ceph_features.h
index 3901927cf6a0..6b92b3395fa9 100644
--- a/include/linux/ceph/ceph_features.h
+++ b/include/linux/ceph/ceph_features.h
@@ -165,9 +165,9 @@ DEFINE_CEPH_FEATURE(58, 1, FS_FILE_LAYOUT_V2) // overlap
DEFINE_CEPH_FEATURE(59, 1, FS_BTIME)
DEFINE_CEPH_FEATURE(59, 1, FS_CHANGE_ATTR) // overlap
DEFINE_CEPH_FEATURE(59, 1, MSG_ADDR2) // overlap
-DEFINE_CEPH_FEATURE(60, 1, BLKIN_TRACING) // *do not share this bit*
+DEFINE_CEPH_FEATURE(60, 1, OSD_RECOVERY_DELETES) // *do not share this bit*
+DEFINE_CEPH_FEATURE(61, 1, CEPHX_V2) // *do not share this bit*
-DEFINE_CEPH_FEATURE(61, 1, RESERVED2) // unused, but slow down!
DEFINE_CEPH_FEATURE(62, 1, RESERVED) // do not use; used as a sentinel
DEFINE_CEPH_FEATURE_DEPRECATED(63, 1, RESERVED_BROKEN, LUMINOUS) // client-facing
@@ -210,7 +210,8 @@ DEFINE_CEPH_FEATURE_DEPRECATED(63, 1, RESERVED_BROKEN, LUMINOUS) // client-facin
CEPH_FEATURE_SERVER_JEWEL | \
CEPH_FEATURE_MON_STATEFUL_SUB | \
CEPH_FEATURE_CRUSH_TUNABLES5 | \
- CEPH_FEATURE_NEW_OSDOPREPLY_ENCODING)
+ CEPH_FEATURE_NEW_OSDOPREPLY_ENCODING | \
+ CEPH_FEATURE_CEPHX_V2)
#define CEPH_FEATURES_REQUIRED_DEFAULT \
(CEPH_FEATURE_NOSRCADDR | \
diff --git a/include/linux/ceph/decode.h b/include/linux/ceph/decode.h
index d143ac8879c6..a6c2a48d42e0 100644
--- a/include/linux/ceph/decode.h
+++ b/include/linux/ceph/decode.h
@@ -194,16 +194,22 @@ ceph_decode_skip_n(p, end, sizeof(u8), bad)
} while (0)
/*
- * struct ceph_timespec <-> struct timespec
+ * struct ceph_timespec <-> struct timespec64
*/
-static inline void ceph_decode_timespec(struct timespec *ts,
- const struct ceph_timespec *tv)
+static inline void ceph_decode_timespec64(struct timespec64 *ts,
+ const struct ceph_timespec *tv)
{
- ts->tv_sec = (__kernel_time_t)le32_to_cpu(tv->tv_sec);
+ /*
+ * This will still overflow in year 2106. We could extend
+ * the protocol to steal two more bits from tv_nsec to
+ * add three more 136 year epochs after that the way ext4
+ * does if necessary.
+ */
+ ts->tv_sec = (time64_t)le32_to_cpu(tv->tv_sec);
ts->tv_nsec = (long)le32_to_cpu(tv->tv_nsec);
}
-static inline void ceph_encode_timespec(struct ceph_timespec *tv,
- const struct timespec *ts)
+static inline void ceph_encode_timespec64(struct ceph_timespec *tv,
+ const struct timespec64 *ts)
{
tv->tv_sec = cpu_to_le32((u32)ts->tv_sec);
tv->tv_nsec = cpu_to_le32((u32)ts->tv_nsec);
diff --git a/include/linux/ceph/messenger.h b/include/linux/ceph/messenger.h
index c7dfcb8a1fb2..fc2b4491ee0a 100644
--- a/include/linux/ceph/messenger.h
+++ b/include/linux/ceph/messenger.h
@@ -31,6 +31,9 @@ struct ceph_connection_operations {
struct ceph_auth_handshake *(*get_authorizer) (
struct ceph_connection *con,
int *proto, int force_new);
+ int (*add_authorizer_challenge)(struct ceph_connection *con,
+ void *challenge_buf,
+ int challenge_buf_len);
int (*verify_authorizer_reply) (struct ceph_connection *con);
int (*invalidate_authorizer)(struct ceph_connection *con);
@@ -286,9 +289,8 @@ struct ceph_connection {
attempt for this connection, client */
u32 peer_global_seq; /* peer's global seq for this connection */
+ struct ceph_auth_handshake *auth;
int auth_retry; /* true if we need a newer authorizer */
- void *auth_reply_buf; /* where to put the authorizer reply */
- int auth_reply_buf_len;
struct mutex mutex;
@@ -330,7 +332,7 @@ struct ceph_connection {
int in_base_pos; /* bytes read */
__le64 in_temp_ack; /* for reading an ack */
- struct timespec last_keepalive_ack; /* keepalive2 ack stamp */
+ struct timespec64 last_keepalive_ack; /* keepalive2 ack stamp */
struct delayed_work work; /* send|recv work */
unsigned long delay; /* current delay interval */
diff --git a/include/linux/ceph/msgr.h b/include/linux/ceph/msgr.h
index 73ae2a926548..9e50aede46c8 100644
--- a/include/linux/ceph/msgr.h
+++ b/include/linux/ceph/msgr.h
@@ -91,7 +91,7 @@ struct ceph_entity_inst {
#define CEPH_MSGR_TAG_SEQ 13 /* 64-bit int follows with seen seq number */
#define CEPH_MSGR_TAG_KEEPALIVE2 14 /* keepalive2 byte + ceph_timespec */
#define CEPH_MSGR_TAG_KEEPALIVE2_ACK 15 /* keepalive2 reply */
-
+#define CEPH_MSGR_TAG_CHALLENGE_AUTHORIZER 16 /* cephx v2 doing server challenge */
/*
* connection negotiation
diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h
index 0d6ee04b4c41..02096da01845 100644
--- a/include/linux/ceph/osd_client.h
+++ b/include/linux/ceph/osd_client.h
@@ -199,7 +199,7 @@ struct ceph_osd_request {
/* set by submitter */
u64 r_snapid; /* for reads, CEPH_NOSNAP o/w */
struct ceph_snap_context *r_snapc; /* for writes */
- struct timespec r_mtime; /* ditto */
+ struct timespec64 r_mtime; /* ditto */
u64 r_data_offset; /* ditto */
bool r_linger; /* don't resend on failure */
@@ -253,7 +253,7 @@ struct ceph_osd_linger_request {
struct ceph_osd_request_target t;
u32 map_dne_bound;
- struct timespec mtime;
+ struct timespec64 mtime;
struct kref kref;
struct mutex lock;
@@ -508,7 +508,7 @@ extern int ceph_osdc_writepages(struct ceph_osd_client *osdc,
struct ceph_snap_context *sc,
u64 off, u64 len,
u32 truncate_seq, u64 truncate_size,
- struct timespec *mtime,
+ struct timespec64 *mtime,
struct page **pages, int nr_pages);
/* watch/notify */
@@ -528,12 +528,12 @@ int ceph_osdc_notify_ack(struct ceph_osd_client *osdc,
u64 notify_id,
u64 cookie,
void *payload,
- size_t payload_len);
+ u32 payload_len);
int ceph_osdc_notify(struct ceph_osd_client *osdc,
struct ceph_object_id *oid,
struct ceph_object_locator *oloc,
void *payload,
- size_t payload_len,
+ u32 payload_len,
u32 timeout,
struct page ***preply_pages,
size_t *preply_len);
diff --git a/include/linux/ceph/pagelist.h b/include/linux/ceph/pagelist.h
index 7edcded07641..d0223364349f 100644
--- a/include/linux/ceph/pagelist.h
+++ b/include/linux/ceph/pagelist.h
@@ -68,7 +68,7 @@ static inline int ceph_pagelist_encode_8(struct ceph_pagelist *pl, u8 v)
return ceph_pagelist_append(pl, &v, 1);
}
static inline int ceph_pagelist_encode_string(struct ceph_pagelist *pl,
- char *s, size_t len)
+ char *s, u32 len)
{
int ret = ceph_pagelist_encode_32(pl, len);
if (ret)
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
index c0e68f903011..ff20b677fb9f 100644
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -438,6 +438,9 @@ struct cgroup {
/* used to store eBPF programs */
struct cgroup_bpf bpf;
+ /* If there is block congestion on this cgroup. */
+ atomic_t congestion_count;
+
/* ids of the ancestors at each level including self */
int ancestor_ids[];
};
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index c9fdf6f57913..32c553556bbd 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -554,6 +554,36 @@ static inline bool cgroup_is_descendant(struct cgroup *cgrp,
}
/**
+ * cgroup_ancestor - find ancestor of cgroup
+ * @cgrp: cgroup to find ancestor of
+ * @ancestor_level: level of ancestor to find starting from root
+ *
+ * Find ancestor of cgroup at specified level starting from root if it exists
+ * and return pointer to it. Return NULL if @cgrp doesn't have ancestor at
+ * @ancestor_level.
+ *
+ * This function is safe to call as long as @cgrp is accessible.
+ */
+static inline struct cgroup *cgroup_ancestor(struct cgroup *cgrp,
+ int ancestor_level)
+{
+ struct cgroup *ptr;
+
+ if (cgrp->level < ancestor_level)
+ return NULL;
+
+ for (ptr = cgrp;
+ ptr && ptr->level > ancestor_level;
+ ptr = cgroup_parent(ptr))
+ ;
+
+ if (ptr && ptr->level == ancestor_level)
+ return ptr;
+
+ return NULL;
+}
+
+/**
* task_under_cgroup_hierarchy - test task's membership of cgroup ancestry
* @task: the task to be tested
* @ancestor: possible ancestor of @task's cgroup
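A short usage sketch for the new cgroup_ancestor() helper, assuming the caller already has safe access to the task's cgroup (for example via task_dfl_cgroup() under RCU); the level value, function name and error handling are illustrative only.

    static int foo_check_level(struct task_struct *task)
    {
    	struct cgroup *ancestor;

    	/* fetch the ancestor at level 2 below the root, if one exists */
    	ancestor = cgroup_ancestor(task_dfl_cgroup(task), 2);
    	if (!ancestor)
    		return -ENOENT;	/* the task's cgroup sits above that level */
    	return 0;
    }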
diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
index b7cfa037e593..08b1aa70a38d 100644
--- a/include/linux/clk-provider.h
+++ b/include/linux/clk-provider.h
@@ -38,6 +38,8 @@
#define CLK_IS_CRITICAL BIT(11) /* do not gate, ever */
/* parents need enable during gate/ungate, set rate and re-parent */
#define CLK_OPS_PARENT_ENABLE BIT(12)
+/* duty cycle call may be forwarded to the parent clock */
+#define CLK_DUTY_CYCLE_PARENT BIT(13)
struct clk;
struct clk_hw;
@@ -67,6 +69,17 @@ struct clk_rate_request {
};
/**
+ * struct clk_duty - Structure encoding the duty cycle ratio of a clock
+ *
+ * @num: Numerator of the duty cycle ratio
+ * @den: Denominator of the duty cycle ratio
+ */
+struct clk_duty {
+ unsigned int num;
+ unsigned int den;
+};
+
+/**
* struct clk_ops - Callback operations for hardware clocks; these are to
* be provided by the clock implementation, and will be called by drivers
* through the clk_* api.
@@ -169,6 +182,15 @@ struct clk_rate_request {
* by the second argument. Valid values for degrees are
* 0-359. Return 0 on success, otherwise -EERROR.
*
+ * @get_duty_cycle: Queries the hardware to get the current duty cycle ratio
+ * of a clock. The returned denominator cannot be 0 and must be
+ * greater than or equal to the numerator.
+ *
+ * @set_duty_cycle: Apply the duty cycle ratio to this clock signal specified by
+ * the numerator (2nd argument) and denominator (3rd argument).
+ * The argument must be a valid ratio (denominator > 0
+ * and >= numerator). Return 0 on success, otherwise -EERROR.
+ *
* @init: Perform platform-specific initialization magic.
* This is not used by any of the basic clock types.
* Please consider other ways of solving initialization problems
@@ -218,6 +240,10 @@ struct clk_ops {
unsigned long parent_accuracy);
int (*get_phase)(struct clk_hw *hw);
int (*set_phase)(struct clk_hw *hw, int degrees);
+ int (*get_duty_cycle)(struct clk_hw *hw,
+ struct clk_duty *duty);
+ int (*set_duty_cycle)(struct clk_hw *hw,
+ struct clk_duty *duty);
void (*init)(struct clk_hw *hw);
void (*debug_init)(struct clk_hw *hw, struct dentry *dentry);
};
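To make the new duty-cycle callbacks concrete, here is a hedged provider-side sketch. The register layout (an 8-bit high-period field at FOO_DUTY_REG with a fixed denominator of 256) and the foo_clk wrappers are invented for illustration.

    static int foo_clk_get_duty_cycle(struct clk_hw *hw, struct clk_duty *duty)
    {
    	struct foo_clk *foo = to_foo_clk(hw);

    	duty->num = readl(foo->base + FOO_DUTY_REG) & 0xff;
    	duty->den = 256;
    	return 0;
    }

    static int foo_clk_set_duty_cycle(struct clk_hw *hw, struct clk_duty *duty)
    {
    	struct foo_clk *foo = to_foo_clk(hw);
    	u32 high = mult_frac(256U, duty->num, duty->den);

    	writel(high & 0xff, foo->base + FOO_DUTY_REG);
    	return 0;
    }

    static const struct clk_ops foo_clk_ops = {
    	.get_duty_cycle	= foo_clk_get_duty_cycle,
    	.set_duty_cycle	= foo_clk_set_duty_cycle,
    };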
diff --git a/include/linux/clk.h b/include/linux/clk.h
index 0dbd0885b2c2..4f750c481b82 100644
--- a/include/linux/clk.h
+++ b/include/linux/clk.h
@@ -142,6 +142,27 @@ int clk_set_phase(struct clk *clk, int degrees);
int clk_get_phase(struct clk *clk);
/**
+ * clk_set_duty_cycle - adjust the duty cycle ratio of a clock signal
+ * @clk: clock signal source
+ * @num: numerator of the duty cycle ratio to be applied
+ * @den: denominator of the duty cycle ratio to be applied
+ *
+ * Adjust the duty cycle of a clock signal by the specified ratio. Returns 0 on
+ * success, -EERROR otherwise.
+ */
+int clk_set_duty_cycle(struct clk *clk, unsigned int num, unsigned int den);
+
+/**
+ * clk_get_duty_cycle - return the duty cycle ratio of a clock signal
+ * @clk: clock signal source
+ * @scale: scaling factor to be applied to represent the ratio as an integer
+ *
+ * Returns the duty cycle ratio multiplied by the scale provided, otherwise
+ * returns -EERROR.
+ */
+int clk_get_scaled_duty_cycle(struct clk *clk, unsigned int scale);
+
+/**
* clk_is_match - check if two clk's point to the same hardware clock
* @p: clk compared against q
* @q: clk compared against p
@@ -183,6 +204,18 @@ static inline long clk_get_phase(struct clk *clk)
return -ENOTSUPP;
}
+static inline int clk_set_duty_cycle(struct clk *clk, unsigned int num,
+ unsigned int den)
+{
+ return -ENOTSUPP;
+}
+
+static inline unsigned int clk_get_scaled_duty_cycle(struct clk *clk,
+ unsigned int scale)
+{
+ return 0;
+}
+
static inline bool clk_is_match(const struct clk *p, const struct clk *q)
{
return p == q;
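On the consumer side, a driver that has already obtained its clock would use the two new calls roughly as below; the 1/3 ratio, the percent scale and the function name are arbitrary example values.

    static int foo_apply_duty(struct device *dev, struct clk *clk)
    {
    	int ret = clk_set_duty_cycle(clk, 1, 3);

    	if (ret)
    		return ret;

    	/* read it back scaled to percent, e.g. 33 for a 1/3 ratio */
    	dev_dbg(dev, "duty cycle now %d%%\n",
    		clk_get_scaled_duty_cycle(clk, 100));
    	return 0;
    }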
diff --git a/include/linux/clk/at91_pmc.h b/include/linux/clk/at91_pmc.h
index 6aca5ce8a99a..931ab05f771d 100644
--- a/include/linux/clk/at91_pmc.h
+++ b/include/linux/clk/at91_pmc.h
@@ -47,8 +47,10 @@
#define AT91_CKGR_MOR 0x20 /* Main Oscillator Register [not on SAM9RL] */
#define AT91_PMC_MOSCEN (1 << 0) /* Main Oscillator Enable */
#define AT91_PMC_OSCBYPASS (1 << 1) /* Oscillator Bypass */
+#define AT91_PMC_WAITMODE (1 << 2) /* Wait Mode Command */
#define AT91_PMC_MOSCRCEN (1 << 3) /* Main On-Chip RC Oscillator Enable [some SAM9] */
#define AT91_PMC_OSCOUNT (0xff << 8) /* Main Oscillator Start-up Time */
+#define AT91_PMC_KEY_MASK (0xff << 16)
#define AT91_PMC_KEY (0x37 << 16) /* MOR Writing Key */
#define AT91_PMC_MOSCSEL (1 << 24) /* Main Oscillator Selection [some SAM9] */
#define AT91_PMC_CFDEN (1 << 25) /* Clock Failure Detector Enable [some SAM9] */
@@ -155,6 +157,19 @@
#define AT91_PMC_GCKRDY (1 << 24) /* Generated Clocks */
#define AT91_PMC_IMR 0x6c /* Interrupt Mask Register */
+#define AT91_PMC_FSMR 0x70 /* Fast Startup Mode Register */
+#define AT91_PMC_FSTT(n) BIT(n)
+#define AT91_PMC_RTCAL BIT(17) /* RTC Alarm Enable */
+#define AT91_PMC_USBAL BIT(18) /* USB Resume Enable */
+#define AT91_PMC_SDMMC_CD BIT(19) /* SDMMC Card Detect Enable */
+#define AT91_PMC_LPM BIT(20) /* Low-power Mode */
+#define AT91_PMC_RXLP_MCE BIT(24) /* Backup UART Receive Enable */
+#define AT91_PMC_ACC_CE BIT(25) /* ACC Enable */
+
+#define AT91_PMC_FSPR 0x74 /* Fast Startup Polarity Reg */
+
+#define AT91_PMC_FS_INPUT_MASK 0x7ff
+
#define AT91_PMC_PLLICPR 0x80 /* PLL Charge Pump Current Register */
#define AT91_PMC_PROT 0xe4 /* Write Protect Mode Register [some SAM9] */
diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h
index 7dff1963c185..308918928767 100644
--- a/include/linux/clocksource.h
+++ b/include/linux/clocksource.h
@@ -194,6 +194,9 @@ extern void clocksource_suspend(void);
extern void clocksource_resume(void);
extern struct clocksource * __init clocksource_default_clock(void);
extern void clocksource_mark_unstable(struct clocksource *cs);
+extern void
+clocksource_start_suspend_timing(struct clocksource *cs, u64 start_cycles);
+extern u64 clocksource_stop_suspend_timing(struct clocksource *cs, u64 now);
extern u64
clocks_calc_max_nsecs(u32 mult, u32 shift, u32 maxadj, u64 mask, u64 *max_cycles);
diff --git a/include/linux/cma.h b/include/linux/cma.h
index bf90f0bb42bd..190184b5ff32 100644
--- a/include/linux/cma.h
+++ b/include/linux/cma.h
@@ -33,7 +33,7 @@ extern int cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
const char *name,
struct cma **res_cma);
extern struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
- gfp_t gfp_mask);
+ bool no_warn);
extern bool cma_release(struct cma *cma, const struct page *pages, unsigned int count);
extern int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data);
diff --git a/include/linux/compat.h b/include/linux/compat.h
index b1a5562b3215..1a3c4f37e908 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -72,6 +72,9 @@
*/
#ifndef COMPAT_SYSCALL_DEFINEx
#define COMPAT_SYSCALL_DEFINEx(x, name, ...) \
+ __diag_push(); \
+ __diag_ignore(GCC, 8, "-Wattribute-alias", \
+ "Type aliasing is used to sanitize syscall arguments");\
asmlinkage long compat_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)); \
asmlinkage long compat_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)) \
__attribute__((alias(__stringify(__se_compat_sys##name)))); \
@@ -80,8 +83,11 @@
asmlinkage long __se_compat_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__)); \
asmlinkage long __se_compat_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__)) \
{ \
- return __do_compat_sys##name(__MAP(x,__SC_DELOUSE,__VA_ARGS__));\
+ long ret = __do_compat_sys##name(__MAP(x,__SC_DELOUSE,__VA_ARGS__));\
+ __MAP(x,__SC_TEST,__VA_ARGS__); \
+ return ret; \
} \
+ __diag_pop(); \
static inline long __do_compat_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__))
#endif /* COMPAT_SYSCALL_DEFINEx */
@@ -109,11 +115,6 @@ typedef compat_ulong_t compat_aio_context_t;
struct compat_sel_arg_struct;
struct rusage;
-struct compat_itimerspec {
- struct compat_timespec it_interval;
- struct compat_timespec it_value;
-};
-
struct compat_utimbuf {
compat_time_t actime;
compat_time_t modtime;
@@ -294,10 +295,6 @@ extern int compat_get_timespec(struct timespec *, const void __user *);
extern int compat_put_timespec(const struct timespec *, void __user *);
extern int compat_get_timeval(struct timeval *, const void __user *);
extern int compat_put_timeval(const struct timeval *, void __user *);
-extern int get_compat_itimerspec64(struct itimerspec64 *its,
- const struct compat_itimerspec __user *uits);
-extern int put_compat_itimerspec64(const struct itimerspec64 *its,
- struct compat_itimerspec __user *uits);
struct compat_iovec {
compat_uptr_t iov_base;
@@ -1022,6 +1019,17 @@ static inline struct compat_timeval ns_to_compat_timeval(s64 nsec)
return ctv;
}
+/*
+ * Kernel code should not call compat syscalls (i.e., compat_sys_xyzyyz())
+ * directly. Instead, use one of the functions which work equivalently, such
+ * as the kcompat_sys_xyzyyz() functions prototyped below.
+ */
+
+int kcompat_sys_statfs64(const char __user * pathname, compat_size_t sz,
+ struct compat_statfs64 __user * buf);
+int kcompat_sys_fstatfs64(unsigned int fd, compat_size_t sz,
+ struct compat_statfs64 __user * buf);
+
#else /* !CONFIG_COMPAT */
#define is_compat_task() (0)
diff --git a/include/linux/compat_time.h b/include/linux/compat_time.h
index 31f2774f1994..e70bfd1d2c3f 100644
--- a/include/linux/compat_time.h
+++ b/include/linux/compat_time.h
@@ -17,7 +17,16 @@ struct compat_timeval {
s32 tv_usec;
};
+struct compat_itimerspec {
+ struct compat_timespec it_interval;
+ struct compat_timespec it_value;
+};
+
extern int compat_get_timespec64(struct timespec64 *, const void __user *);
extern int compat_put_timespec64(const struct timespec64 *, void __user *);
+extern int get_compat_itimerspec64(struct itimerspec64 *its,
+ const struct compat_itimerspec __user *uits);
+extern int put_compat_itimerspec64(const struct itimerspec64 *its,
+ struct compat_itimerspec __user *uits);
#endif /* _LINUX_COMPAT_TIME_H */
diff --git a/include/linux/compiler-clang.h b/include/linux/compiler-clang.h
index 7087446c24c8..b1ce500fe8b3 100644
--- a/include/linux/compiler-clang.h
+++ b/include/linux/compiler-clang.h
@@ -6,11 +6,7 @@
/* Some compiler specific definitions are overwritten here
* for Clang compiler
*/
-
-#ifdef uninitialized_var
-#undef uninitialized_var
#define uninitialized_var(x) x = *(&(x))
-#endif
/* same as gcc, this was present in clang-2.6 so we can assume it works
* with any version that can compile the kernel
@@ -25,14 +21,8 @@
#define __SANITIZE_ADDRESS__
#endif
-#undef __no_sanitize_address
#define __no_sanitize_address __attribute__((no_sanitize("address")))
-/* Clang doesn't have a way to turn it off per-function, yet. */
-#ifdef __noretpoline
-#undef __noretpoline
-#endif
-
/*
* Not all versions of clang implement the type-generic versions
* of the builtin overflow checkers. Fortunately, clang implements
@@ -40,9 +30,17 @@
* checks. Unfortunately, we don't know which version of gcc clang
* pretends to be, so the macro may or may not be defined.
*/
-#undef COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW
#if __has_builtin(__builtin_mul_overflow) && \
__has_builtin(__builtin_add_overflow) && \
__has_builtin(__builtin_sub_overflow)
#define COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW 1
#endif
+
+/* The following are for compatibility with GCC, from compiler-gcc.h,
+ * and may be redefined here because they should not be shared with other
+ * compilers, like ICC.
+ */
+#define barrier() __asm__ __volatile__("" : : : "memory")
+#define __must_be_array(a) BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0]))
+#define __assume_aligned(a, ...) \
+ __attribute__((__assume_aligned__(a, ## __VA_ARGS__)))
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
index f1a7492a5cc8..763bbad1e258 100644
--- a/include/linux/compiler-gcc.h
+++ b/include/linux/compiler-gcc.h
@@ -10,6 +10,10 @@
+ __GNUC_MINOR__ * 100 \
+ __GNUC_PATCHLEVEL__)
+#if GCC_VERSION < 40600
+# error Sorry, your compiler is too old - please upgrade it.
+#endif
+
/* Optimization barrier */
/* The "volatile" is due to gcc bugs */
@@ -58,6 +62,12 @@
#define OPTIMIZER_HIDE_VAR(var) \
__asm__ ("" : "=r" (var) : "0" (var))
+/*
+ * A trick to suppress uninitialized variable warning without generating any
+ * code
+ */
+#define uninitialized_var(x) x = x
+
#ifdef __CHECKER__
#define __must_be_array(a) 0
#else
@@ -65,34 +75,6 @@
#define __must_be_array(a) BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0]))
#endif
-/*
- * Force always-inline if the user requests it so via the .config,
- * or if gcc is too old.
- * GCC does not warn about unused static inline functions for
- * -Wunused-function. This turns out to avoid the need for complex #ifdef
- * directives. Suppress the warning in clang as well by using "unused"
- * function attribute, which is redundant but not harmful for gcc.
- */
-#if !defined(CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING) || \
- !defined(CONFIG_OPTIMIZE_INLINING) || (__GNUC__ < 4)
-#define inline inline __attribute__((always_inline,unused)) notrace
-#define __inline__ __inline__ __attribute__((always_inline,unused)) notrace
-#define __inline __inline __attribute__((always_inline,unused)) notrace
-#else
-/* A lot of inline functions can cause havoc with function tracing */
-#define inline inline __attribute__((unused)) notrace
-#define __inline__ __inline__ __attribute__((unused)) notrace
-#define __inline __inline __attribute__((unused)) notrace
-#endif
-
-#define __always_inline inline __attribute__((always_inline))
-#define noinline __attribute__((noinline))
-
-#define __deprecated __attribute__((deprecated))
-#define __packed __attribute__((packed))
-#define __weak __attribute__((weak))
-#define __alias(symbol) __attribute__((alias(#symbol)))
-
#ifdef RETPOLINE
#define __noretpoline __attribute__((indirect_branch("keep")))
#endif
@@ -111,105 +93,20 @@
*/
#define __naked __attribute__((naked)) noinline __noclone notrace
-#define __noreturn __attribute__((noreturn))
-
-/*
- * From the GCC manual:
- *
- * Many functions have no effects except the return value and their
- * return value depends only on the parameters and/or global
- * variables. Such a function can be subject to common subexpression
- * elimination and loop optimization just as an arithmetic operator
- * would be.
- * [...]
- */
-#define __pure __attribute__((pure))
-#define __aligned(x) __attribute__((aligned(x)))
-#define __aligned_largest __attribute__((aligned))
-#define __printf(a, b) __attribute__((format(printf, a, b)))
-#define __scanf(a, b) __attribute__((format(scanf, a, b)))
-#define __attribute_const__ __attribute__((__const__))
-#define __maybe_unused __attribute__((unused))
-#define __always_unused __attribute__((unused))
-#define __mode(x) __attribute__((mode(x)))
-
-/* gcc version specific checks */
-
-#if GCC_VERSION < 30200
-# error Sorry, your compiler is too old - please upgrade it.
-#endif
-
-#if GCC_VERSION < 30300
-# define __used __attribute__((__unused__))
-#else
-# define __used __attribute__((__used__))
-#endif
-
-#ifdef CONFIG_GCOV_KERNEL
-# if GCC_VERSION < 30400
-# error "GCOV profiling support for gcc versions below 3.4 not included"
-# endif /* __GNUC_MINOR__ */
-#endif /* CONFIG_GCOV_KERNEL */
-
-#if GCC_VERSION >= 30400
-#define __must_check __attribute__((warn_unused_result))
-#define __malloc __attribute__((__malloc__))
-#endif
-
-#if GCC_VERSION >= 40000
-
-/* GCC 4.1.[01] miscompiles __weak */
-#ifdef __KERNEL__
-# if GCC_VERSION >= 40100 && GCC_VERSION <= 40101
-# error Your version of gcc miscompiles the __weak directive
-# endif
-#endif
-
-#define __used __attribute__((__used__))
-#define __compiler_offsetof(a, b) \
- __builtin_offsetof(a, b)
-
-#if GCC_VERSION >= 40100
-# define __compiletime_object_size(obj) __builtin_object_size(obj, 0)
-#endif
-
-#if GCC_VERSION >= 40300
-/* Mark functions as cold. gcc will assume any path leading to a call
- * to them will be unlikely. This means a lot of manual unlikely()s
- * are unnecessary now for any paths leading to the usual suspects
- * like BUG(), printk(), panic() etc. [but let's keep them for now for
- * older compilers]
- *
- * Early snapshots of gcc 4.3 don't support this and we can't detect this
- * in the preprocessor, but we can live with this because they're unreleased.
- * Maketime probing would be overkill here.
- *
- * gcc also has a __attribute__((__hot__)) to move hot functions into
- * a special section, but I don't see any sense in this right now in
- * the kernel context
- */
-#define __cold __attribute__((__cold__))
-
#define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__)
-#ifndef __CHECKER__
-# define __compiletime_warning(message) __attribute__((warning(message)))
-# define __compiletime_error(message) __attribute__((error(message)))
-#endif /* __CHECKER__ */
-#endif /* GCC_VERSION >= 40300 */
-
-#if GCC_VERSION >= 40400
#define __optimize(level) __attribute__((__optimize__(level)))
-#define __nostackprotector __optimize("no-stack-protector")
-#endif /* GCC_VERSION >= 40400 */
-#if GCC_VERSION >= 40500
+#define __compiletime_object_size(obj) __builtin_object_size(obj, 0)
#ifndef __CHECKER__
+#define __compiletime_warning(message) __attribute__((warning(message)))
+#define __compiletime_error(message) __attribute__((error(message)))
+
#ifdef LATENT_ENTROPY_PLUGIN
#define __latent_entropy __attribute__((latent_entropy))
#endif
-#endif
+#endif /* __CHECKER__ */
/*
* calling noreturn functions, __builtin_unreachable() and __builtin_trap()
@@ -247,10 +144,6 @@
#define randomized_struct_fields_end } __randomize_layout;
#endif
-#endif /* GCC_VERSION >= 40500 */
-
-#if GCC_VERSION >= 40600
-
/*
* When used with Link Time Optimization, gcc can optimize away C functions or
* variables which are referenced only from assembly code. __visible tells the
@@ -259,8 +152,7 @@
*/
#define __visible __attribute__((externally_visible))
-#endif /* GCC_VERSION >= 40600 */
-
+/* gcc version specific checks */
#if GCC_VERSION >= 40900 && !defined(__CHECKER__)
/*
@@ -294,10 +186,8 @@
* folding in __builtin_bswap*() (yet), so don't set these for it.
*/
#if defined(CONFIG_ARCH_USE_BUILTIN_BSWAP) && !defined(__CHECKER__)
-#if GCC_VERSION >= 40400
#define __HAVE_BUILTIN_BSWAP32__
#define __HAVE_BUILTIN_BSWAP64__
-#endif
#if GCC_VERSION >= 40800
#define __HAVE_BUILTIN_BSWAP16__
#endif
@@ -326,10 +216,9 @@
* https://gcc.gnu.org/onlinedocs/gcc/Designated-Inits.html
*/
#define __designated_init __attribute__((designated_init))
+#define COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW 1
#endif
-#endif /* gcc version >= 40000 specific checks */
-
#if !defined(__noclone)
#define __noclone /* not needed */
#endif
@@ -339,11 +228,23 @@
#endif
/*
- * A trick to suppress uninitialized variable warning without generating any
- * code
+ * Turn individual warnings and errors on and off locally, depending
+ * on version.
*/
-#define uninitialized_var(x) x = x
+#define __diag_GCC(version, severity, s) \
+ __diag_GCC_ ## version(__diag_GCC_ ## severity s)
-#if GCC_VERSION >= 50100
-#define COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW 1
+/* Severity used in pragma directives */
+#define __diag_GCC_ignore ignored
+#define __diag_GCC_warn warning
+#define __diag_GCC_error error
+
+#define __diag_str1(s) #s
+#define __diag_str(s) __diag_str1(s)
+#define __diag(s) _Pragma(__diag_str(GCC diagnostic s))
+
+#if GCC_VERSION >= 80000
+#define __diag_GCC_8(s) __diag(s)
+#else
+#define __diag_GCC_8(s)
#endif
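The pattern these helpers enable looks roughly like the sketch below, mirroring the COMPAT_SYSCALL_DEFINEx usage elsewhere in this series: the diagnostic is ignored only for GCC >= 8 and only between the push/pop pair. The sys_foo/__se_sys_foo names are illustrative, not real syscalls.

    __diag_push();
    __diag_ignore(GCC, 8, "-Wattribute-alias",
    	      "Type aliasing is used to sanitize syscall arguments");
    asmlinkage long sys_foo(int arg)
    	__attribute__((alias(__stringify(__se_sys_foo))));
    __diag_pop();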
diff --git a/include/linux/compiler-intel.h b/include/linux/compiler-intel.h
index 547cdc920a3c..4c7f9befa9f6 100644
--- a/include/linux/compiler-intel.h
+++ b/include/linux/compiler-intel.h
@@ -14,10 +14,6 @@
/* Intel ECC compiler doesn't support gcc specific asm stmts.
* It uses intrinsics to do the equivalent things.
*/
-#undef barrier
-#undef barrier_data
-#undef RELOC_HIDE
-#undef OPTIMIZER_HIDE_VAR
#define barrier() __memory_barrier()
#define barrier_data(ptr) barrier()
@@ -38,13 +34,12 @@
#endif
-#ifndef __HAVE_BUILTIN_BSWAP16__
/* icc has this, but it's called _bswap16 */
#define __HAVE_BUILTIN_BSWAP16__
#define __builtin_bswap16 _bswap16
-#endif
-/*
- * icc defines __GNUC__, but does not implement the builtin overflow checkers.
+/* The following are for compatibility with GCC, from compiler-gcc.h,
+ * and may be redefined here because they should not be shared with other
+ * compilers, like clang.
*/
-#undef COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW
+#define __visible __attribute__((externally_visible))
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 42506e4d1f53..681d866efb1e 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -280,6 +280,25 @@ unsigned long read_word_at_a_time(const void *addr)
#endif /* __KERNEL__ */
+/*
+ * Force the compiler to emit 'sym' as a symbol, so that we can reference
+ * it from inline assembler. Necessary in case 'sym' could be inlined
+ * otherwise, or eliminated entirely due to lack of references that are
+ * visible to the compiler.
+ */
+#define __ADDRESSABLE(sym) \
+ static void * __attribute__((section(".discard.addressable"), used)) \
+ __PASTE(__addressable_##sym, __LINE__) = (void *)&sym;
+
+/**
+ * offset_to_ptr - convert a relative memory offset to an absolute pointer
+ * @off: the address of the 32-bit offset value
+ */
+static inline void *offset_to_ptr(const int *off)
+{
+ return (void *)((unsigned long)off + *off);
+}
+
#endif /* __ASSEMBLY__ */
#ifndef __optimize
@@ -313,7 +332,7 @@ unsigned long read_word_at_a_time(const void *addr)
#ifdef __OPTIMIZE__
# define __compiletime_assert(condition, msg, prefix, suffix) \
do { \
- bool __cond = !(condition); \
+ int __cond = !(condition); \
extern void prefix ## suffix(void) __compiletime_error(msg); \
if (__cond) \
prefix ## suffix(); \
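A small sketch of where offset_to_ptr() fits, assuming a table of 32-bit self-relative entries (each storing "target address minus address of the entry", the kind of reference __ADDRESSABLE() helps keep alive); reloc_table and entry_ptr are made-up names.

    extern const int reloc_table[];

    static void *entry_ptr(unsigned int i)
    {
    	/* entry i holds (target - &reloc_table[i]); recover the pointer */
    	return offset_to_ptr(&reloc_table[i]);
    }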
diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h
index 6b79a9bba9a7..3525c179698c 100644
--- a/include/linux/compiler_types.h
+++ b/include/linux/compiler_types.h
@@ -54,32 +54,32 @@ extern void __chk_io_ptr(const volatile void __iomem *);
#ifdef __KERNEL__
-#ifdef __GNUC__
+/* Compiler specific macros. */
+#ifdef __clang__
+#include <linux/compiler-clang.h>
+#elif defined(__INTEL_COMPILER)
+#include <linux/compiler-intel.h>
+#elif defined(__GNUC__)
+/* The above compilers also define __GNUC__, so order is important here. */
#include <linux/compiler-gcc.h>
-#endif
-
-#if defined(CC_USING_HOTPATCH) && !defined(__CHECKER__)
-#define notrace __attribute__((hotpatch(0,0)))
#else
-#define notrace __attribute__((no_instrument_function))
+#error "Unknown compiler"
#endif
-/* Intel compiler defines __GNUC__. So we will overwrite implementations
- * coming from above header files here
- */
-#ifdef __INTEL_COMPILER
-# include <linux/compiler-intel.h>
-#endif
-
-/* Clang compiler defines __GNUC__. So we will overwrite implementations
- * coming from above header files here
+/*
+ * Some architectures need to provide custom definitions of macros provided
+ * by linux/compiler-*.h, and can do so using asm/compiler.h. We include that
+ * conditionally rather than using an asm-generic wrapper in order to avoid
+ * build failures if any C compilation, which will include this file via an
+ * -include argument in c_flags, occurs prior to the asm-generic wrappers being
+ * generated.
*/
-#ifdef __clang__
-#include <linux/compiler-clang.h>
+#ifdef CONFIG_HAVE_ARCH_COMPILER_H
+#include <asm/compiler.h>
#endif
/*
- * Generic compiler-dependent macros required for kernel
+ * Generic compiler-independent macros required for kernel
* build go below this comment. Actual compiler/compiler version
* specific implementations come from the above header files
*/
@@ -106,169 +106,172 @@ struct ftrace_likely_data {
unsigned long constant;
};
+/* Don't. Just don't. */
+#define __deprecated
+#define __deprecated_for_modules
+
#endif /* __KERNEL__ */
#endif /* __ASSEMBLY__ */
-#ifdef __KERNEL__
/*
- * Allow us to mark functions as 'deprecated' and have gcc emit a nice
- * warning for each use, in hopes of speeding the functions removal.
- * Usage is:
- * int __deprecated foo(void)
+ * The below symbols may be defined for one or more, but not ALL, of the above
+ * compilers. We don't consider that to be an error, so set them to nothing.
+ * For example, some of them are for compiler specific plugins.
*/
-#ifndef __deprecated
-# define __deprecated /* unimplemented */
-#endif
-
-#ifdef MODULE
-#define __deprecated_for_modules __deprecated
-#else
-#define __deprecated_for_modules
-#endif
-
-#ifndef __must_check
-#define __must_check
-#endif
-
-#ifndef CONFIG_ENABLE_MUST_CHECK
-#undef __must_check
-#define __must_check
-#endif
-#ifndef CONFIG_ENABLE_WARN_DEPRECATED
-#undef __deprecated
-#undef __deprecated_for_modules
-#define __deprecated
-#define __deprecated_for_modules
+#ifndef __designated_init
+# define __designated_init
#endif
-#ifndef __malloc
-#define __malloc
+#ifndef __latent_entropy
+# define __latent_entropy
#endif
-/*
- * Allow us to avoid 'defined but not used' warnings on functions and data,
- * as well as force them to be emitted to the assembly file.
- *
- * As of gcc 3.4, static functions that are not marked with attribute((used))
- * may be elided from the assembly file. As of gcc 3.4, static data not so
- * marked will not be elided, but this may change in a future gcc version.
- *
- * NOTE: Because distributions shipped with a backported unit-at-a-time
- * compiler in gcc 3.3, we must define __used to be __attribute__((used))
- * for gcc >=3.3 instead of 3.4.
- *
- * In prior versions of gcc, such functions and data would be emitted, but
- * would be warned about except with attribute((unused)).
- *
- * Mark functions that are referenced only in inline assembly as __used so
- * the code is emitted even though it appears to be unreferenced.
- */
-#ifndef __used
-# define __used /* unimplemented */
+#ifndef __randomize_layout
+# define __randomize_layout __designated_init
#endif
-#ifndef __maybe_unused
-# define __maybe_unused /* unimplemented */
+#ifndef __no_randomize_layout
+# define __no_randomize_layout
#endif
-#ifndef __always_unused
-# define __always_unused /* unimplemented */
+#ifndef randomized_struct_fields_start
+# define randomized_struct_fields_start
+# define randomized_struct_fields_end
#endif
-#ifndef noinline
-#define noinline
+#ifndef __visible
+#define __visible
#endif
/*
- * Rather then using noinline to prevent stack consumption, use
- * noinline_for_stack instead. For documentation reasons.
+ * Assume alignment of return value.
*/
-#define noinline_for_stack noinline
-
-#ifndef __always_inline
-#define __always_inline inline
+#ifndef __assume_aligned
+#define __assume_aligned(a, ...)
#endif
-#endif /* __KERNEL__ */
+/* Are two types/vars the same type (ignoring qualifiers)? */
+#define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b))
+
+/* Is this type a native word size -- useful for atomic operations */
+#define __native_word(t) \
+ (sizeof(t) == sizeof(char) || sizeof(t) == sizeof(short) || \
+ sizeof(t) == sizeof(int) || sizeof(t) == sizeof(long))
-/*
- * From the GCC manual:
- *
- * Many functions do not examine any values except their arguments,
- * and have no effects except the return value. Basically this is
- * just slightly more strict class than the `pure' attribute above,
- * since function is not allowed to read global memory.
- *
- * Note that a function that has pointer arguments and examines the
- * data pointed to must _not_ be declared `const'. Likewise, a
- * function that calls a non-`const' function usually must not be
- * `const'. It does not make sense for a `const' function to return
- * `void'.
- */
#ifndef __attribute_const__
-# define __attribute_const__ /* unimplemented */
+#define __attribute_const__ __attribute__((__const__))
#endif
-#ifndef __designated_init
-# define __designated_init
+#ifndef __noclone
+#define __noclone
#endif
-#ifndef __latent_entropy
-# define __latent_entropy
+/* Helpers for emitting diagnostics in pragmas. */
+#ifndef __diag
+#define __diag(string)
#endif
-#ifndef __randomize_layout
-# define __randomize_layout __designated_init
+#ifndef __diag_GCC
+#define __diag_GCC(version, severity, string)
#endif
-#ifndef __no_randomize_layout
-# define __no_randomize_layout
-#endif
+#define __diag_push() __diag(push)
+#define __diag_pop() __diag(pop)
-#ifndef randomized_struct_fields_start
-# define randomized_struct_fields_start
-# define randomized_struct_fields_end
-#endif
+#define __diag_ignore(compiler, version, option, comment) \
+ __diag_ ## compiler(version, ignore, option)
+#define __diag_warn(compiler, version, option, comment) \
+ __diag_ ## compiler(version, warn, option)
+#define __diag_error(compiler, version, option, comment) \
+ __diag_ ## compiler(version, error, option)
/*
- * Tell gcc if a function is cold. The compiler will assume any path
- * directly leading to the call is unlikely.
+ * From the GCC manual:
+ *
+ * Many functions have no effects except the return value and their
+ * return value depends only on the parameters and/or global
+ * variables. Such a function can be subject to common subexpression
+ * elimination and loop optimization just as an arithmetic operator
+ * would be.
+ * [...]
*/
-
-#ifndef __cold
-#define __cold
+#define __pure __attribute__((pure))
+#define __aligned(x) __attribute__((aligned(x)))
+#define __aligned_largest __attribute__((aligned))
+#define __printf(a, b) __attribute__((format(printf, a, b)))
+#define __scanf(a, b) __attribute__((format(scanf, a, b)))
+#define __maybe_unused __attribute__((unused))
+#define __always_unused __attribute__((unused))
+#define __mode(x) __attribute__((mode(x)))
+#define __malloc __attribute__((__malloc__))
+#define __used __attribute__((__used__))
+#define __noreturn __attribute__((noreturn))
+#define __packed __attribute__((packed))
+#define __weak __attribute__((weak))
+#define __alias(symbol) __attribute__((alias(#symbol)))
+#define __cold __attribute__((cold))
+#define __section(S) __attribute__((__section__(#S)))
+
+
+#ifdef CONFIG_ENABLE_MUST_CHECK
+#define __must_check __attribute__((warn_unused_result))
+#else
+#define __must_check
#endif
-/* Simple shorthand for a section definition */
-#ifndef __section
-# define __section(S) __attribute__ ((__section__(#S)))
+#if defined(CC_USING_HOTPATCH) && !defined(__CHECKER__)
+#define notrace __attribute__((hotpatch(0, 0)))
+#else
+#define notrace __attribute__((no_instrument_function))
#endif
-#ifndef __visible
-#define __visible
-#endif
+#define __compiler_offsetof(a, b) __builtin_offsetof(a, b)
-#ifndef __nostackprotector
-# define __nostackprotector
+/*
+ * Feature detection for gnu_inline (gnu89 extern inline semantics). Either
+ * __GNUC_STDC_INLINE__ is defined (not using gnu89 extern inline semantics,
+ * and we opt in to the gnu89 semantics), or __GNUC_STDC_INLINE__ is not
+ * defined so the gnu89 semantics are the default.
+ */
+#ifdef __GNUC_STDC_INLINE__
+# define __gnu_inline __attribute__((gnu_inline))
+#else
+# define __gnu_inline
#endif
/*
- * Assume alignment of return value.
+ * Force always-inline if the user requests it so via the .config.
+ * GCC does not warn about unused static inline functions for
+ * -Wunused-function. This turns out to avoid the need for complex #ifdef
+ * directives. Suppress the warning in clang as well by using "unused"
+ * function attribute, which is redundant but not harmful for gcc.
+ * Prefer gnu_inline, so that extern inline functions do not emit an
+ * externally visible function. This makes extern inline behave as per gnu89
+ * semantics rather than c99. This prevents multiple symbol definition errors
+ * of extern inline functions at link time.
+ * A lot of inline functions can cause havoc with function tracing.
*/
-#ifndef __assume_aligned
-#define __assume_aligned(a, ...)
+#if !defined(CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING) || \
+ !defined(CONFIG_OPTIMIZE_INLINING)
+#define inline \
+ inline __attribute__((always_inline, unused)) notrace __gnu_inline
+#else
+#define inline inline __attribute__((unused)) notrace __gnu_inline
#endif
+#define __inline__ inline
+#define __inline inline
+#define noinline __attribute__((noinline))
-/* Are two types/vars the same type (ignoring qualifiers)? */
-#ifndef __same_type
-# define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b))
+#ifndef __always_inline
+#define __always_inline inline __attribute__((always_inline))
#endif
-/* Is this type a native word size -- useful for atomic operations */
-#ifndef __native_word
-# define __native_word(t) (sizeof(t) == sizeof(char) || sizeof(t) == sizeof(short) || sizeof(t) == sizeof(int) || sizeof(t) == sizeof(long))
-#endif
+/*
+ * Rather than using noinline to prevent stack consumption, use
+ * noinline_for_stack instead. For documentation reasons.
+ */
+#define noinline_for_stack noinline
#endif /* __LINUX_COMPILER_TYPES_H */
diff --git a/include/linux/console.h b/include/linux/console.h
index dfd6b0e97855..ec9bdb3d7bab 100644
--- a/include/linux/console.h
+++ b/include/linux/console.h
@@ -14,6 +14,7 @@
#ifndef _LINUX_CONSOLE_H_
#define _LINUX_CONSOLE_H_ 1
+#include <linux/atomic.h>
#include <linux/types.h>
struct vc_data;
@@ -21,6 +22,7 @@ struct console_font_op;
struct console_font;
struct module;
struct tty_struct;
+struct notifier_block;
/*
* this is what the terminal answers to a ESC-Z or csi0c query.
@@ -200,11 +202,14 @@ void vcs_make_sysfs(int index);
void vcs_remove_sysfs(int index);
/* Some debug stub to catch some of the obvious races in the VT code */
-#if 1
-#define WARN_CONSOLE_UNLOCKED() WARN_ON(!is_console_locked() && !oops_in_progress)
-#else
-#define WARN_CONSOLE_UNLOCKED()
-#endif
+#define WARN_CONSOLE_UNLOCKED() \
+ WARN_ON(!atomic_read(&ignore_console_lock_warning) && \
+ !is_console_locked() && !oops_in_progress)
+/*
+ * Increment ignore_console_lock_warning if you need to quiet
+ * WARN_CONSOLE_UNLOCKED() for debugging purposes.
+ */
+extern atomic_t ignore_console_lock_warning;
/* VESA Blanking Levels */
#define VESA_NO_BLANKING 0
@@ -220,4 +225,8 @@ static inline bool vgacon_text_force(void) { return false; }
extern void console_init(void);
+/* For deferred console takeover */
+void dummycon_register_output_notifier(struct notifier_block *nb);
+void dummycon_unregister_output_notifier(struct notifier_block *nb);
+
#endif /* _LINUX_CONSOLE_H */
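A hedged example of the intended use of the new counter: bump it around a debugging path that legitimately touches VT state without the console lock so that WARN_CONSOLE_UNLOCKED() stays quiet. dump_vc_state() is a hypothetical helper, not part of this patch.

    static void foo_debug_dump(struct vc_data *vc)
    {
    	atomic_inc(&ignore_console_lock_warning);
    	dump_vc_state(vc);	/* would otherwise trip WARN_CONSOLE_UNLOCKED() */
    	atomic_dec(&ignore_console_lock_warning);
    }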
diff --git a/include/linux/console_struct.h b/include/linux/console_struct.h
index c0ec478ea5bf..fea64f2692a0 100644
--- a/include/linux/console_struct.h
+++ b/include/linux/console_struct.h
@@ -17,8 +17,8 @@
#include <linux/vt.h>
#include <linux/workqueue.h>
-struct vt_struct;
struct uni_pagedir;
+struct uni_screen;
#define NPAR 16
@@ -140,6 +140,7 @@ struct vc_data {
struct vc_data **vc_display_fg; /* [!] Ptr to var holding fg console for this display */
struct uni_pagedir *vc_uni_pagedir;
struct uni_pagedir **vc_uni_pagedir_loc; /* [!] Location of uni_pagedir variable for this console */
+ struct uni_screen *vc_uni_screen; /* unicode screen content */
bool vc_panic_force_write; /* when oops/panic this VC can accept forced output/blanking */
/* additional information is in vt_kern.h */
};
@@ -148,7 +149,7 @@ struct vc {
struct vc_data *d;
struct work_struct SAK_work;
- /* might add scrmem, vt_struct, kbd at some time,
+ /* might add scrmem, kbd at some time,
to have everything in one place - the disadvantage
would be that vc_cons etc can no longer be static */
};
diff --git a/include/linux/coresight.h b/include/linux/coresight.h
index c265e0468414..d828a6efe0b1 100644
--- a/include/linux/coresight.h
+++ b/include/linux/coresight.h
@@ -40,6 +40,7 @@ enum coresight_dev_type {
CORESIGHT_DEV_TYPE_LINK,
CORESIGHT_DEV_TYPE_LINKSINK,
CORESIGHT_DEV_TYPE_SOURCE,
+ CORESIGHT_DEV_TYPE_HELPER,
};
enum coresight_dev_subtype_sink {
@@ -62,19 +63,30 @@ enum coresight_dev_subtype_source {
CORESIGHT_DEV_SUBTYPE_SOURCE_SOFTWARE,
};
+enum coresight_dev_subtype_helper {
+ CORESIGHT_DEV_SUBTYPE_HELPER_NONE,
+ CORESIGHT_DEV_SUBTYPE_HELPER_CATU,
+};
+
/**
- * struct coresight_dev_subtype - further characterisation of a type
+ * union coresight_dev_subtype - further characterisation of a type
* @sink_subtype: type of sink this component is, as defined
- by @coresight_dev_subtype_sink.
+ * by @coresight_dev_subtype_sink.
* @link_subtype: type of link this component is, as defined
- by @coresight_dev_subtype_link.
+ * by @coresight_dev_subtype_link.
* @source_subtype: type of source this component is, as defined
- by @coresight_dev_subtype_source.
+ * by @coresight_dev_subtype_source.
+ * @helper_subtype: type of helper this component is, as defined
+ * by @coresight_dev_subtype_helper.
*/
-struct coresight_dev_subtype {
- enum coresight_dev_subtype_sink sink_subtype;
- enum coresight_dev_subtype_link link_subtype;
+union coresight_dev_subtype {
+ /* We have some devices which act as LINK and SINK */
+ struct {
+ enum coresight_dev_subtype_sink sink_subtype;
+ enum coresight_dev_subtype_link link_subtype;
+ };
enum coresight_dev_subtype_source source_subtype;
+ enum coresight_dev_subtype_helper helper_subtype;
};
/**
@@ -87,7 +99,6 @@ struct coresight_dev_subtype {
* @child_ports:child component port number the current component is
connected to.
* @nr_outport: number of output ports for this component.
- * @clk: The clock this component is associated to.
*/
struct coresight_platform_data {
int cpu;
@@ -97,7 +108,6 @@ struct coresight_platform_data {
const char **child_names;
int *child_ports;
int nr_outport;
- struct clk *clk;
};
/**
@@ -113,7 +123,7 @@ struct coresight_platform_data {
*/
struct coresight_desc {
enum coresight_dev_type type;
- struct coresight_dev_subtype subtype;
+ union coresight_dev_subtype subtype;
const struct coresight_ops *ops;
struct coresight_platform_data *pdata;
struct device *dev;
@@ -157,7 +167,7 @@ struct coresight_device {
int nr_inport;
int nr_outport;
enum coresight_dev_type type;
- struct coresight_dev_subtype subtype;
+ union coresight_dev_subtype subtype;
const struct coresight_ops *ops;
struct device dev;
atomic_t *refcnt;
@@ -171,6 +181,7 @@ struct coresight_device {
#define source_ops(csdev) csdev->ops->source_ops
#define sink_ops(csdev) csdev->ops->sink_ops
#define link_ops(csdev) csdev->ops->link_ops
+#define helper_ops(csdev) csdev->ops->helper_ops
/**
* struct coresight_ops_sink - basic operations for a sink
@@ -230,10 +241,25 @@ struct coresight_ops_source {
struct perf_event *event);
};
+/**
+ * struct coresight_ops_helper - Operations for a helper device.
+ *
+ * All operations may be passed device specific data, which can
+ * help the helper device determine what to do.
+ *
+ * @enable : Enable the device
+ * @disable : Disable the device
+ */
+struct coresight_ops_helper {
+ int (*enable)(struct coresight_device *csdev, void *data);
+ int (*disable)(struct coresight_device *csdev, void *data);
+};
+
struct coresight_ops {
const struct coresight_ops_sink *sink_ops;
const struct coresight_ops_link *link_ops;
const struct coresight_ops_source *source_ops;
+ const struct coresight_ops_helper *helper_ops;
};
#ifdef CONFIG_CORESIGHT
@@ -267,24 +293,4 @@ static inline struct coresight_platform_data *of_get_coresight_platform_data(
struct device *dev, const struct device_node *node) { return NULL; }
#endif
-#ifdef CONFIG_PID_NS
-static inline unsigned long
-coresight_vpid_to_pid(unsigned long vpid)
-{
- struct task_struct *task = NULL;
- unsigned long pid = 0;
-
- rcu_read_lock();
- task = find_task_by_vpid(vpid);
- if (task)
- pid = task_pid_nr(task);
- rcu_read_unlock();
-
- return pid;
-}
-#else
-static inline unsigned long
-coresight_vpid_to_pid(unsigned long vpid) { return vpid; }
-#endif
-
#endif
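A rough sketch of a helper component (a CATU-like device, per the new subtype) wiring up the helper operations; the foo_ names, the empty bodies and the meaning of the data cookie are assumptions of this sketch.

    static int foo_helper_enable(struct coresight_device *csdev, void *data)
    {
    	/* program the helper from the mode/buffer described by data */
    	return 0;
    }

    static int foo_helper_disable(struct coresight_device *csdev, void *data)
    {
    	return 0;
    }

    static const struct coresight_ops_helper foo_helper_ops = {
    	.enable		= foo_helper_enable,
    	.disable	= foo_helper_disable,
    };

    static const struct coresight_ops foo_cs_ops = {
    	.helper_ops	= &foo_helper_ops,
    };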
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index a97a63eef59f..218df7f4d3e1 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -30,7 +30,7 @@ struct cpu {
};
extern void boot_cpu_init(void);
-extern void boot_cpu_state_init(void);
+extern void boot_cpu_hotplug_init(void);
extern void cpu_init(void);
extern void trap_init(void);
@@ -55,6 +55,8 @@ extern ssize_t cpu_show_spectre_v2(struct device *dev,
struct device_attribute *attr, char *buf);
extern ssize_t cpu_show_spec_store_bypass(struct device *dev,
struct device_attribute *attr, char *buf);
+extern ssize_t cpu_show_l1tf(struct device *dev,
+ struct device_attribute *attr, char *buf);
extern __printf(4, 5)
struct device *cpu_device_create(struct device *parent, void *drvdata,
@@ -103,6 +105,7 @@ extern void cpus_write_lock(void);
extern void cpus_write_unlock(void);
extern void cpus_read_lock(void);
extern void cpus_read_unlock(void);
+extern int cpus_read_trylock(void);
extern void lockdep_assert_cpus_held(void);
extern void cpu_hotplug_disable(void);
extern void cpu_hotplug_enable(void);
@@ -115,6 +118,7 @@ static inline void cpus_write_lock(void) { }
static inline void cpus_write_unlock(void) { }
static inline void cpus_read_lock(void) { }
static inline void cpus_read_unlock(void) { }
+static inline int cpus_read_trylock(void) { return true; }
static inline void lockdep_assert_cpus_held(void) { }
static inline void cpu_hotplug_disable(void) { }
static inline void cpu_hotplug_enable(void) { }
@@ -166,4 +170,23 @@ void cpuhp_report_idle_dead(void);
static inline void cpuhp_report_idle_dead(void) { }
#endif /* #ifdef CONFIG_HOTPLUG_CPU */
+enum cpuhp_smt_control {
+ CPU_SMT_ENABLED,
+ CPU_SMT_DISABLED,
+ CPU_SMT_FORCE_DISABLED,
+ CPU_SMT_NOT_SUPPORTED,
+};
+
+#if defined(CONFIG_SMP) && defined(CONFIG_HOTPLUG_SMT)
+extern enum cpuhp_smt_control cpu_smt_control;
+extern void cpu_smt_disable(bool force);
+extern void cpu_smt_check_topology_early(void);
+extern void cpu_smt_check_topology(void);
+#else
+# define cpu_smt_control (CPU_SMT_ENABLED)
+static inline void cpu_smt_disable(bool force) { }
+static inline void cpu_smt_check_topology_early(void) { }
+static inline void cpu_smt_check_topology(void) { }
+#endif
+
#endif /* _LINUX_CPU_H_ */
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
index 8796ba387152..caf40ad0bbc6 100644
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -125,6 +125,7 @@ enum cpuhp_state {
CPUHP_AP_MARCO_TIMER_STARTING,
CPUHP_AP_MIPS_GIC_TIMER_STARTING,
CPUHP_AP_ARC_TIMER_STARTING,
+ CPUHP_AP_RISCV_TIMER_STARTING,
CPUHP_AP_KVM_STARTING,
CPUHP_AP_KVM_ARM_VGIC_INIT_STARTING,
CPUHP_AP_KVM_ARM_VGIC_STARTING,
@@ -143,6 +144,7 @@ enum cpuhp_state {
CPUHP_AP_SMPBOOT_THREADS,
CPUHP_AP_X86_VDSO_VMA_ONLINE,
CPUHP_AP_IRQ_AFFINITY_ONLINE,
+ CPUHP_AP_ARM_MVEBU_SYNC_CLOCKS,
CPUHP_AP_PERF_ONLINE,
CPUHP_AP_PERF_X86_ONLINE,
CPUHP_AP_PERF_X86_UNCORE_ONLINE,
@@ -164,6 +166,7 @@ enum cpuhp_state {
CPUHP_AP_PERF_POWERPC_NEST_IMC_ONLINE,
CPUHP_AP_PERF_POWERPC_CORE_IMC_ONLINE,
CPUHP_AP_PERF_POWERPC_THREAD_IMC_ONLINE,
+ CPUHP_AP_WATCHDOG_ONLINE,
CPUHP_AP_WORKQUEUE_ONLINE,
CPUHP_AP_RCUTREE_ONLINE,
CPUHP_AP_ONLINE_DYN,
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index bf53d893ad02..147bdec42215 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -115,12 +115,17 @@ extern struct cpumask __cpu_active_mask;
#define cpu_active(cpu) ((cpu) == 0)
#endif
-/* verify cpu argument to cpumask_* operators */
-static inline unsigned int cpumask_check(unsigned int cpu)
+static inline void cpu_max_bits_warn(unsigned int cpu, unsigned int bits)
{
#ifdef CONFIG_DEBUG_PER_CPU_MAPS
- WARN_ON_ONCE(cpu >= nr_cpumask_bits);
+ WARN_ON_ONCE(cpu >= bits);
#endif /* CONFIG_DEBUG_PER_CPU_MAPS */
+}
+
+/* verify cpu argument to cpumask_* operators */
+static inline unsigned int cpumask_check(unsigned int cpu)
+{
+ cpu_max_bits_warn(cpu, nr_cpumask_bits);
return cpu;
}
@@ -154,6 +159,13 @@ static inline unsigned int cpumask_next_and(int n,
return n+1;
}
+static inline unsigned int cpumask_next_wrap(int n, const struct cpumask *mask,
+ int start, bool wrap)
+{
+ /* cpu0 unless stop condition, wrap and at cpu0, then nr_cpumask_bits */
+ return (wrap && n == 0);
+}
+
/* cpu must be a valid cpu, ie 0, so there's no other choice. */
static inline unsigned int cpumask_any_but(const struct cpumask *mask,
unsigned int cpu)
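Splitting cpu_max_bits_warn() out of cpumask_check() makes the CONFIG_DEBUG_PER_CPU_MAPS range check reusable for other index/mask pairs. A minimal sketch of such reuse, assuming a caller that validates a node id against nr_node_ids (example_node_check is a hypothetical name):

/* Hypothetical reuse of the new helper for a non-cpumask index. */
static inline unsigned int example_node_check(unsigned int node)
{
	/* Warns under CONFIG_DEBUG_PER_CPU_MAPS if node is out of range. */
	cpu_max_bits_warn(node, nr_node_ids);
	return node;
}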
diff --git a/include/linux/crash_core.h b/include/linux/crash_core.h
index b511f6d24b42..525510a9f965 100644
--- a/include/linux/crash_core.h
+++ b/include/linux/crash_core.h
@@ -60,6 +60,8 @@ phys_addr_t paddr_vmcoreinfo_note(void);
#define VMCOREINFO_CONFIG(name) \
vmcoreinfo_append_str("CONFIG_%s=y\n", #name)
+extern unsigned char *vmcoreinfo_data;
+extern size_t vmcoreinfo_size;
extern u32 *vmcoreinfo_note;
Elf_Word *append_elf_note(Elf_Word *buf, char *name, unsigned int type,
diff --git a/include/linux/crc32poly.h b/include/linux/crc32poly.h
new file mode 100644
index 000000000000..62c4b7790a28
--- /dev/null
+++ b/include/linux/crc32poly.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_CRC32_POLY_H
+#define _LINUX_CRC32_POLY_H
+
+/*
+ * There are multiple CRC polynomials in common use, but this is
+ * *the* standard CRC-32 polynomial, first popularized by Ethernet.
+ * x^32+x^26+x^23+x^22+x^16+x^12+x^11+x^10+x^8+x^7+x^5+x^4+x^2+x^1+x^0
+ */
+#define CRC32_POLY_LE 0xedb88320
+#define CRC32_POLY_BE 0x04c11db7
+
+/*
+ * This is the CRC32c polynomial, as outlined by Castagnoli.
+ * x^32+x^28+x^27+x^26+x^25+x^23+x^22+x^20+x^19+x^18+x^14+x^13+x^11+x^10+x^9+
+ * x^8+x^6+x^0
+ */
+#define CRC32C_POLY_LE 0x82F63B78
+
+#endif /* _LINUX_CRC32_POLY_H */
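For reference only, a bit-at-a-time loop showing how the reflected CRC32_POLY_LE constant is conventionally consumed; the in-tree lib/crc32 code is table/slicing based, so treat this as a sketch rather than the kernel's implementation:

/* Reference-only bitwise CRC-32 over the reflected (LE) polynomial. */
static u32 crc32_le_bitwise(u32 crc, const u8 *p, size_t len)
{
	int i;

	crc = ~crc;				/* conventional pre-inversion */
	while (len--) {
		crc ^= *p++;
		for (i = 0; i < 8; i++)
			crc = (crc >> 1) ^ ((crc & 1) ? CRC32_POLY_LE : 0);
	}
	return ~crc;				/* conventional post-inversion */
}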
diff --git a/include/linux/crc64.h b/include/linux/crc64.h
new file mode 100644
index 000000000000..c756e65a1b58
--- /dev/null
+++ b/include/linux/crc64.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * See lib/crc64.c for the related specification and polynomial arithmetic.
+ */
+#ifndef _LINUX_CRC64_H
+#define _LINUX_CRC64_H
+
+#include <linux/types.h>
+
+u64 __pure crc64_be(u64 crc, const void *p, size_t len);
+#endif /* _LINUX_CRC64_H */
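A minimal usage sketch, assuming the usual convention of a zero initial value (lib/crc64.c documents the authoritative seed and bit-ordering rules):

#include <linux/crc64.h>

static u64 example_checksum(const void *buf, size_t len)
{
	/* 0 as the starting value is an assumption here, not a requirement of the API. */
	return crc64_be(0, buf, len);
}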
diff --git a/include/linux/cred.h b/include/linux/cred.h
index 631286535d0f..7eed6101c791 100644
--- a/include/linux/cred.h
+++ b/include/linux/cred.h
@@ -65,6 +65,12 @@ extern void groups_free(struct group_info *);
extern int in_group_p(kgid_t);
extern int in_egroup_p(kgid_t);
+extern int groups_search(const struct group_info *, kgid_t);
+
+extern int set_current_groups(struct group_info *);
+extern void set_groups(struct cred *, struct group_info *);
+extern bool may_setgroups(void);
+extern void groups_sort(struct group_info *);
#else
static inline void groups_free(struct group_info *group_info)
{
@@ -78,12 +84,11 @@ static inline int in_egroup_p(kgid_t grp)
{
return 1;
}
+static inline int groups_search(const struct group_info *group_info, kgid_t grp)
+{
+ return 1;
+}
#endif
-extern int set_current_groups(struct group_info *);
-extern void set_groups(struct cred *, struct group_info *);
-extern int groups_search(const struct group_info *, kgid_t);
-extern bool may_setgroups(void);
-extern void groups_sort(struct group_info *);
/*
* The security context of a task
diff --git a/include/linux/crypto.h b/include/linux/crypto.h
index 6eb06101089f..e8839d3a7559 100644
--- a/include/linux/crypto.h
+++ b/include/linux/crypto.h
@@ -113,6 +113,11 @@
#define CRYPTO_ALG_OPTIONAL_KEY 0x00004000
/*
+ * Don't trigger module loading
+ */
+#define CRYPTO_NOLOAD 0x00008000
+
+/*
* Transform masks and values (for crt_flags).
*/
#define CRYPTO_TFM_NEED_KEY 0x00000001
diff --git a/include/linux/dax.h b/include/linux/dax.h
index 3855e3800f48..deb0f663252f 100644
--- a/include/linux/dax.h
+++ b/include/linux/dax.h
@@ -135,7 +135,7 @@ void dax_flush(struct dax_device *dax_dev, void *addr, size_t size);
ssize_t dax_iomap_rw(struct kiocb *iocb, struct iov_iter *iter,
const struct iomap_ops *ops);
-int dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
+vm_fault_t dax_iomap_fault(struct vm_fault *vmf, enum page_entry_size pe_size,
pfn_t *pfnp, int *errp, const struct iomap_ops *ops);
vm_fault_t dax_finish_sync_fault(struct vm_fault *vmf,
enum page_entry_size pe_size, pfn_t pfn);
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index 66c6e17e61e5..ef4b70f64f33 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -145,8 +145,7 @@ struct dentry_operations {
char *(*d_dname)(struct dentry *, char *, int);
struct vfsmount *(*d_automount)(struct path *);
int (*d_manage)(const struct path *, bool);
- struct dentry *(*d_real)(struct dentry *, const struct inode *,
- unsigned int, unsigned int);
+ struct dentry *(*d_real)(struct dentry *, const struct inode *);
} ____cacheline_aligned;
/*
@@ -227,7 +226,6 @@ extern void d_instantiate(struct dentry *, struct inode *);
extern void d_instantiate_new(struct dentry *, struct inode *);
extern struct dentry * d_instantiate_unique(struct dentry *, struct inode *);
extern struct dentry * d_instantiate_anon(struct dentry *, struct inode *);
-extern int d_instantiate_no_diralias(struct dentry *, struct inode *);
extern void __d_drop(struct dentry *dentry);
extern void d_drop(struct dentry *dentry);
extern void d_delete(struct dentry *);
@@ -271,8 +269,6 @@ extern void d_rehash(struct dentry *);
extern void d_add(struct dentry *, struct inode *);
-extern void dentry_update_name_case(struct dentry *, const struct qstr *);
-
/* used for rename() and baskets */
extern void d_move(struct dentry *, struct dentry *);
extern void d_exchange(struct dentry *, struct dentry *);
@@ -564,15 +560,10 @@ static inline struct dentry *d_backing_dentry(struct dentry *upper)
return upper;
}
-/* d_real() flags */
-#define D_REAL_UPPER 0x2 /* return upper dentry or NULL if non-upper */
-
/**
* d_real - Return the real dentry
* @dentry: the dentry to query
* @inode: inode to select the dentry from multiple layers (can be NULL)
- * @open_flags: open flags to control copy-up behavior
- * @flags: flags to control what is returned by this function
*
* If dentry is on a union/overlay, then return the underlying, real dentry.
* Otherwise return the dentry itself.
@@ -580,11 +571,10 @@ static inline struct dentry *d_backing_dentry(struct dentry *upper)
* See also: Documentation/filesystems/vfs.txt
*/
static inline struct dentry *d_real(struct dentry *dentry,
- const struct inode *inode,
- unsigned int open_flags, unsigned int flags)
+ const struct inode *inode)
{
if (unlikely(dentry->d_flags & DCACHE_OP_REAL))
- return dentry->d_op->d_real(dentry, inode, open_flags, flags);
+ return dentry->d_op->d_real(dentry, inode);
else
return dentry;
}
@@ -599,7 +589,7 @@ static inline struct dentry *d_real(struct dentry *dentry,
static inline struct inode *d_real_inode(const struct dentry *dentry)
{
/* This usage of d_real() results in const dentry */
- return d_backing_inode(d_real((struct dentry *) dentry, NULL, 0, 0));
+ return d_backing_inode(d_real((struct dentry *) dentry, NULL));
}
struct name_snapshot {
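With the open_flags/flags arguments dropped, a stacking filesystem's ->d_real() now selects purely by inode. A simplified sketch under assumed names (examplefs_real_dentry() is a hypothetical helper standing in for the filesystem's own backing-dentry lookup):

static struct dentry *examplefs_d_real(struct dentry *dentry,
				       const struct inode *inode)
{
	/* Hypothetical helper: the filesystem's idea of the backing dentry. */
	struct dentry *real = examplefs_real_dentry(dentry);

	/* No inode given, or it matches the backing layer: return the real dentry. */
	if (!inode || d_inode(real) == inode)
		return real;

	/* Otherwise keep the overlay dentry itself. */
	return dentry;
}

static const struct dentry_operations examplefs_dentry_ops = {
	.d_real	= examplefs_d_real,
};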
diff --git a/include/linux/delayacct.h b/include/linux/delayacct.h
index e6c0448ebcc7..31c865d1842e 100644
--- a/include/linux/delayacct.h
+++ b/include/linux/delayacct.h
@@ -124,7 +124,7 @@ static inline void delayacct_blkio_start(void)
static inline void delayacct_blkio_end(struct task_struct *p)
{
- if (current->delays)
+ if (p->delays)
__delayacct_blkio_end(p);
delayacct_clear_flag(DELAYACCT_PF_BLKIO);
}
diff --git a/include/linux/device.h b/include/linux/device.h
index 055a69dbcd18..8f882549edee 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -90,7 +90,7 @@ extern void bus_remove_file(struct bus_type *, struct bus_attribute *);
* @num_vf: Called to find out how many virtual functions a device on this
* bus supports.
* @dma_configure: Called to setup DMA configuration on a device on
- this bus.
+ * this bus.
* @pm: Power management operations of this bus, callback the specific
* device driver's pm-ops.
* @iommu_ops: IOMMU specific operations for this bus, used to attach IOMMU
@@ -339,6 +339,8 @@ struct device *driver_find_device(struct device_driver *drv,
struct device *start, void *data,
int (*match)(struct device *dev, void *data));
+int driver_deferred_probe_check_state(struct device *dev);
+
/**
* struct subsys_interface - interfaces to device functions
* @name: name of the device function
@@ -384,6 +386,9 @@ int subsys_virtual_register(struct bus_type *subsys,
* @shutdown_pre: Called at shut-down time before driver shutdown.
 * @ns_type: Callbacks so sysfs can determine namespaces.
* @namespace: Namespace of the device belongs to this class.
+ * @get_ownership: Allows class to specify uid/gid of the sysfs directories
+ * for the devices belonging to the class. Usually tied to
+ * device's namespace.
* @pm: The default device power management operations of this class.
* @p: The private data of the driver core, no one other than the
* driver core can touch this.
@@ -413,6 +418,8 @@ struct class {
const struct kobj_ns_type_operations *ns_type;
const void *(*namespace)(struct device *dev);
+ void (*get_ownership)(struct device *dev, kuid_t *uid, kgid_t *gid);
+
const struct dev_pm_ops *pm;
struct subsys_private *p;
@@ -696,6 +703,10 @@ extern void devm_free_pages(struct device *dev, unsigned long addr);
void __iomem *devm_ioremap_resource(struct device *dev, struct resource *res);
+void __iomem *devm_of_iomap(struct device *dev,
+ struct device_node *node, int index,
+ resource_size_t *size);
+
/* allows to add/remove a custom action to devres stack */
int devm_add_action(struct device *dev, void (*action)(void *), void *data);
void devm_remove_action(struct device *dev, void (*action)(void *), void *data);
@@ -784,14 +795,16 @@ enum device_link_state {
* Device link flags.
*
* STATELESS: The core won't track the presence of supplier/consumer drivers.
- * AUTOREMOVE: Remove this link automatically on consumer driver unbind.
+ * AUTOREMOVE_CONSUMER: Remove the link automatically on consumer driver unbind.
* PM_RUNTIME: If set, the runtime PM framework will use this link.
* RPM_ACTIVE: Run pm_runtime_get_sync() on the supplier during link creation.
+ * AUTOREMOVE_SUPPLIER: Remove the link automatically on supplier driver unbind.
*/
-#define DL_FLAG_STATELESS BIT(0)
-#define DL_FLAG_AUTOREMOVE BIT(1)
-#define DL_FLAG_PM_RUNTIME BIT(2)
-#define DL_FLAG_RPM_ACTIVE BIT(3)
+#define DL_FLAG_STATELESS BIT(0)
+#define DL_FLAG_AUTOREMOVE_CONSUMER BIT(1)
+#define DL_FLAG_PM_RUNTIME BIT(2)
+#define DL_FLAG_RPM_ACTIVE BIT(3)
+#define DL_FLAG_AUTOREMOVE_SUPPLIER BIT(4)
/**
* struct device_link - Device link representation.
@@ -886,6 +899,8 @@ struct dev_links_info {
* @coherent_dma_mask: Like dma_mask, but for alloc_coherent mapping as not all
* hardware supports 64-bit addresses for consistent allocations
* such descriptors.
+ * @bus_dma_mask: Mask of an upstream bridge or bus which imposes a smaller DMA
+ * limit than the device itself supports.
* @dma_pfn_offset: offset of DMA memory range relatively of RAM
* @dma_parms: A low level driver may set these to teach IOMMU code about
* segment limitations.
@@ -912,8 +927,6 @@ struct dev_links_info {
* @offline: Set after successful invocation of bus type's .offline().
* @of_node_reused: Set if the device-tree node is shared with an ancestor
* device.
- * @dma_32bit_limit: bridge limited to 32bit DMA even if the device itself
- * indicates support for a higher limit in the dma_mask field.
*
* At the lowest level, every device in a Linux system is represented by an
* instance of struct device. The device structure contains the information
@@ -967,6 +980,7 @@ struct device {
not all hardware supports
64 bit addresses for consistent
allocations such descriptors. */
+ u64 bus_dma_mask; /* upstream dma_mask constraint */
unsigned long dma_pfn_offset;
struct device_dma_parameters *dma_parms;
@@ -1002,7 +1016,6 @@ struct device {
bool offline_disabled:1;
bool offline:1;
bool of_node_reused:1;
- bool dma_32bit_limit:1;
};
static inline struct device *kobj_to_dev(struct kobject *kobj)
@@ -1316,31 +1329,36 @@ extern const char *dev_driver_string(const struct device *dev);
struct device_link *device_link_add(struct device *consumer,
struct device *supplier, u32 flags);
void device_link_del(struct device_link *link);
+void device_link_remove(void *consumer, struct device *supplier);
+
+#ifndef dev_fmt
+#define dev_fmt(fmt) fmt
+#endif
#ifdef CONFIG_PRINTK
-extern __printf(3, 0)
+__printf(3, 0)
int dev_vprintk_emit(int level, const struct device *dev,
const char *fmt, va_list args);
-extern __printf(3, 4)
+__printf(3, 4)
int dev_printk_emit(int level, const struct device *dev, const char *fmt, ...);
-extern __printf(3, 4)
+__printf(3, 4)
void dev_printk(const char *level, const struct device *dev,
const char *fmt, ...);
-extern __printf(2, 3)
-void dev_emerg(const struct device *dev, const char *fmt, ...);
-extern __printf(2, 3)
-void dev_alert(const struct device *dev, const char *fmt, ...);
-extern __printf(2, 3)
-void dev_crit(const struct device *dev, const char *fmt, ...);
-extern __printf(2, 3)
-void dev_err(const struct device *dev, const char *fmt, ...);
-extern __printf(2, 3)
-void dev_warn(const struct device *dev, const char *fmt, ...);
-extern __printf(2, 3)
-void dev_notice(const struct device *dev, const char *fmt, ...);
-extern __printf(2, 3)
+__printf(2, 3)
+void _dev_emerg(const struct device *dev, const char *fmt, ...);
+__printf(2, 3)
+void _dev_alert(const struct device *dev, const char *fmt, ...);
+__printf(2, 3)
+void _dev_crit(const struct device *dev, const char *fmt, ...);
+__printf(2, 3)
+void _dev_err(const struct device *dev, const char *fmt, ...);
+__printf(2, 3)
+void _dev_warn(const struct device *dev, const char *fmt, ...);
+__printf(2, 3)
+void _dev_notice(const struct device *dev, const char *fmt, ...);
+__printf(2, 3)
void _dev_info(const struct device *dev, const char *fmt, ...);
#else
@@ -1358,26 +1376,26 @@ static inline void __dev_printk(const char *level, const struct device *dev,
{}
static inline __printf(3, 4)
void dev_printk(const char *level, const struct device *dev,
- const char *fmt, ...)
+ const char *fmt, ...)
{}
static inline __printf(2, 3)
-void dev_emerg(const struct device *dev, const char *fmt, ...)
+void _dev_emerg(const struct device *dev, const char *fmt, ...)
{}
static inline __printf(2, 3)
-void dev_crit(const struct device *dev, const char *fmt, ...)
+void _dev_crit(const struct device *dev, const char *fmt, ...)
{}
static inline __printf(2, 3)
-void dev_alert(const struct device *dev, const char *fmt, ...)
+void _dev_alert(const struct device *dev, const char *fmt, ...)
{}
static inline __printf(2, 3)
-void dev_err(const struct device *dev, const char *fmt, ...)
+void _dev_err(const struct device *dev, const char *fmt, ...)
{}
static inline __printf(2, 3)
-void dev_warn(const struct device *dev, const char *fmt, ...)
+void _dev_warn(const struct device *dev, const char *fmt, ...)
{}
static inline __printf(2, 3)
-void dev_notice(const struct device *dev, const char *fmt, ...)
+void _dev_notice(const struct device *dev, const char *fmt, ...)
{}
static inline __printf(2, 3)
void _dev_info(const struct device *dev, const char *fmt, ...)
@@ -1386,27 +1404,36 @@ void _dev_info(const struct device *dev, const char *fmt, ...)
#endif
/*
- * Stupid hackaround for existing uses of non-printk uses dev_info
- *
- * Note that the definition of dev_info below is actually _dev_info
- * and a macro is used to avoid redefining dev_info
+ * #defines for all the dev_<level> macros; each prefixes its format string
+ * with whatever the driver supplies via #define dev_fmt(fmt) ...
*/
-#define dev_info(dev, fmt, arg...) _dev_info(dev, fmt, ##arg)
+#define dev_emerg(dev, fmt, ...) \
+ _dev_emerg(dev, dev_fmt(fmt), ##__VA_ARGS__)
+#define dev_crit(dev, fmt, ...) \
+ _dev_crit(dev, dev_fmt(fmt), ##__VA_ARGS__)
+#define dev_alert(dev, fmt, ...) \
+ _dev_alert(dev, dev_fmt(fmt), ##__VA_ARGS__)
+#define dev_err(dev, fmt, ...) \
+ _dev_err(dev, dev_fmt(fmt), ##__VA_ARGS__)
+#define dev_warn(dev, fmt, ...) \
+ _dev_warn(dev, dev_fmt(fmt), ##__VA_ARGS__)
+#define dev_notice(dev, fmt, ...) \
+ _dev_notice(dev, dev_fmt(fmt), ##__VA_ARGS__)
+#define dev_info(dev, fmt, ...) \
+ _dev_info(dev, dev_fmt(fmt), ##__VA_ARGS__)
#if defined(CONFIG_DYNAMIC_DEBUG)
-#define dev_dbg(dev, format, ...) \
-do { \
- dynamic_dev_dbg(dev, format, ##__VA_ARGS__); \
-} while (0)
+#define dev_dbg(dev, fmt, ...) \
+ dynamic_dev_dbg(dev, dev_fmt(fmt), ##__VA_ARGS__)
#elif defined(DEBUG)
-#define dev_dbg(dev, format, arg...) \
- dev_printk(KERN_DEBUG, dev, format, ##arg)
+#define dev_dbg(dev, fmt, ...) \
+ dev_printk(KERN_DEBUG, dev, dev_fmt(fmt), ##__VA_ARGS__)
#else
-#define dev_dbg(dev, format, arg...) \
-({ \
- if (0) \
- dev_printk(KERN_DEBUG, dev, format, ##arg); \
+#define dev_dbg(dev, fmt, ...) \
+({ \
+ if (0) \
+ dev_printk(KERN_DEBUG, dev, dev_fmt(fmt), ##__VA_ARGS__); \
})
#endif
@@ -1478,7 +1505,7 @@ do { \
DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \
if (unlikely(descriptor.flags & _DPRINTK_FLAGS_PRINT) && \
__ratelimit(&_rs)) \
- __dynamic_dev_dbg(&descriptor, dev, fmt, \
+ __dynamic_dev_dbg(&descriptor, dev, dev_fmt(fmt), \
##__VA_ARGS__); \
} while (0)
#elif defined(DEBUG)
@@ -1488,23 +1515,23 @@ do { \
DEFAULT_RATELIMIT_INTERVAL, \
DEFAULT_RATELIMIT_BURST); \
if (__ratelimit(&_rs)) \
- dev_printk(KERN_DEBUG, dev, fmt, ##__VA_ARGS__); \
+ dev_printk(KERN_DEBUG, dev, dev_fmt(fmt), ##__VA_ARGS__); \
} while (0)
#else
#define dev_dbg_ratelimited(dev, fmt, ...) \
do { \
if (0) \
- dev_printk(KERN_DEBUG, dev, fmt, ##__VA_ARGS__); \
+ dev_printk(KERN_DEBUG, dev, dev_fmt(fmt), ##__VA_ARGS__); \
} while (0)
#endif
#ifdef VERBOSE_DEBUG
#define dev_vdbg dev_dbg
#else
-#define dev_vdbg(dev, format, arg...) \
-({ \
- if (0) \
- dev_printk(KERN_DEBUG, dev, format, ##arg); \
+#define dev_vdbg(dev, fmt, ...) \
+({ \
+ if (0) \
+ dev_printk(KERN_DEBUG, dev, dev_fmt(fmt), ##__VA_ARGS__); \
})
#endif
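The new dev_fmt() hook mirrors pr_fmt(): a driver that defines it before pulling in the dev_printk() machinery gets every dev_<level>() message prefixed. A sketch (the "mydrv: " prefix is only an example):

/* At the top of a driver .c file, before any include that reaches device.h. */
#define dev_fmt(fmt) "mydrv: " fmt

#include <linux/device.h>

static void example_report(struct device *dev)
{
	dev_info(dev, "link is up\n");	/* message text becomes "mydrv: link is up" */
}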
diff --git a/include/linux/dm-kcopyd.h b/include/linux/dm-kcopyd.h
index cfac8588ed56..e42de7750c88 100644
--- a/include/linux/dm-kcopyd.h
+++ b/include/linux/dm-kcopyd.h
@@ -62,9 +62,9 @@ void dm_kcopyd_client_destroy(struct dm_kcopyd_client *kc);
typedef void (*dm_kcopyd_notify_fn)(int read_err, unsigned long write_err,
void *context);
-int dm_kcopyd_copy(struct dm_kcopyd_client *kc, struct dm_io_region *from,
- unsigned num_dests, struct dm_io_region *dests,
- unsigned flags, dm_kcopyd_notify_fn fn, void *context);
+void dm_kcopyd_copy(struct dm_kcopyd_client *kc, struct dm_io_region *from,
+ unsigned num_dests, struct dm_io_region *dests,
+ unsigned flags, dm_kcopyd_notify_fn fn, void *context);
/*
* Prepare a callback and submit it via the kcopyd thread.
@@ -81,9 +81,9 @@ void *dm_kcopyd_prepare_callback(struct dm_kcopyd_client *kc,
dm_kcopyd_notify_fn fn, void *context);
void dm_kcopyd_do_callback(void *job, int read_err, unsigned long write_err);
-int dm_kcopyd_zero(struct dm_kcopyd_client *kc,
- unsigned num_dests, struct dm_io_region *dests,
- unsigned flags, dm_kcopyd_notify_fn fn, void *context);
+void dm_kcopyd_zero(struct dm_kcopyd_client *kc,
+ unsigned num_dests, struct dm_io_region *dests,
+ unsigned flags, dm_kcopyd_notify_fn fn, void *context);
#endif /* __KERNEL__ */
#endif /* _LINUX_DM_KCOPYD_H */
diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
index 085db2fee2d7..58725f890b5b 100644
--- a/include/linux/dma-buf.h
+++ b/include/linux/dma-buf.h
@@ -39,12 +39,12 @@ struct dma_buf_attachment;
/**
* struct dma_buf_ops - operations possible on struct dma_buf
- * @map_atomic: maps a page from the buffer into kernel address
+ * @map_atomic: [optional] maps a page from the buffer into kernel address
* space, users may not block until the subsequent unmap call.
* This callback must not sleep.
 * @unmap_atomic: [optional] unmaps an atomically mapped page from the buffer.
 * This callback must not sleep.
- * @map: maps a page from the buffer into kernel address space.
+ * @map: [optional] maps a page from the buffer into kernel address space.
* @unmap: [optional] unmaps a page from the buffer.
* @vmap: [optional] creates a virtual mapping for the buffer into kernel
* address space. Same restrictions as for vmap and friends apply.
@@ -55,11 +55,11 @@ struct dma_buf_ops {
* @attach:
*
* This is called from dma_buf_attach() to make sure that a given
- * &device can access the provided &dma_buf. Exporters which support
- * buffer objects in special locations like VRAM or device-specific
- * carveout areas should check whether the buffer could be move to
- * system memory (or directly accessed by the provided device), and
- * otherwise need to fail the attach operation.
+ * &dma_buf_attachment.dev can access the provided &dma_buf. Exporters
+ * which support buffer objects in special locations like VRAM or
+ * device-specific carveout areas should check whether the buffer could
+ * be moved to system memory (or directly accessed by the provided
+ * device), and otherwise need to fail the attach operation.
*
* The exporter should also in general check whether the current
 * allocation fulfills the DMA constraints of the new device. If this
@@ -77,8 +77,7 @@ struct dma_buf_ops {
* to signal that backing storage is already allocated and incompatible
* with the requirements of requesting device.
*/
- int (*attach)(struct dma_buf *, struct device *,
- struct dma_buf_attachment *);
+ int (*attach)(struct dma_buf *, struct dma_buf_attachment *);
/**
* @detach:
@@ -206,8 +205,6 @@ struct dma_buf_ops {
* to be restarted.
*/
int (*end_cpu_access)(struct dma_buf *, enum dma_data_direction);
- void *(*map_atomic)(struct dma_buf *, unsigned long);
- void (*unmap_atomic)(struct dma_buf *, unsigned long, void *);
void *(*map)(struct dma_buf *, unsigned long);
void (*unmap)(struct dma_buf *, unsigned long, void *);
@@ -395,8 +392,6 @@ int dma_buf_begin_cpu_access(struct dma_buf *dma_buf,
enum dma_data_direction dir);
int dma_buf_end_cpu_access(struct dma_buf *dma_buf,
enum dma_data_direction dir);
-void *dma_buf_kmap_atomic(struct dma_buf *, unsigned long);
-void dma_buf_kunmap_atomic(struct dma_buf *, unsigned long, void *);
void *dma_buf_kmap(struct dma_buf *, unsigned long);
void dma_buf_kunmap(struct dma_buf *, unsigned long, void *);
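Because the importing device is already recorded in the attachment, exporters lose the separate struct device argument and read it from there instead. A sketch with the new @attach signature (the body is illustrative only):

static int example_dmabuf_attach(struct dma_buf *dmabuf,
				 struct dma_buf_attachment *attach)
{
	/* The importing device is now reached through the attachment itself. */
	struct device *dev = attach->dev;

	dev_dbg(dev, "attaching to a buffer of %zu bytes\n", dmabuf->size);
	return 0;
}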
diff --git a/include/linux/dma-contiguous.h b/include/linux/dma-contiguous.h
index b67bf6ac907d..f247e8aa5e3d 100644
--- a/include/linux/dma-contiguous.h
+++ b/include/linux/dma-contiguous.h
@@ -48,7 +48,7 @@
* CMA should not be used by the device drivers directly. It is
* only a helper framework for dma-mapping subsystem.
*
- * For more information, see kernel-docs in drivers/base/dma-contiguous.c
+ * For more information, see kernel-docs in kernel/dma/contiguous.c
*/
#ifdef __KERNEL__
@@ -112,7 +112,7 @@ static inline int dma_declare_contiguous(struct device *dev, phys_addr_t size,
}
struct page *dma_alloc_from_contiguous(struct device *dev, size_t count,
- unsigned int order, gfp_t gfp_mask);
+ unsigned int order, bool no_warn);
bool dma_release_from_contiguous(struct device *dev, struct page *pages,
int count);
@@ -145,7 +145,7 @@ int dma_declare_contiguous(struct device *dev, phys_addr_t size,
static inline
struct page *dma_alloc_from_contiguous(struct device *dev, size_t count,
- unsigned int order, gfp_t gfp_mask)
+ unsigned int order, bool no_warn)
{
return NULL;
}
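Callers that used to forward a full gfp mask now pass only whether allocation-failure warnings should be suppressed; a typical conversion, sketched:

static struct page *example_cma_alloc(struct device *dev, size_t size, gfp_t gfp)
{
	/* Only the __GFP_NOWARN intent survives, as the new no_warn bool. */
	return dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
					 get_order(size), gfp & __GFP_NOWARN);
}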
diff --git a/include/linux/dma-direction.h b/include/linux/dma-direction.h
index 3649a031893a..9c96e30e6a0b 100644
--- a/include/linux/dma-direction.h
+++ b/include/linux/dma-direction.h
@@ -1,14 +1,12 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_DMA_DIRECTION_H
#define _LINUX_DMA_DIRECTION_H
-/*
- * These definitions mirror those in pci.h, so they can be used
- * interchangeably with their PCI_ counterparts.
- */
+
enum dma_data_direction {
DMA_BIDIRECTIONAL = 0,
DMA_TO_DEVICE = 1,
DMA_FROM_DEVICE = 2,
DMA_NONE = 3,
};
+
#endif
diff --git a/include/linux/dma-fence.h b/include/linux/dma-fence.h
index eb9b05aa5aea..02dba8cd033d 100644
--- a/include/linux/dma-fence.h
+++ b/include/linux/dma-fence.h
@@ -166,7 +166,8 @@ struct dma_fence_ops {
* released when the fence is signalled (through e.g. the interrupt
* handler).
*
- * This callback is mandatory.
+ * This callback is optional. If this callback is not present, then the
+ * driver must always have signaling enabled.
*/
bool (*enable_signaling)(struct dma_fence *fence);
@@ -190,11 +191,14 @@ struct dma_fence_ops {
/**
* @wait:
*
- * Custom wait implementation, or dma_fence_default_wait.
+ * Custom wait implementation, defaults to dma_fence_default_wait() if
+ * not set.
*
- * Must not be NULL, set to dma_fence_default_wait for default implementation.
- * the dma_fence_default_wait implementation should work for any fence, as long
- * as enable_signaling works correctly.
+ * The dma_fence_default_wait implementation should work for any fence, as long
+ * as @enable_signaling works correctly. This hook allows drivers to
+ * have an optimized version for the case where a process context is
+ * already available, e.g. if @enable_signaling for the general case
+ * needs to set up a worker thread.
*
* Must return -ERESTARTSYS if the wait is intr = true and the wait was
* interrupted, and remaining jiffies if fence has signaled, or 0 if wait
@@ -202,7 +206,7 @@ struct dma_fence_ops {
* which should be treated as if the fence is signaled. For example a hardware
* lockup could be reported like that.
*
- * This callback is mandatory.
+ * This callback is optional.
*/
signed long (*wait)(struct dma_fence *fence,
bool intr, signed long timeout);
@@ -218,17 +222,6 @@ struct dma_fence_ops {
void (*release)(struct dma_fence *fence);
/**
- * @fill_driver_data:
- *
- * Callback to fill in free-form debug info.
- *
- * Returns amount of bytes filled, or negative error on failure.
- *
- * This callback is optional.
- */
- int (*fill_driver_data)(struct dma_fence *fence, void *data, int size);
-
- /**
* @fence_value_str:
*
* Callback to fill in free-form debug info specific to this fence, like
@@ -242,8 +235,9 @@ struct dma_fence_ops {
* @timeline_value_str:
*
* Fills in the current value of the timeline as a string, like the
- * sequence number. This should match what @fill_driver_data prints for
- * the most recently signalled fence (assuming no delayed signalling).
+ * sequence number. Note that the specific fence passed to this function
+ * should not matter, drivers should only use it to look up the
+ * corresponding timeline structures.
*/
void (*timeline_value_str)(struct dma_fence *fence,
char *str, int size);
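With @enable_signaling and @wait optional and @fill_driver_data removed, the smallest viable ops table only names the driver and timeline. A sketch (the example_* names are placeholders):

static const char *example_get_driver_name(struct dma_fence *fence)
{
	return "example";
}

static const char *example_get_timeline_name(struct dma_fence *fence)
{
	return "example.timeline";
}

static const struct dma_fence_ops example_fence_ops = {
	.get_driver_name	= example_get_driver_name,
	.get_timeline_name	= example_get_timeline_name,
	/* enable_signaling, wait and release may all be left unset. */
};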
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index f9cc309507d9..1db6a6b46d0d 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -538,10 +538,17 @@ static inline void dma_free_attrs(struct device *dev, size_t size,
const struct dma_map_ops *ops = get_dma_ops(dev);
BUG_ON(!ops);
- WARN_ON(irqs_disabled());
if (dma_release_from_dev_coherent(dev, get_order(size), cpu_addr))
return;
+ /*
+ * On non-coherent platforms which implement DMA-coherent buffers via
+ * non-cacheable remaps, ops->free() may call vunmap(). Thus getting
+ * this far in IRQ context is a) at risk of a BUG_ON() or trying to
+ * sleep on some machines, and b) an indication that the driver is
+ * probably misusing the coherent API anyway.
+ */
+ WARN_ON(irqs_disabled());
if (!ops->free || !cpu_addr)
return;
diff --git a/include/linux/dma-noncoherent.h b/include/linux/dma-noncoherent.h
index 10b2654d549b..a0aa00cc909d 100644
--- a/include/linux/dma-noncoherent.h
+++ b/include/linux/dma-noncoherent.h
@@ -44,4 +44,12 @@ static inline void arch_sync_dma_for_cpu(struct device *dev,
}
#endif /* ARCH_HAS_SYNC_DMA_FOR_CPU */
+#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL
+void arch_sync_dma_for_cpu_all(struct device *dev);
+#else
+static inline void arch_sync_dma_for_cpu_all(struct device *dev)
+{
+}
+#endif /* CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL */
+
#endif /* _LINUX_DMA_NONCOHERENT_H */
diff --git a/include/linux/dma/pxa-dma.h b/include/linux/dma/pxa-dma.h
index e56ec7af4fd7..9fc594f69eff 100644
--- a/include/linux/dma/pxa-dma.h
+++ b/include/linux/dma/pxa-dma.h
@@ -9,6 +9,15 @@ enum pxad_chan_prio {
PXAD_PRIO_LOWEST,
};
+/**
+ * struct pxad_param - dma channel request parameters
+ * @drcmr: requestor line number
+ * @prio: minimal mandatory priority of the channel
+ *
+ * If a requested channel is granted, its priority will be at least @prio,
+ * i.e. if PXAD_PRIO_LOW is required, the requested channel will be either
+ * PXAD_PRIO_LOW, PXAD_PRIO_NORMAL or PXAD_PRIO_HIGHEST.
+ */
struct pxad_param {
unsigned int drcmr;
enum pxad_chan_prio prio;
diff --git a/include/linux/dma/xilinx_dma.h b/include/linux/dma/xilinx_dma.h
index 34b98f276ed0..5b6e61e4b3aa 100644
--- a/include/linux/dma/xilinx_dma.h
+++ b/include/linux/dma/xilinx_dma.h
@@ -27,6 +27,7 @@
* @delay: Delay counter
* @reset: Reset Channel
* @ext_fsync: External Frame Sync source
+ * @vflip_en: Vertical Flip enable
*/
struct xilinx_vdma_config {
int frm_dly;
@@ -39,6 +40,7 @@ struct xilinx_vdma_config {
int delay;
int reset;
int ext_fsync;
+ bool vflip_en;
};
int xilinx_vdma_channel_set_config(struct dma_chan *dchan,
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index 861be5cab1df..d49ec5c31944 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -415,7 +415,9 @@ enum dma_residue_granularity {
* each type, the dma controller should set BIT(<TYPE>) and same
* should be checked by controller as well
* @max_burst: max burst capability per-transfer
- * @cmd_pause: true, if pause and thereby resume is supported
+ * @cmd_pause: true, if pause is supported (i.e. for reading residue or
+ * for resume later)
+ * @cmd_resume: true, if resume is supported
* @cmd_terminate: true, if terminate cmd is supported
* @residue_granularity: granularity of the reported transfer residue
* @descriptor_reuse: if a descriptor can be reused by client and
@@ -427,6 +429,7 @@ struct dma_slave_caps {
u32 directions;
u32 max_burst;
bool cmd_pause;
+ bool cmd_resume;
bool cmd_terminate;
enum dma_residue_granularity residue_granularity;
bool descriptor_reuse;
@@ -1403,6 +1406,7 @@ static inline int dmaengine_desc_free(struct dma_async_tx_descriptor *desc)
/* --- DMA device --- */
int dma_async_device_register(struct dma_device *device);
+int dmaenginem_async_device_register(struct dma_device *device);
void dma_async_device_unregister(struct dma_device *device);
void dma_run_dependencies(struct dma_async_tx_descriptor *tx);
struct dma_chan *dma_get_slave_channel(struct dma_chan *chan);
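Since pause and resume are now reported separately, a client that intends to pause and later resume should check both capability bits; a hedged sketch:

static bool example_can_pause_resume(struct dma_chan *chan)
{
	struct dma_slave_caps caps;

	if (dma_get_slave_caps(chan, &caps))
		return false;

	/* cmd_pause alone now only guarantees pause (e.g. for residue reads). */
	return caps.cmd_pause && caps.cmd_resume;
}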
diff --git a/include/linux/dmar.h b/include/linux/dmar.h
index e2433bc50210..843a41ba7e28 100644
--- a/include/linux/dmar.h
+++ b/include/linux/dmar.h
@@ -265,11 +265,6 @@ static inline void dmar_copy_shared_irte(struct irte *dst, struct irte *src)
#define PDA_LOW_BIT 26
#define PDA_HIGH_BIT 32
-enum {
- IRQ_REMAP_XAPIC_MODE,
- IRQ_REMAP_X2APIC_MODE,
-};
-
/* Can't use the common MSI interrupt functions
* since DMAR is not a pci device
*/
diff --git a/include/linux/efi.h b/include/linux/efi.h
index 56add823f190..401e4b254e30 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -894,6 +894,16 @@ typedef struct _efi_file_handle {
void *flush;
} efi_file_handle_t;
+typedef struct {
+ u64 revision;
+ u32 open_volume;
+} efi_file_io_interface_32_t;
+
+typedef struct {
+ u64 revision;
+ u64 open_volume;
+} efi_file_io_interface_64_t;
+
typedef struct _efi_file_io_interface {
u64 revision;
int (*open_volume)(struct _efi_file_io_interface *,
@@ -988,14 +998,12 @@ extern void efi_memmap_walk (efi_freemem_callback_t callback, void *arg);
extern void efi_gettimeofday (struct timespec64 *ts);
extern void efi_enter_virtual_mode (void); /* switch EFI to virtual mode, if possible */
#ifdef CONFIG_X86
-extern void efi_late_init(void);
extern void efi_free_boot_services(void);
extern efi_status_t efi_query_variable_store(u32 attributes,
unsigned long size,
bool nonblocking);
extern void efi_find_mirror(void);
#else
-static inline void efi_late_init(void) {}
static inline void efi_free_boot_services(void) {}
static inline efi_status_t efi_query_variable_store(u32 attributes,
@@ -1651,4 +1659,7 @@ struct linux_efi_tpm_eventlog {
extern int efi_tpm_eventlog_init(void);
+/* Workqueue to queue EFI Runtime Services */
+extern struct workqueue_struct *efi_rts_wq;
+
#endif /* _LINUX_EFI_H */
diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h
index 79563840c295..572e11bb8696 100644
--- a/include/linux/etherdevice.h
+++ b/include/linux/etherdevice.h
@@ -59,8 +59,7 @@ struct net_device *devm_alloc_etherdev_mqs(struct device *dev, int sizeof_priv,
unsigned int rxqs);
#define devm_alloc_etherdev(dev, sizeof_priv) devm_alloc_etherdev_mqs(dev, sizeof_priv, 1, 1)
-struct sk_buff **eth_gro_receive(struct sk_buff **head,
- struct sk_buff *skb);
+struct sk_buff *eth_gro_receive(struct list_head *head, struct sk_buff *skb);
int eth_gro_complete(struct sk_buff *skb, int nhoff);
/* Reserved Ethernet Addresses per IEEE 802.1Q */
diff --git a/include/linux/eventfd.h b/include/linux/eventfd.h
index 7094718b653b..ffcc7724ca21 100644
--- a/include/linux/eventfd.h
+++ b/include/linux/eventfd.h
@@ -11,6 +11,7 @@
#include <linux/fcntl.h>
#include <linux/wait.h>
+#include <linux/err.h>
/*
* CAREFUL: Check include/uapi/asm-generic/fcntl.h when defining
diff --git a/include/linux/export.h b/include/linux/export.h
index b768d6dd3c90..ae072bc5aacf 100644
--- a/include/linux/export.h
+++ b/include/linux/export.h
@@ -18,12 +18,6 @@
#define VMLINUX_SYMBOL_STR(x) __VMLINUX_SYMBOL_STR(x)
#ifndef __ASSEMBLY__
-struct kernel_symbol
-{
- unsigned long value;
- const char *name;
-};
-
#ifdef MODULE
extern struct module __this_module;
#define THIS_MODULE (&__this_module)
@@ -54,19 +48,58 @@ extern struct module __this_module;
#define __CRC_SYMBOL(sym, sec)
#endif
+#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS
+#include <linux/compiler.h>
+/*
+ * Emit the ksymtab entry as a pair of relative references: this reduces
+ * the size by half on 64-bit architectures, and eliminates the need for
+ * absolute relocations that require runtime processing on relocatable
+ * kernels.
+ */
+#define __KSYMTAB_ENTRY(sym, sec) \
+ __ADDRESSABLE(sym) \
+ asm(" .section \"___ksymtab" sec "+" #sym "\", \"a\" \n" \
+ " .balign 8 \n" \
+ "__ksymtab_" #sym ": \n" \
+ " .long " #sym "- . \n" \
+ " .long __kstrtab_" #sym "- . \n" \
+ " .previous \n")
+
+struct kernel_symbol {
+ int value_offset;
+ int name_offset;
+};
+#else
+#define __KSYMTAB_ENTRY(sym, sec) \
+ static const struct kernel_symbol __ksymtab_##sym \
+ __attribute__((section("___ksymtab" sec "+" #sym), used)) \
+ = { (unsigned long)&sym, __kstrtab_##sym }
+
+struct kernel_symbol {
+ unsigned long value;
+ const char *name;
+};
+#endif
+
/* For every exported symbol, place a struct in the __ksymtab section */
#define ___EXPORT_SYMBOL(sym, sec) \
extern typeof(sym) sym; \
__CRC_SYMBOL(sym, sec) \
static const char __kstrtab_##sym[] \
- __attribute__((section("__ksymtab_strings"), aligned(1))) \
+ __attribute__((section("__ksymtab_strings"), used, aligned(1))) \
= #sym; \
- static const struct kernel_symbol __ksymtab_##sym \
- __used \
- __attribute__((section("___ksymtab" sec "+" #sym), used)) \
- = { (unsigned long)&sym, __kstrtab_##sym }
+ __KSYMTAB_ENTRY(sym, sec)
+
+#if defined(__DISABLE_EXPORTS)
+
+/*
+ * Allow symbol exports to be disabled completely so that C code may
+ * be reused in other execution contexts such as the UEFI stub or the
+ * decompressor.
+ */
+#define __EXPORT_SYMBOL(sym, sec)
-#if defined(__KSYM_DEPS__)
+#elif defined(__KSYM_DEPS__)
/*
* For fine grained build dependencies, we want to tell the build system
diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h
index aa5db8b5521a..f70f8ac9c4f4 100644
--- a/include/linux/f2fs_fs.h
+++ b/include/linux/f2fs_fs.h
@@ -304,11 +304,6 @@ struct f2fs_node {
* For NAT entries
*/
#define NAT_ENTRY_PER_BLOCK (PAGE_SIZE / sizeof(struct f2fs_nat_entry))
-#define NAT_ENTRY_BITMAP_SIZE ((NAT_ENTRY_PER_BLOCK + 7) / 8)
-#define NAT_ENTRY_BITMAP_SIZE_ALIGNED \
- ((NAT_ENTRY_BITMAP_SIZE + BITS_PER_LONG - 1) / \
- BITS_PER_LONG * BITS_PER_LONG)
-
struct f2fs_nat_entry {
__u8 version; /* latest version of cached nat entry */
diff --git a/include/linux/fb.h b/include/linux/fb.h
index aa74a228bb92..3e7e75383d32 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -126,7 +126,7 @@ struct fb_cursor_user {
/* The resolution of the passed in fb_info about to change */
#define FB_EVENT_MODE_CHANGE 0x01
-/* The display on this fb_info is beeing suspended, no access to the
+/* The display on this fb_info is being suspended, no access to the
* framebuffer is allowed any more after that call returns
*/
#define FB_EVENT_SUSPEND 0x02
@@ -159,9 +159,9 @@ struct fb_cursor_user {
#define FB_EVENT_FB_UNBIND 0x0E
/* CONSOLE-SPECIFIC: remap all consoles to new fb - for vga_switcheroo */
#define FB_EVENT_REMAP_ALL_CONSOLE 0x0F
-/* A hardware display blank early change occured */
+/* A hardware display blank early change occurred */
#define FB_EARLY_EVENT_BLANK 0x10
-/* A hardware display blank revert early change occured */
+/* A hardware display blank revert early change occurred */
#define FB_R_EARLY_EVENT_BLANK 0x11
struct fb_event {
@@ -650,6 +650,10 @@ extern struct fb_info *registered_fb[FB_MAX];
extern int num_registered_fb;
extern struct class *fb_class;
+#define for_each_registered_fb(i) \
+ for (i = 0; i < FB_MAX; i++) \
+ if (!registered_fb[i]) {} else
+
extern int lock_fb_info(struct fb_info *info);
static inline void unlock_fb_info(struct fb_info *info)
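The new iterator visits only populated registered_fb[] slots, so callers can drop their explicit NULL checks; a small usage sketch:

static void example_list_framebuffers(void)
{
	int i;

	for_each_registered_fb(i)
		pr_info("fb%d: %s\n", i, registered_fb[i]->fix.id);
}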
diff --git a/include/linux/file.h b/include/linux/file.h
index 279720db984a..6b2fb032416c 100644
--- a/include/linux/file.h
+++ b/include/linux/file.h
@@ -17,9 +17,12 @@ extern void fput(struct file *);
struct file_operations;
struct vfsmount;
struct dentry;
+struct inode;
struct path;
-extern struct file *alloc_file(const struct path *, fmode_t mode,
- const struct file_operations *fop);
+extern struct file *alloc_file_pseudo(struct inode *, struct vfsmount *,
+ const char *, int flags, const struct file_operations *);
+extern struct file *alloc_file_clone(struct file *, int flags,
+ const struct file_operations *);
static inline void fput_light(struct file *file, int fput_needed)
{
@@ -78,7 +81,6 @@ extern int f_dupfd(unsigned int from, struct file *file, unsigned flags);
extern int replace_fd(unsigned fd, struct file *file, unsigned flags);
extern void set_close_on_exec(unsigned int fd, int flag);
extern bool get_close_on_exec(unsigned int fd);
-extern void put_filp(struct file *);
extern int get_unused_fd_flags(unsigned flags);
extern void put_unused_fd(unsigned int fd);
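With alloc_file() dropped from this header in favour of the two new helpers, pseudo filesystems build their struct file from an inode plus a mount; a sketch under assumed names (examplefs_mnt and examplefs_fops are hypothetical):

static struct file *example_open_pseudo(struct inode *inode)
{
	struct file *file;

	file = alloc_file_pseudo(inode, examplefs_mnt, "example",
				 O_RDWR, &examplefs_fops);
	if (IS_ERR(file))
		iput(inode);	/* on failure, drop the inode reference ourselves */

	return file;
}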
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 45fc0f5000d8..6791a0ac0139 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -19,6 +19,7 @@
#include <linux/cryptohash.h>
#include <linux/set_memory.h>
#include <linux/kallsyms.h>
+#include <linux/if_vlan.h>
#include <net/sch_generic.h>
@@ -31,6 +32,7 @@ struct seccomp_data;
struct bpf_prog_aux;
struct xdp_rxq_info;
struct xdp_buff;
+struct sock_reuseport;
/* ArgX, context and stack frame pointer register positions. Note,
* Arg1, Arg2, Arg3, etc are used as argument mappings of function
@@ -469,15 +471,16 @@ struct sock_fprog_kern {
};
struct bpf_binary_header {
- unsigned int pages;
- u8 image[];
+ u32 pages;
+ /* Some arches need word alignment for their instructions */
+ u8 image[] __aligned(4);
};
struct bpf_prog {
u16 pages; /* Number of allocated pages */
u16 jited:1, /* Is our filter JIT'ed? */
jit_requested:1,/* archs need to JIT the prog */
- locked:1, /* Program image locked? */
+ undo_set_mem:1, /* Passed set_memory_ro() checkpoint */
gpl_compatible:1, /* Is filter GPL compatible? */
cb_access:1, /* Is control block accessed? */
dst_needed:1, /* Do we need dst entry? */
@@ -535,6 +538,19 @@ struct sk_msg_buff {
struct list_head list;
};
+struct bpf_redirect_info {
+ u32 ifindex;
+ u32 flags;
+ struct bpf_map *map;
+ struct bpf_map *map_to_flush;
+ u32 kern_flags;
+};
+
+DECLARE_PER_CPU(struct bpf_redirect_info, bpf_redirect_info);
+
+/* flags for bpf_redirect_info kern_flags */
+#define BPF_RI_F_RF_NO_DIRECT BIT(0) /* no napi_direct on return_frame */
+
/* Compute the linear packet data range [data, data_end) which
* will be accessed by various program types (cls_bpf, act_bpf,
* lwt, ...). Subsystems allowing direct data access must (!)
@@ -671,50 +687,27 @@ bpf_ctx_narrow_access_ok(u32 off, u32 size, u32 size_default)
#define bpf_classic_proglen(fprog) (fprog->len * sizeof(fprog->filter[0]))
-#ifdef CONFIG_ARCH_HAS_SET_MEMORY
-static inline void bpf_prog_lock_ro(struct bpf_prog *fp)
-{
- fp->locked = 1;
- WARN_ON_ONCE(set_memory_ro((unsigned long)fp, fp->pages));
-}
-
-static inline void bpf_prog_unlock_ro(struct bpf_prog *fp)
-{
- if (fp->locked) {
- WARN_ON_ONCE(set_memory_rw((unsigned long)fp, fp->pages));
- /* In case set_memory_rw() fails, we want to be the first
- * to crash here instead of some random place later on.
- */
- fp->locked = 0;
- }
-}
-
-static inline void bpf_jit_binary_lock_ro(struct bpf_binary_header *hdr)
-{
- WARN_ON_ONCE(set_memory_ro((unsigned long)hdr, hdr->pages));
-}
-
-static inline void bpf_jit_binary_unlock_ro(struct bpf_binary_header *hdr)
-{
- WARN_ON_ONCE(set_memory_rw((unsigned long)hdr, hdr->pages));
-}
-#else
static inline void bpf_prog_lock_ro(struct bpf_prog *fp)
{
+ fp->undo_set_mem = 1;
+ set_memory_ro((unsigned long)fp, fp->pages);
}
static inline void bpf_prog_unlock_ro(struct bpf_prog *fp)
{
+ if (fp->undo_set_mem)
+ set_memory_rw((unsigned long)fp, fp->pages);
}
static inline void bpf_jit_binary_lock_ro(struct bpf_binary_header *hdr)
{
+ set_memory_ro((unsigned long)hdr, hdr->pages);
}
static inline void bpf_jit_binary_unlock_ro(struct bpf_binary_header *hdr)
{
+ set_memory_rw((unsigned long)hdr, hdr->pages);
}
-#endif /* CONFIG_ARCH_HAS_SET_MEMORY */
static inline struct bpf_binary_header *
bpf_jit_binary_hdr(const struct bpf_prog *fp)
@@ -759,6 +752,7 @@ int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk);
int sk_attach_bpf(u32 ufd, struct sock *sk);
int sk_reuseport_attach_filter(struct sock_fprog *fprog, struct sock *sk);
int sk_reuseport_attach_bpf(u32 ufd, struct sock *sk);
+void sk_reuseport_prog_free(struct bpf_prog *prog);
int sk_detach_filter(struct sock *sk);
int sk_get_filter(struct sock *sk, struct sock_filter __user *filter,
unsigned int len);
@@ -786,6 +780,44 @@ static inline bool bpf_dump_raw_ok(void)
struct bpf_prog *bpf_patch_insn_single(struct bpf_prog *prog, u32 off,
const struct bpf_insn *patch, u32 len);
+void bpf_clear_redirect_map(struct bpf_map *map);
+
+static inline bool xdp_return_frame_no_direct(void)
+{
+ struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
+
+ return ri->kern_flags & BPF_RI_F_RF_NO_DIRECT;
+}
+
+static inline void xdp_set_return_frame_no_direct(void)
+{
+ struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
+
+ ri->kern_flags |= BPF_RI_F_RF_NO_DIRECT;
+}
+
+static inline void xdp_clear_return_frame_no_direct(void)
+{
+ struct bpf_redirect_info *ri = this_cpu_ptr(&bpf_redirect_info);
+
+ ri->kern_flags &= ~BPF_RI_F_RF_NO_DIRECT;
+}
+
+static inline int xdp_ok_fwd_dev(const struct net_device *fwd,
+ unsigned int pktlen)
+{
+ unsigned int len;
+
+ if (unlikely(!(fwd->flags & IFF_UP)))
+ return -ENETDOWN;
+
+ len = fwd->mtu + fwd->hard_header_len + VLAN_HLEN;
+ if (pktlen > len)
+ return -EMSGSIZE;
+
+ return 0;
+}
+
/* The pair of xdp_do_redirect and xdp_do_flush_map MUST be called in the
* same cpu context. Further for best results no more than a single map
* for the do_redirect/do_flush pair should be used. This limitation is
@@ -804,6 +836,20 @@ void bpf_warn_invalid_xdp_action(u32 act);
struct sock *do_sk_redirect_map(struct sk_buff *skb);
struct sock *do_msg_redirect_map(struct sk_msg_buff *md);
+#ifdef CONFIG_INET
+struct sock *bpf_run_sk_reuseport(struct sock_reuseport *reuse, struct sock *sk,
+ struct bpf_prog *prog, struct sk_buff *skb,
+ u32 hash);
+#else
+static inline struct sock *
+bpf_run_sk_reuseport(struct sock_reuseport *reuse, struct sock *sk,
+ struct bpf_prog *prog, struct sk_buff *skb,
+ u32 hash)
+{
+ return NULL;
+}
+#endif
+
#ifdef CONFIG_BPF_JIT
extern int bpf_jit_enable;
extern int bpf_jit_harden;
@@ -961,6 +1007,9 @@ static inline void bpf_prog_kallsyms_del(struct bpf_prog *fp)
}
#endif /* CONFIG_BPF_JIT */
+void bpf_prog_kallsyms_del_subprogs(struct bpf_prog *fp);
+void bpf_prog_kallsyms_del_all(struct bpf_prog *fp);
+
#define BPF_ANC BIT(15)
static inline bool bpf_needs_clear_a(const struct sock_filter *first)
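With the CONFIG_ARCH_HAS_SET_MEMORY split removed, the read-only helpers are now unconditional; a sketch of a JIT's finalize step using them (illustrative only, not any particular arch's code):

static void example_jit_finalize(struct bpf_prog *prog,
				 struct bpf_binary_header *hdr)
{
	/* Mark the generated image read-only; a stubbed set_memory_ro() is harmless. */
	bpf_jit_binary_lock_ro(hdr);
	prog->jited = 1;
}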
diff --git a/include/linux/fpga/fpga-mgr.h b/include/linux/fpga/fpga-mgr.h
index eec7c2478b0d..8942e61f0028 100644
--- a/include/linux/fpga/fpga-mgr.h
+++ b/include/linux/fpga/fpga-mgr.h
@@ -77,6 +77,7 @@ enum fpga_mgr_states {
* @sgt: scatter/gather table containing FPGA image
* @buf: contiguous buffer containing FPGA image
* @count: size of buf
+ * @region_id: id of target region
* @dev: device that owns this
* @overlay: Device Tree overlay
*/
@@ -89,6 +90,7 @@ struct fpga_image_info {
struct sg_table *sgt;
const char *buf;
size_t count;
+ int region_id;
struct device *dev;
#ifdef CONFIG_OF
struct device_node *overlay;
@@ -99,6 +101,7 @@ struct fpga_image_info {
* struct fpga_manager_ops - ops for low level fpga manager drivers
* @initial_header_size: Maximum number of bytes that should be passed into write_init
* @state: returns an enum value of the FPGA's state
+ * @status: returns status of the FPGA, including reconfiguration error code
 * @write_init: prepare the FPGA to receive configuration data
* @write: write count bytes of configuration data to the FPGA
* @write_sg: write the scatter list of configuration data to the FPGA
@@ -113,6 +116,7 @@ struct fpga_image_info {
struct fpga_manager_ops {
size_t initial_header_size;
enum fpga_mgr_states (*state)(struct fpga_manager *mgr);
+ u64 (*status)(struct fpga_manager *mgr);
int (*write_init)(struct fpga_manager *mgr,
struct fpga_image_info *info,
const char *buf, size_t count);
@@ -124,12 +128,31 @@ struct fpga_manager_ops {
const struct attribute_group **groups;
};
+/* FPGA manager status: Partial/Full Reconfiguration errors */
+#define FPGA_MGR_STATUS_OPERATION_ERR BIT(0)
+#define FPGA_MGR_STATUS_CRC_ERR BIT(1)
+#define FPGA_MGR_STATUS_INCOMPATIBLE_IMAGE_ERR BIT(2)
+#define FPGA_MGR_STATUS_IP_PROTOCOL_ERR BIT(3)
+#define FPGA_MGR_STATUS_FIFO_OVERFLOW_ERR BIT(4)
+
+/**
+ * struct fpga_compat_id - id for compatibility check
+ *
+ * @id_h: high 64bit of the compat_id
+ * @id_l: low 64bit of the compat_id
+ */
+struct fpga_compat_id {
+ u64 id_h;
+ u64 id_l;
+};
+
/**
* struct fpga_manager - fpga manager structure
* @name: name of low level fpga manager
* @dev: fpga manager device
* @ref_mutex: only allows one reference to fpga manager
* @state: state of fpga manager
+ * @compat_id: FPGA manager id for compatibility check.
* @mops: pointer to struct of fpga manager ops
 * @priv: low level driver private data
*/
@@ -138,6 +161,7 @@ struct fpga_manager {
struct device dev;
struct mutex ref_mutex;
enum fpga_mgr_states state;
+ struct fpga_compat_id *compat_id;
const struct fpga_manager_ops *mops;
void *priv;
};
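A sketch of decoding the new @status callback's bitmask (the reporting function itself is hypothetical; only the flag names come from this header):

static void example_report_fpga_status(struct fpga_manager *mgr)
{
	u64 status;

	if (!mgr->mops->status)
		return;		/* the callback is optional */

	status = mgr->mops->status(mgr);
	if (status & FPGA_MGR_STATUS_CRC_ERR)
		dev_err(&mgr->dev, "CRC error during reconfiguration\n");
	if (status & FPGA_MGR_STATUS_INCOMPATIBLE_IMAGE_ERR)
		dev_err(&mgr->dev, "image incompatible with this device\n");
}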
diff --git a/include/linux/fpga/fpga-region.h b/include/linux/fpga/fpga-region.h
index d7071cddd727..0521b7f577a4 100644
--- a/include/linux/fpga/fpga-region.h
+++ b/include/linux/fpga/fpga-region.h
@@ -14,6 +14,7 @@
* @bridge_list: list of FPGA bridges specified in region
* @mgr: FPGA manager
* @info: FPGA image info
+ * @compat_id: FPGA region id for compatibility check.
* @priv: private data
* @get_bridges: optional function to get bridges to a list
*/
@@ -23,6 +24,7 @@ struct fpga_region {
struct list_head bridge_list;
struct fpga_manager *mgr;
struct fpga_image_info *info;
+ struct fpga_compat_id *compat_id;
void *priv;
int (*get_bridges)(struct fpga_region *region);
};
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 5c91108846db..33322702c910 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -74,6 +74,8 @@ extern struct inodes_stat_t inodes_stat;
extern int leases_enable, lease_break_time;
extern int sysctl_protected_symlinks;
extern int sysctl_protected_hardlinks;
+extern int sysctl_protected_fifos;
+extern int sysctl_protected_regular;
typedef __kernel_rwf_t rwf_t;
@@ -148,12 +150,18 @@ typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
/* Has write method(s) */
#define FMODE_CAN_WRITE ((__force fmode_t)0x40000)
+#define FMODE_OPENED ((__force fmode_t)0x80000)
+#define FMODE_CREATED ((__force fmode_t)0x100000)
+
/* File was opened by fanotify and shouldn't generate fanotify events */
#define FMODE_NONOTIFY ((__force fmode_t)0x4000000)
/* File is capable of returning -EAGAIN if I/O will block */
#define FMODE_NOWAIT ((__force fmode_t)0x8000000)
+/* File does not contribute to nr_files count */
+#define FMODE_NOACCOUNT ((__force fmode_t)0x20000000)
+
/*
* Flag for rw_copy_check_uvector and compat_rw_copy_check_uvector
* that indicates that they should check the contents of the iovec are
@@ -176,7 +184,6 @@ typedef int (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
#define ATTR_ATIME_SET (1 << 7)
#define ATTR_MTIME_SET (1 << 8)
#define ATTR_FORCE (1 << 9) /* Not a change, but a change it */
-#define ATTR_ATTR_FLAG (1 << 10)
#define ATTR_KILL_SUID (1 << 11)
#define ATTR_KILL_SGID (1 << 12)
#define ATTR_FILE (1 << 13)
@@ -275,6 +282,7 @@ struct writeback_control;
/*
* Write life time hint values.
+ * Stored in struct inode as u8.
*/
enum rw_hint {
WRITE_LIFE_NOT_SET = 0,
@@ -341,6 +349,10 @@ struct address_space_operations {
/* Set a page dirty. Return true if this dirtied it */
int (*set_page_dirty)(struct page *page);
+ /*
+ * Reads in the requested pages. Unlike ->readpage(), this is
+ * PURELY used for read-ahead!.
+ */
int (*readpages)(struct file *filp, struct address_space *mapping,
struct list_head *pages, unsigned nr_pages);
@@ -609,8 +621,8 @@ struct inode {
struct timespec64 i_ctime;
spinlock_t i_lock; /* i_blocks, i_bytes, maybe i_size */
unsigned short i_bytes;
- unsigned int i_blkbits;
- enum rw_hint i_write_hint;
+ u8 i_blkbits;
+ u8 i_write_hint;
blkcnt_t i_blocks;
#ifdef __NEED_I_SIZE_ORDERED
@@ -685,6 +697,17 @@ static inline int inode_unhashed(struct inode *inode)
}
/*
+ * __mark_inode_dirty expects inodes to be hashed. Since we don't
+ * want special inodes in the fileset inode space, we make them
+ * appear hashed, but do not put on any lists. hlist_del()
+ * will work fine and require no locking.
+ */
+static inline void inode_fake_hash(struct inode *inode)
+{
+ hlist_add_fake(&inode->i_hash);
+}
+
+/*
* inode->i_mutex nesting subclasses for the lock validator:
*
* 0: the object of the current VFS operation
@@ -1049,17 +1072,7 @@ struct file_lock_context {
extern void send_sigio(struct fown_struct *fown, int fd, int band);
-/*
- * Return the inode to use for locking
- *
- * For overlayfs this should be the overlay inode, not the real inode returned
- * by file_inode(). For any other fs file_inode(filp) and locks_inode(filp) are
- * equal.
- */
-static inline struct inode *locks_inode(const struct file *f)
-{
- return f->f_path.dentry->d_inode;
-}
+#define locks_inode(f) file_inode(f)
#ifdef CONFIG_FILE_LOCKING
extern int fcntl_getlk(struct file *, unsigned int, struct flock *);
@@ -1244,7 +1257,7 @@ static inline struct inode *file_inode(const struct file *f)
static inline struct dentry *file_dentry(const struct file *file)
{
- return d_real(file->f_path.dentry, file_inode(file), 0, 0);
+ return d_real(file->f_path.dentry, file_inode(file));
}
static inline int locks_lock_file_wait(struct file *filp, struct file_lock *fl)
@@ -1300,7 +1313,6 @@ extern int send_sigurg(struct fown_struct *fown);
/* These sb flags are internal to the kernel */
#define SB_SUBMOUNT (1<<26)
-#define SB_NOREMOTELOCK (1<<27)
#define SB_NOSEC (1<<28)
#define SB_BORN (1<<29)
#define SB_ACTIVE (1<<30)
@@ -1629,6 +1641,8 @@ int vfs_mkobj(struct dentry *, umode_t,
int (*f)(struct dentry *, umode_t, void *),
void *);
+extern long vfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
+
/*
* VFS file helper functions.
*/
@@ -1720,8 +1734,6 @@ struct file_operations {
int (*iterate) (struct file *, struct dir_context *);
int (*iterate_shared) (struct file *, struct dir_context *);
__poll_t (*poll) (struct file *, struct poll_table_struct *);
- struct wait_queue_head * (*get_poll_head)(struct file *, __poll_t);
- __poll_t (*poll_mask) (struct file *, __poll_t);
long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long);
long (*compat_ioctl) (struct file *, unsigned int, unsigned long);
int (*mmap) (struct file *, struct vm_area_struct *);
@@ -1749,7 +1761,7 @@ struct file_operations {
loff_t, size_t, unsigned int);
int (*clone_file_range)(struct file *, loff_t, struct file *, loff_t,
u64);
- ssize_t (*dedupe_file_range)(struct file *, u64, u64, struct file *,
+ int (*dedupe_file_range)(struct file *, loff_t, struct file *, loff_t,
u64);
} __randomize_layout;
@@ -1778,7 +1790,7 @@ struct inode_operations {
int (*update_time)(struct inode *, struct timespec64 *, int);
int (*atomic_open)(struct inode *, struct dentry *,
struct file *, unsigned open_flag,
- umode_t create_mode, int *opened);
+ umode_t create_mode);
int (*tmpfile) (struct inode *, struct dentry *, umode_t);
int (*set_acl)(struct inode *, struct posix_acl *, int);
} ____cacheline_aligned;
@@ -1822,6 +1834,10 @@ extern int vfs_dedupe_file_range_compare(struct inode *src, loff_t srcoff,
loff_t len, bool *is_same);
extern int vfs_dedupe_file_range(struct file *file,
struct file_dedupe_range *same);
+extern int vfs_dedupe_file_range_one(struct file *src_file, loff_t src_pos,
+ struct file *dst_file, loff_t dst_pos,
+ u64 len);
+
struct super_operations {
struct inode *(*alloc_inode)(struct super_block *sb);
@@ -2016,6 +2032,8 @@ static inline void init_sync_kiocb(struct kiocb *kiocb, struct file *filp)
* I_OVL_INUSE Used by overlayfs to get exclusive ownership on upper
* and work dirs among overlayfs mounts.
*
+ * I_CREATING New object's inode in the middle of setting up.
+ *
* Q: What is the difference between I_WILL_FREE and I_FREEING?
*/
#define I_DIRTY_SYNC (1 << 0)
@@ -2036,7 +2054,8 @@ static inline void init_sync_kiocb(struct kiocb *kiocb, struct file *filp)
#define __I_DIRTY_TIME_EXPIRED 12
#define I_DIRTY_TIME_EXPIRED (1 << __I_DIRTY_TIME_EXPIRED)
#define I_WB_SWITCH (1 << 13)
-#define I_OVL_INUSE (1 << 14)
+#define I_OVL_INUSE (1 << 14)
+#define I_CREATING (1 << 15)
#define I_DIRTY_INODE (I_DIRTY_SYNC | I_DIRTY_DATASYNC)
#define I_DIRTY (I_DIRTY_INODE | I_DIRTY_PAGES)
@@ -2077,6 +2096,7 @@ enum file_time_flags {
S_VERSION = 8,
};
+extern bool atime_needs_update(const struct path *, struct inode *);
extern void touch_atime(const struct path *);
static inline void file_accessed(struct file *file)
{
@@ -2422,6 +2442,12 @@ extern struct file *filp_open(const char *, int, umode_t);
extern struct file *file_open_root(struct dentry *, struct vfsmount *,
const char *, int, umode_t);
extern struct file * dentry_open(const struct path *, int, const struct cred *);
+extern struct file * open_with_fake_path(const struct path *, int,
+ struct inode*, const struct cred *);
+static inline struct file *file_clone_open(struct file *file)
+{
+ return dentry_open(&file->f_path, file->f_flags, file->f_cred);
+}
extern int filp_close(struct file *, fl_owner_t id);
extern struct filename *getname_flags(const char __user *, int, int *);
@@ -2429,13 +2455,8 @@ extern struct filename *getname(const char __user *);
extern struct filename *getname_kernel(const char *);
extern void putname(struct filename *name);
-enum {
- FILE_CREATED = 1,
- FILE_OPENED = 2
-};
extern int finish_open(struct file *file, struct dentry *dentry,
- int (*open)(struct inode *, struct file *),
- int *opened);
+ int (*open)(struct inode *, struct file *));
extern int finish_no_open(struct file *file, struct dentry *dentry);
/* fs/ioctl.c */
@@ -2623,8 +2644,6 @@ static inline int filemap_fdatawait(struct address_space *mapping)
extern bool filemap_range_has_page(struct address_space *, loff_t lstart,
loff_t lend);
-extern int __must_check file_fdatawait_range(struct file *file, loff_t lstart,
- loff_t lend);
extern int filemap_write_and_wait(struct address_space *mapping);
extern int filemap_write_and_wait_range(struct address_space *mapping,
loff_t lstart, loff_t lend);
@@ -2919,6 +2938,7 @@ extern void lockdep_annotate_inode_mutex_key(struct inode *inode);
static inline void lockdep_annotate_inode_mutex_key(struct inode *inode) { };
#endif
extern void unlock_new_inode(struct inode *);
+extern void discard_new_inode(struct inode *);
extern unsigned int get_next_ino(void);
extern void evict_inodes(struct super_block *sb);
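
Not part of the diff above, only a rough sketch of how a filesystem's ->atomic_open() might look against the new prototypes: the int *opened argument and the FILE_CREATED/FILE_OPENED enum are gone, with creation assumed to be reported through the file mode instead (FMODE_CREATED is taken here to be the companion flag of this series; examplefs_create_inode() is an invented helper):

static int examplefs_atomic_open(struct inode *dir, struct dentry *dentry,
                                 struct file *file, unsigned int open_flag,
                                 umode_t create_mode)
{
        struct inode *inode;

        /* invented stand-in for the filesystem's own create path */
        inode = examplefs_create_inode(dir, dentry, create_mode);
        if (IS_ERR(inode))
                return PTR_ERR(inode);

        d_instantiate(dentry, inode);
        if (open_flag & O_CREAT)
                file->f_mode |= FMODE_CREATED; /* assumed replacement for FILE_CREATED */

        return finish_open(file, dentry, generic_file_open);
}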
diff --git a/include/linux/fsi-sbefifo.h b/include/linux/fsi-sbefifo.h
new file mode 100644
index 000000000000..13f9ebeaa25e
--- /dev/null
+++ b/include/linux/fsi-sbefifo.h
@@ -0,0 +1,33 @@
+/*
+ * SBEFIFO FSI Client device driver
+ *
+ * Copyright (C) IBM Corporation 2017
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef LINUX_FSI_SBEFIFO_H
+#define LINUX_FSI_SBEFIFO_H
+
+#define SBEFIFO_CMD_PUT_OCC_SRAM 0xa404
+#define SBEFIFO_CMD_GET_OCC_SRAM 0xa403
+#define SBEFIFO_CMD_GET_SBE_FFDC 0xa801
+
+#define SBEFIFO_MAX_FFDC_SIZE 0x2000
+
+struct device;
+
+int sbefifo_submit(struct device *dev, const __be32 *command, size_t cmd_len,
+ __be32 *response, size_t *resp_len);
+
+int sbefifo_parse_status(struct device *dev, u16 cmd, __be32 *response,
+ size_t resp_len, size_t *data_len);
+
+#endif /* LINUX_FSI_SBEFIFO_H */
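
As a rough illustration of the new SBEFIFO client interface (not taken from the patch; the two-word command layout and the buffer sizes are assumptions):

static int example_read_occ_sram(struct device *sbefifo_dev)
{
        __be32 cmd[2];
        __be32 resp[16];
        size_t resp_len = ARRAY_SIZE(resp);
        int rc;

        cmd[0] = cpu_to_be32(2);                        /* command length in words (assumed) */
        cmd[1] = cpu_to_be32(SBEFIFO_CMD_GET_OCC_SRAM);

        rc = sbefifo_submit(sbefifo_dev, cmd, 2, resp, &resp_len);
        if (rc)
                return rc;

        /* strip the status words and get the payload length */
        return sbefifo_parse_status(sbefifo_dev, SBEFIFO_CMD_GET_OCC_SRAM,
                                    resp, resp_len, &resp_len);
}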
diff --git a/include/linux/fsi.h b/include/linux/fsi.h
index 141fd38d061f..ec3be0d5b786 100644
--- a/include/linux/fsi.h
+++ b/include/linux/fsi.h
@@ -76,8 +76,18 @@ extern int fsi_slave_read(struct fsi_slave *slave, uint32_t addr,
extern int fsi_slave_write(struct fsi_slave *slave, uint32_t addr,
const void *val, size_t size);
+extern struct bus_type fsi_bus_type;
+extern const struct device_type fsi_cdev_type;
+enum fsi_dev_type {
+ fsi_dev_cfam,
+ fsi_dev_sbefifo,
+ fsi_dev_scom,
+ fsi_dev_occ
+};
-extern struct bus_type fsi_bus_type;
+extern int fsi_get_new_minor(struct fsi_device *fdev, enum fsi_dev_type type,
+ dev_t *out_dev, int *out_index);
+extern void fsi_free_minor(dev_t dev);
#endif /* LINUX_FSI_H */
diff --git a/include/linux/fsl/guts.h b/include/linux/fsl/guts.h
index 3efa3b861d44..941b11811f85 100644
--- a/include/linux/fsl/guts.h
+++ b/include/linux/fsl/guts.h
@@ -16,6 +16,7 @@
#define __FSL_GUTS_H__
#include <linux/types.h>
+#include <linux/io.h>
/**
* Global Utility Registers.
diff --git a/include/linux/fsl/ptp_qoriq.h b/include/linux/fsl/ptp_qoriq.h
index b462d9ea8007..c1f003aadcce 100644
--- a/include/linux/fsl/ptp_qoriq.h
+++ b/include/linux/fsl/ptp_qoriq.h
@@ -11,9 +11,8 @@
/*
* qoriq ptp registers
- * Generated by regen.tcl on Thu May 13 01:38:57 PM CEST 2010
*/
-struct qoriq_ptp_registers {
+struct ctrl_regs {
u32 tmr_ctrl; /* Timer control register */
u32 tmr_tevent; /* Timestamp event register */
u32 tmr_temask; /* Timer event mask register */
@@ -28,22 +27,47 @@ struct qoriq_ptp_registers {
u8 res1[4];
u32 tmroff_h; /* Timer offset high */
u32 tmroff_l; /* Timer offset low */
- u8 res2[8];
+};
+
+struct alarm_regs {
u32 tmr_alarm1_h; /* Timer alarm 1 high register */
u32 tmr_alarm1_l; /* Timer alarm 1 low register */
u32 tmr_alarm2_h; /* Timer alarm 2 high register */
u32 tmr_alarm2_l; /* Timer alarm 2 high register */
- u8 res3[48];
+};
+
+struct fiper_regs {
u32 tmr_fiper1; /* Timer fixed period interval */
u32 tmr_fiper2; /* Timer fixed period interval */
u32 tmr_fiper3; /* Timer fixed period interval */
- u8 res4[20];
+};
+
+struct etts_regs {
u32 tmr_etts1_h; /* Timestamp of general purpose external trigger */
u32 tmr_etts1_l; /* Timestamp of general purpose external trigger */
u32 tmr_etts2_h; /* Timestamp of general purpose external trigger */
u32 tmr_etts2_l; /* Timestamp of general purpose external trigger */
};
+struct qoriq_ptp_registers {
+ struct ctrl_regs __iomem *ctrl_regs;
+ struct alarm_regs __iomem *alarm_regs;
+ struct fiper_regs __iomem *fiper_regs;
+ struct etts_regs __iomem *etts_regs;
+};
+
+/* Offset definitions for the four register groups */
+#define CTRL_REGS_OFFSET 0x0
+#define ALARM_REGS_OFFSET 0x40
+#define FIPER_REGS_OFFSET 0x80
+#define ETTS_REGS_OFFSET 0xa0
+
+#define FMAN_CTRL_REGS_OFFSET 0x80
+#define FMAN_ALARM_REGS_OFFSET 0xb8
+#define FMAN_FIPER_REGS_OFFSET 0xd0
+#define FMAN_ETTS_REGS_OFFSET 0xe0
+
+
/* Bit definitions for the TMR_CTRL register */
#define ALM1P (1<<31) /* Alarm1 output polarity */
#define ALM2P (1<<30) /* Alarm2 output polarity */
@@ -103,12 +127,16 @@ struct qoriq_ptp_registers {
#define DRIVER "ptp_qoriq"
-#define DEFAULT_CKSEL 1
#define N_EXT_TS 2
-#define REG_SIZE sizeof(struct qoriq_ptp_registers)
+
+#define DEFAULT_CKSEL 1
+#define DEFAULT_TMR_PRSC 2
+#define DEFAULT_FIPER1_PERIOD 1000000000
+#define DEFAULT_FIPER2_PERIOD 100000
struct qoriq_ptp {
- struct qoriq_ptp_registers __iomem *regs;
+ void __iomem *base;
+ struct qoriq_ptp_registers regs;
spinlock_t lock; /* protects regs */
struct ptp_clock *clock;
struct ptp_clock_info caps;
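
For illustration only, a probe routine might populate the new per-group register pointers from a single ioremapped base like this (choosing the FMAN_* offsets when the timer block sits behind an FMAN is an assumption about how the two offset sets are meant to be used):

static void example_ptp_map_regs(struct qoriq_ptp *qoriq_ptp, bool behind_fman)
{
        void __iomem *base = qoriq_ptp->base;

        qoriq_ptp->regs.ctrl_regs  = base + (behind_fman ? FMAN_CTRL_REGS_OFFSET  : CTRL_REGS_OFFSET);
        qoriq_ptp->regs.alarm_regs = base + (behind_fman ? FMAN_ALARM_REGS_OFFSET : ALARM_REGS_OFFSET);
        qoriq_ptp->regs.fiper_regs = base + (behind_fman ? FMAN_FIPER_REGS_OFFSET : FIPER_REGS_OFFSET);
        qoriq_ptp->regs.etts_regs  = base + (behind_fman ? FMAN_ETTS_REGS_OFFSET  : ETTS_REGS_OFFSET);
}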
diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
index bdaf22582f6e..fd1ce10553bf 100644
--- a/include/linux/fsnotify.h
+++ b/include/linux/fsnotify.h
@@ -30,11 +30,7 @@ static inline int fsnotify_parent(const struct path *path, struct dentry *dentry
static inline int fsnotify_perm(struct file *file, int mask)
{
const struct path *path = &file->f_path;
- /*
- * Do not use file_inode() here or anywhere in this file to get the
- * inode. That would break *notity on overlayfs.
- */
- struct inode *inode = path->dentry->d_inode;
+ struct inode *inode = file_inode(file);
__u32 fsnotify_mask = 0;
int ret;
@@ -178,7 +174,7 @@ static inline void fsnotify_mkdir(struct inode *inode, struct dentry *dentry)
static inline void fsnotify_access(struct file *file)
{
const struct path *path = &file->f_path;
- struct inode *inode = path->dentry->d_inode;
+ struct inode *inode = file_inode(file);
__u32 mask = FS_ACCESS;
if (S_ISDIR(inode->i_mode))
@@ -196,7 +192,7 @@ static inline void fsnotify_access(struct file *file)
static inline void fsnotify_modify(struct file *file)
{
const struct path *path = &file->f_path;
- struct inode *inode = path->dentry->d_inode;
+ struct inode *inode = file_inode(file);
__u32 mask = FS_MODIFY;
if (S_ISDIR(inode->i_mode))
@@ -214,7 +210,7 @@ static inline void fsnotify_modify(struct file *file)
static inline void fsnotify_open(struct file *file)
{
const struct path *path = &file->f_path;
- struct inode *inode = path->dentry->d_inode;
+ struct inode *inode = file_inode(file);
__u32 mask = FS_OPEN;
if (S_ISDIR(inode->i_mode))
@@ -230,7 +226,7 @@ static inline void fsnotify_open(struct file *file)
static inline void fsnotify_close(struct file *file)
{
const struct path *path = &file->f_path;
- struct inode *inode = path->dentry->d_inode;
+ struct inode *inode = file_inode(file);
fmode_t mode = file->f_mode;
__u32 mask = (mode & FMODE_WRITE) ? FS_CLOSE_WRITE : FS_CLOSE_NOWRITE;
diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
index b38964a7a521..b8f4182f42f1 100644
--- a/include/linux/fsnotify_backend.h
+++ b/include/linux/fsnotify_backend.h
@@ -84,6 +84,8 @@ struct fsnotify_event_private_data;
struct fsnotify_fname;
struct fsnotify_iter_info;
+struct mem_cgroup;
+
/*
* Each group must define these ops. The fsnotify infrastructure will call
* these operations for each relevant group.
@@ -127,6 +129,8 @@ struct fsnotify_event {
* everything will be cleaned up.
*/
struct fsnotify_group {
+ const struct fsnotify_ops *ops; /* how this group handles things */
+
/*
* How the refcnt is used is up to each group. When the refcnt hits 0
* fsnotify will clean up all of the resources associated with this group.
@@ -137,8 +141,6 @@ struct fsnotify_group {
*/
refcount_t refcnt; /* things with interest in this group */
- const struct fsnotify_ops *ops; /* how this group handles things */
-
/* needed to send notification to userspace */
spinlock_t notification_lock; /* protect the notification_list */
struct list_head notification_list; /* list of event_holder this group needs to send to userspace */
@@ -160,6 +162,8 @@ struct fsnotify_group {
atomic_t num_marks; /* 1 for each mark and 1 for not being
* past the point of no return when freeing
* a group */
+ atomic_t user_waits; /* Number of tasks waiting for user
+ * response */
struct list_head marks_list; /* all inode marks for this group */
struct fasync_struct *fsn_fa; /* async notification */
@@ -167,8 +171,8 @@ struct fsnotify_group {
struct fsnotify_event *overflow_event; /* Event we queue when the
* notification list is too
* full */
- atomic_t user_waits; /* Number of tasks waiting for user
- * response */
+
+ struct mem_cgroup *memcg; /* memcg to charge allocations */
/* groups can define private fields here or use the void *private */
union {
@@ -210,6 +214,11 @@ enum fsnotify_obj_type {
#define FSNOTIFY_OBJ_TYPE_VFSMOUNT_FL (1U << FSNOTIFY_OBJ_TYPE_VFSMOUNT)
#define FSNOTIFY_OBJ_ALL_TYPES_MASK ((1U << FSNOTIFY_OBJ_TYPE_COUNT) - 1)
+static inline bool fsnotify_valid_obj_type(unsigned int type)
+{
+ return (type < FSNOTIFY_OBJ_TYPE_COUNT);
+}
+
struct fsnotify_iter_info {
struct fsnotify_mark *marks[FSNOTIFY_OBJ_TYPE_COUNT];
unsigned int report_mask;
@@ -251,6 +260,13 @@ FSNOTIFY_ITER_FUNCS(vfsmount, VFSMOUNT)
for (type = 0; type < FSNOTIFY_OBJ_TYPE_COUNT; type++)
/*
+ * fsnotify_connp_t is what we embed in objects to which a connector can be
+ * attached. fsnotify_connp_t * is how we refer from a connector back to the object.
+ */
+struct fsnotify_mark_connector;
+typedef struct fsnotify_mark_connector __rcu *fsnotify_connp_t;
+
+/*
* Inode / vfsmount point to this structure which tracks all marks attached to
* the inode / vfsmount. The reference to inode / vfsmount is held by this
* structure. We destroy this structure when there are no more marks attached
@@ -259,9 +275,9 @@ FSNOTIFY_ITER_FUNCS(vfsmount, VFSMOUNT)
struct fsnotify_mark_connector {
spinlock_t lock;
unsigned int type; /* Type of object [lock] */
- union { /* Object pointer [lock] */
- struct inode *inode;
- struct vfsmount *mnt;
+ union {
+ /* Object pointer [lock] */
+ fsnotify_connp_t *obj;
/* Used listing heads to free after srcu period expires */
struct fsnotify_mark_connector *destroy_next;
};
@@ -389,32 +405,36 @@ extern struct fsnotify_event *fsnotify_remove_first_event(struct fsnotify_group
/* functions used to manipulate the marks attached to inodes */
+/* Get mask of events for a list of marks */
+extern __u32 fsnotify_conn_mask(struct fsnotify_mark_connector *conn);
/* Calculate mask of events for a list of marks */
extern void fsnotify_recalc_mask(struct fsnotify_mark_connector *conn);
extern void fsnotify_init_mark(struct fsnotify_mark *mark,
struct fsnotify_group *group);
/* Find mark belonging to given group in the list of marks */
-extern struct fsnotify_mark *fsnotify_find_mark(
- struct fsnotify_mark_connector __rcu **connp,
- struct fsnotify_group *group);
-/* attach the mark to the inode or vfsmount */
-extern int fsnotify_add_mark(struct fsnotify_mark *mark, struct inode *inode,
- struct vfsmount *mnt, int allow_dups);
+extern struct fsnotify_mark *fsnotify_find_mark(fsnotify_connp_t *connp,
+ struct fsnotify_group *group);
+/* attach the mark to the object */
+extern int fsnotify_add_mark(struct fsnotify_mark *mark,
+ fsnotify_connp_t *connp, unsigned int type,
+ int allow_dups);
extern int fsnotify_add_mark_locked(struct fsnotify_mark *mark,
- struct inode *inode, struct vfsmount *mnt,
+ fsnotify_connp_t *connp, unsigned int type,
int allow_dups);
/* attach the mark to the inode */
static inline int fsnotify_add_inode_mark(struct fsnotify_mark *mark,
struct inode *inode,
int allow_dups)
{
- return fsnotify_add_mark(mark, inode, NULL, allow_dups);
+ return fsnotify_add_mark(mark, &inode->i_fsnotify_marks,
+ FSNOTIFY_OBJ_TYPE_INODE, allow_dups);
}
static inline int fsnotify_add_inode_mark_locked(struct fsnotify_mark *mark,
struct inode *inode,
int allow_dups)
{
- return fsnotify_add_mark_locked(mark, inode, NULL, allow_dups);
+ return fsnotify_add_mark_locked(mark, &inode->i_fsnotify_marks,
+ FSNOTIFY_OBJ_TYPE_INODE, allow_dups);
}
/* given a group and a mark, flag mark to be freed when all references are dropped */
extern void fsnotify_destroy_mark(struct fsnotify_mark *mark,
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 8154f4920fcb..a397907e8d72 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -223,7 +223,6 @@ extern enum ftrace_tracing_type_t ftrace_tracing_type;
*/
int register_ftrace_function(struct ftrace_ops *ops);
int unregister_ftrace_function(struct ftrace_ops *ops);
-void clear_ftrace_function(void);
extern void ftrace_stub(unsigned long a0, unsigned long a1,
struct ftrace_ops *op, struct pt_regs *regs);
@@ -235,11 +234,6 @@ extern void ftrace_stub(unsigned long a0, unsigned long a1,
*/
#define register_ftrace_function(ops) ({ 0; })
#define unregister_ftrace_function(ops) ({ 0; })
-static inline int ftrace_nr_registered_ops(void)
-{
- return 0;
-}
-static inline void clear_ftrace_function(void) { }
static inline void ftrace_kill(void) { }
static inline void ftrace_free_init_mem(void) { }
static inline void ftrace_free_mem(struct module *mod, void *start, void *end) { }
@@ -330,8 +324,6 @@ struct seq_file;
extern int ftrace_text_reserved(const void *start, const void *end);
-extern int ftrace_nr_registered_ops(void);
-
struct ftrace_ops *ftrace_ops_trampoline(unsigned long addr);
bool is_ftrace_trampoline(unsigned long addr);
@@ -709,16 +701,7 @@ static inline unsigned long get_lock_parent_ip(void)
return CALLER_ADDR2;
}
-#ifdef CONFIG_IRQSOFF_TRACER
- extern void time_hardirqs_on(unsigned long a0, unsigned long a1);
- extern void time_hardirqs_off(unsigned long a0, unsigned long a1);
-#else
- static inline void time_hardirqs_on(unsigned long a0, unsigned long a1) { }
- static inline void time_hardirqs_off(unsigned long a0, unsigned long a1) { }
-#endif
-
-#if defined(CONFIG_PREEMPT_TRACER) || \
- (defined(CONFIG_DEBUG_PREEMPT) && defined(CONFIG_PREEMPTIRQ_EVENTS))
+#ifdef CONFIG_TRACE_PREEMPT_TOGGLE
extern void trace_preempt_on(unsigned long a0, unsigned long a1);
extern void trace_preempt_off(unsigned long a0, unsigned long a1);
#else
diff --git a/include/linux/fwnode.h b/include/linux/fwnode.h
index 4fe8f289b3f6..faebf0ca0686 100644
--- a/include/linux/fwnode.h
+++ b/include/linux/fwnode.h
@@ -45,7 +45,7 @@ struct fwnode_endpoint {
struct fwnode_reference_args {
struct fwnode_handle *fwnode;
unsigned int nargs;
- unsigned int args[NR_FWNODE_REFERENCE_ARGS];
+ u64 args[NR_FWNODE_REFERENCE_ARGS];
};
/**
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
index 6cb8a5789668..57864422a2c8 100644
--- a/include/linux/genhd.h
+++ b/include/linux/genhd.h
@@ -16,6 +16,7 @@
#include <linux/slab.h>
#include <linux/percpu-refcount.h>
#include <linux/uuid.h>
+#include <linux/blk_types.h>
#ifdef CONFIG_BLOCK
@@ -82,10 +83,10 @@ struct partition {
} __attribute__((packed));
struct disk_stats {
- unsigned long sectors[2]; /* READs and WRITEs */
- unsigned long ios[2];
- unsigned long merges[2];
- unsigned long ticks[2];
+ unsigned long sectors[NR_STAT_GROUPS];
+ unsigned long ios[NR_STAT_GROUPS];
+ unsigned long merges[NR_STAT_GROUPS];
+ unsigned long ticks[NR_STAT_GROUPS];
unsigned long io_ticks;
unsigned long time_in_queue;
};
@@ -353,6 +354,11 @@ static inline void free_part_stats(struct hd_struct *part)
#endif /* CONFIG_SMP */
+#define part_stat_read_accum(part, field) \
+ (part_stat_read(part, field[STAT_READ]) + \
+ part_stat_read(part, field[STAT_WRITE]) + \
+ part_stat_read(part, field[STAT_DISCARD]))
+
#define part_stat_add(cpu, part, field, addnd) do { \
__part_stat_add((cpu), (part), field, addnd); \
if ((part)->partno) \
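
A minimal sketch of the new accumulator macro, which now folds the STAT_DISCARD group in alongside reads and writes:

static inline unsigned long example_total_ios(struct hd_struct *part)
{
        /* sums READ + WRITE + DISCARD using the new accumulator */
        return part_stat_read_accum(part, ios);
}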
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index a6afcec53795..24bcc5eec6b4 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -59,29 +59,32 @@ struct vm_area_struct;
#define __GFP_MOVABLE ((__force gfp_t)___GFP_MOVABLE) /* ZONE_MOVABLE allowed */
#define GFP_ZONEMASK (__GFP_DMA|__GFP_HIGHMEM|__GFP_DMA32|__GFP_MOVABLE)
-/*
+/**
+ * DOC: Page mobility and placement hints
+ *
* Page mobility and placement hints
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*
* These flags provide hints about how mobile the page is. Pages with similar
* mobility are placed within the same pageblocks to minimise problems due
* to external fragmentation.
*
- * __GFP_MOVABLE (also a zone modifier) indicates that the page can be
- * moved by page migration during memory compaction or can be reclaimed.
+ * %__GFP_MOVABLE (also a zone modifier) indicates that the page can be
+ * moved by page migration during memory compaction or can be reclaimed.
*
- * __GFP_RECLAIMABLE is used for slab allocations that specify
- * SLAB_RECLAIM_ACCOUNT and whose pages can be freed via shrinkers.
+ * %__GFP_RECLAIMABLE is used for slab allocations that specify
+ * SLAB_RECLAIM_ACCOUNT and whose pages can be freed via shrinkers.
*
- * __GFP_WRITE indicates the caller intends to dirty the page. Where possible,
- * these pages will be spread between local zones to avoid all the dirty
- * pages being in one zone (fair zone allocation policy).
+ * %__GFP_WRITE indicates the caller intends to dirty the page. Where possible,
+ * these pages will be spread between local zones to avoid all the dirty
+ * pages being in one zone (fair zone allocation policy).
*
- * __GFP_HARDWALL enforces the cpuset memory allocation policy.
+ * %__GFP_HARDWALL enforces the cpuset memory allocation policy.
*
- * __GFP_THISNODE forces the allocation to be satisified from the requested
- * node with no fallbacks or placement policy enforcements.
+ * %__GFP_THISNODE forces the allocation to be satisfied from the requested
+ * node with no fallbacks or placement policy enforcements.
*
- * __GFP_ACCOUNT causes the allocation to be accounted to kmemcg.
+ * %__GFP_ACCOUNT causes the allocation to be accounted to kmemcg.
*/
#define __GFP_RECLAIMABLE ((__force gfp_t)___GFP_RECLAIMABLE)
#define __GFP_WRITE ((__force gfp_t)___GFP_WRITE)
@@ -89,54 +92,60 @@ struct vm_area_struct;
#define __GFP_THISNODE ((__force gfp_t)___GFP_THISNODE)
#define __GFP_ACCOUNT ((__force gfp_t)___GFP_ACCOUNT)
-/*
+/**
+ * DOC: Watermark modifiers
+ *
* Watermark modifiers -- controls access to emergency reserves
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*
- * __GFP_HIGH indicates that the caller is high-priority and that granting
- * the request is necessary before the system can make forward progress.
- * For example, creating an IO context to clean pages.
+ * %__GFP_HIGH indicates that the caller is high-priority and that granting
+ * the request is necessary before the system can make forward progress.
+ * For example, creating an IO context to clean pages.
*
- * __GFP_ATOMIC indicates that the caller cannot reclaim or sleep and is
- * high priority. Users are typically interrupt handlers. This may be
- * used in conjunction with __GFP_HIGH
+ * %__GFP_ATOMIC indicates that the caller cannot reclaim or sleep and is
+ * high priority. Users are typically interrupt handlers. This may be
+ * used in conjunction with %__GFP_HIGH
*
- * __GFP_MEMALLOC allows access to all memory. This should only be used when
- * the caller guarantees the allocation will allow more memory to be freed
- * very shortly e.g. process exiting or swapping. Users either should
- * be the MM or co-ordinating closely with the VM (e.g. swap over NFS).
+ * %__GFP_MEMALLOC allows access to all memory. This should only be used when
+ * the caller guarantees the allocation will allow more memory to be freed
+ * very shortly e.g. process exiting or swapping. Users either should
+ * be the MM or co-ordinating closely with the VM (e.g. swap over NFS).
*
- * __GFP_NOMEMALLOC is used to explicitly forbid access to emergency reserves.
- * This takes precedence over the __GFP_MEMALLOC flag if both are set.
+ * %__GFP_NOMEMALLOC is used to explicitly forbid access to emergency reserves.
+ * This takes precedence over the %__GFP_MEMALLOC flag if both are set.
*/
#define __GFP_ATOMIC ((__force gfp_t)___GFP_ATOMIC)
#define __GFP_HIGH ((__force gfp_t)___GFP_HIGH)
#define __GFP_MEMALLOC ((__force gfp_t)___GFP_MEMALLOC)
#define __GFP_NOMEMALLOC ((__force gfp_t)___GFP_NOMEMALLOC)
-/*
+/**
+ * DOC: Reclaim modifiers
+ *
* Reclaim modifiers
+ * ~~~~~~~~~~~~~~~~~
*
- * __GFP_IO can start physical IO.
+ * %__GFP_IO can start physical IO.
*
- * __GFP_FS can call down to the low-level FS. Clearing the flag avoids the
- * allocator recursing into the filesystem which might already be holding
- * locks.
+ * %__GFP_FS can call down to the low-level FS. Clearing the flag avoids the
+ * allocator recursing into the filesystem which might already be holding
+ * locks.
*
- * __GFP_DIRECT_RECLAIM indicates that the caller may enter direct reclaim.
- * This flag can be cleared to avoid unnecessary delays when a fallback
- * option is available.
+ * %__GFP_DIRECT_RECLAIM indicates that the caller may enter direct reclaim.
+ * This flag can be cleared to avoid unnecessary delays when a fallback
+ * option is available.
*
- * __GFP_KSWAPD_RECLAIM indicates that the caller wants to wake kswapd when
- * the low watermark is reached and have it reclaim pages until the high
- * watermark is reached. A caller may wish to clear this flag when fallback
- * options are available and the reclaim is likely to disrupt the system. The
- * canonical example is THP allocation where a fallback is cheap but
- * reclaim/compaction may cause indirect stalls.
+ * %__GFP_KSWAPD_RECLAIM indicates that the caller wants to wake kswapd when
+ * the low watermark is reached and have it reclaim pages until the high
+ * watermark is reached. A caller may wish to clear this flag when fallback
+ * options are available and the reclaim is likely to disrupt the system. The
+ * canonical example is THP allocation where a fallback is cheap but
+ * reclaim/compaction may cause indirect stalls.
*
- * __GFP_RECLAIM is shorthand to allow/forbid both direct and kswapd reclaim.
+ * %__GFP_RECLAIM is shorthand to allow/forbid both direct and kswapd reclaim.
*
* The default allocator behavior depends on the request size. We have a concept
- * of so called costly allocations (with order > PAGE_ALLOC_COSTLY_ORDER).
+ * of so called costly allocations (with order > %PAGE_ALLOC_COSTLY_ORDER).
* !costly allocations are too essential to fail so they are implicitly
* non-failing by default (with some exceptions like OOM victims might fail so
* the caller still has to check for failures) while costly requests try to be
@@ -144,40 +153,40 @@ struct vm_area_struct;
* The following three modifiers might be used to override some of these
* implicit rules
*
- * __GFP_NORETRY: The VM implementation will try only very lightweight
- * memory direct reclaim to get some memory under memory pressure (thus
- * it can sleep). It will avoid disruptive actions like OOM killer. The
- * caller must handle the failure which is quite likely to happen under
- * heavy memory pressure. The flag is suitable when failure can easily be
- * handled at small cost, such as reduced throughput
- *
- * __GFP_RETRY_MAYFAIL: The VM implementation will retry memory reclaim
- * procedures that have previously failed if there is some indication
- * that progress has been made else where. It can wait for other
- * tasks to attempt high level approaches to freeing memory such as
- * compaction (which removes fragmentation) and page-out.
- * There is still a definite limit to the number of retries, but it is
- * a larger limit than with __GFP_NORETRY.
- * Allocations with this flag may fail, but only when there is
- * genuinely little unused memory. While these allocations do not
- * directly trigger the OOM killer, their failure indicates that
- * the system is likely to need to use the OOM killer soon. The
- * caller must handle failure, but can reasonably do so by failing
- * a higher-level request, or completing it only in a much less
- * efficient manner.
- * If the allocation does fail, and the caller is in a position to
- * free some non-essential memory, doing so could benefit the system
- * as a whole.
- *
- * __GFP_NOFAIL: The VM implementation _must_ retry infinitely: the caller
- * cannot handle allocation failures. The allocation could block
- * indefinitely but will never return with failure. Testing for
- * failure is pointless.
- * New users should be evaluated carefully (and the flag should be
- * used only when there is no reasonable failure policy) but it is
- * definitely preferable to use the flag rather than opencode endless
- * loop around allocator.
- * Using this flag for costly allocations is _highly_ discouraged.
+ * %__GFP_NORETRY: The VM implementation will try only very lightweight
+ * memory direct reclaim to get some memory under memory pressure (thus
+ * it can sleep). It will avoid disruptive actions like OOM killer. The
+ * caller must handle the failure which is quite likely to happen under
+ * heavy memory pressure. The flag is suitable when failure can easily be
+ * handled at small cost, such as reduced throughput
+ *
+ * %__GFP_RETRY_MAYFAIL: The VM implementation will retry memory reclaim
+ * procedures that have previously failed if there is some indication
+ * that progress has been made elsewhere. It can wait for other
+ * tasks to attempt high level approaches to freeing memory such as
+ * compaction (which removes fragmentation) and page-out.
+ * There is still a definite limit to the number of retries, but it is
+ * a larger limit than with %__GFP_NORETRY.
+ * Allocations with this flag may fail, but only when there is
+ * genuinely little unused memory. While these allocations do not
+ * directly trigger the OOM killer, their failure indicates that
+ * the system is likely to need to use the OOM killer soon. The
+ * caller must handle failure, but can reasonably do so by failing
+ * a higher-level request, or completing it only in a much less
+ * efficient manner.
+ * If the allocation does fail, and the caller is in a position to
+ * free some non-essential memory, doing so could benefit the system
+ * as a whole.
+ *
+ * %__GFP_NOFAIL: The VM implementation _must_ retry infinitely: the caller
+ * cannot handle allocation failures. The allocation could block
+ * indefinitely but will never return with failure. Testing for
+ * failure is pointless.
+ * New users should be evaluated carefully (and the flag should be
+ * used only when there is no reasonable failure policy) but it is
+ * definitely preferable to use the flag rather than opencode endless
+ * loop around allocator.
+ * Using this flag for costly allocations is _highly_ discouraged.
*/
#define __GFP_IO ((__force gfp_t)___GFP_IO)
#define __GFP_FS ((__force gfp_t)___GFP_FS)
@@ -188,14 +197,17 @@ struct vm_area_struct;
#define __GFP_NOFAIL ((__force gfp_t)___GFP_NOFAIL)
#define __GFP_NORETRY ((__force gfp_t)___GFP_NORETRY)
-/*
+/**
+ * DOC: Action modifiers
+ *
* Action modifiers
+ * ~~~~~~~~~~~~~~~~
*
- * __GFP_NOWARN suppresses allocation failure reports.
+ * %__GFP_NOWARN suppresses allocation failure reports.
*
- * __GFP_COMP address compound page metadata.
+ * %__GFP_COMP addresses compound page metadata.
*
- * __GFP_ZERO returns a zeroed page on success.
+ * %__GFP_ZERO returns a zeroed page on success.
*/
#define __GFP_NOWARN ((__force gfp_t)___GFP_NOWARN)
#define __GFP_COMP ((__force gfp_t)___GFP_COMP)
@@ -208,66 +220,71 @@ struct vm_area_struct;
#define __GFP_BITS_SHIFT (23 + IS_ENABLED(CONFIG_LOCKDEP))
#define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
-/*
+/**
+ * DOC: Useful GFP flag combinations
+ *
+ * Useful GFP flag combinations
+ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *
* Useful GFP flag combinations that are commonly used. It is recommended
* that subsystems start with one of these combinations and then set/clear
- * __GFP_FOO flags as necessary.
- *
- * GFP_ATOMIC users can not sleep and need the allocation to succeed. A lower
- * watermark is applied to allow access to "atomic reserves"
- *
- * GFP_KERNEL is typical for kernel-internal allocations. The caller requires
- * ZONE_NORMAL or a lower zone for direct access but can direct reclaim.
- *
- * GFP_KERNEL_ACCOUNT is the same as GFP_KERNEL, except the allocation is
- * accounted to kmemcg.
- *
- * GFP_NOWAIT is for kernel allocations that should not stall for direct
- * reclaim, start physical IO or use any filesystem callback.
- *
- * GFP_NOIO will use direct reclaim to discard clean pages or slab pages
- * that do not require the starting of any physical IO.
- * Please try to avoid using this flag directly and instead use
- * memalloc_noio_{save,restore} to mark the whole scope which cannot
- * perform any IO with a short explanation why. All allocation requests
- * will inherit GFP_NOIO implicitly.
- *
- * GFP_NOFS will use direct reclaim but will not use any filesystem interfaces.
- * Please try to avoid using this flag directly and instead use
- * memalloc_nofs_{save,restore} to mark the whole scope which cannot/shouldn't
- * recurse into the FS layer with a short explanation why. All allocation
- * requests will inherit GFP_NOFS implicitly.
- *
- * GFP_USER is for userspace allocations that also need to be directly
- * accessibly by the kernel or hardware. It is typically used by hardware
- * for buffers that are mapped to userspace (e.g. graphics) that hardware
- * still must DMA to. cpuset limits are enforced for these allocations.
- *
- * GFP_DMA exists for historical reasons and should be avoided where possible.
- * The flags indicates that the caller requires that the lowest zone be
- * used (ZONE_DMA or 16M on x86-64). Ideally, this would be removed but
- * it would require careful auditing as some users really require it and
- * others use the flag to avoid lowmem reserves in ZONE_DMA and treat the
- * lowest zone as a type of emergency reserve.
- *
- * GFP_DMA32 is similar to GFP_DMA except that the caller requires a 32-bit
- * address.
- *
- * GFP_HIGHUSER is for userspace allocations that may be mapped to userspace,
- * do not need to be directly accessible by the kernel but that cannot
- * move once in use. An example may be a hardware allocation that maps
- * data directly into userspace but has no addressing limitations.
- *
- * GFP_HIGHUSER_MOVABLE is for userspace allocations that the kernel does not
- * need direct access to but can use kmap() when access is required. They
- * are expected to be movable via page reclaim or page migration. Typically,
- * pages on the LRU would also be allocated with GFP_HIGHUSER_MOVABLE.
- *
- * GFP_TRANSHUGE and GFP_TRANSHUGE_LIGHT are used for THP allocations. They are
- * compound allocations that will generally fail quickly if memory is not
- * available and will not wake kswapd/kcompactd on failure. The _LIGHT
- * version does not attempt reclaim/compaction at all and is by default used
- * in page fault path, while the non-light is used by khugepaged.
+ * %__GFP_FOO flags as necessary.
+ *
+ * %GFP_ATOMIC users can not sleep and need the allocation to succeed. A lower
+ * watermark is applied to allow access to "atomic reserves"
+ *
+ * %GFP_KERNEL is typical for kernel-internal allocations. The caller requires
+ * %ZONE_NORMAL or a lower zone for direct access but can direct reclaim.
+ *
+ * %GFP_KERNEL_ACCOUNT is the same as GFP_KERNEL, except the allocation is
+ * accounted to kmemcg.
+ *
+ * %GFP_NOWAIT is for kernel allocations that should not stall for direct
+ * reclaim, start physical IO or use any filesystem callback.
+ *
+ * %GFP_NOIO will use direct reclaim to discard clean pages or slab pages
+ * that do not require the starting of any physical IO.
+ * Please try to avoid using this flag directly and instead use
+ * memalloc_noio_{save,restore} to mark the whole scope which cannot
+ * perform any IO with a short explanation why. All allocation requests
+ * will inherit GFP_NOIO implicitly.
+ *
+ * %GFP_NOFS will use direct reclaim but will not use any filesystem interfaces.
+ * Please try to avoid using this flag directly and instead use
+ * memalloc_nofs_{save,restore} to mark the whole scope which cannot/shouldn't
+ * recurse into the FS layer with a short explanation why. All allocation
+ * requests will inherit GFP_NOFS implicitly.
+ *
+ * %GFP_USER is for userspace allocations that also need to be directly
+ * accessible by the kernel or hardware. It is typically used by hardware
+ * for buffers that are mapped to userspace (e.g. graphics) that hardware
+ * still must DMA to. cpuset limits are enforced for these allocations.
+ *
+ * %GFP_DMA exists for historical reasons and should be avoided where possible.
+ * The flag indicates that the caller requires that the lowest zone be
+ * used (%ZONE_DMA or 16M on x86-64). Ideally, this would be removed but
+ * it would require careful auditing as some users really require it and
+ * others use the flag to avoid lowmem reserves in %ZONE_DMA and treat the
+ * lowest zone as a type of emergency reserve.
+ *
+ * %GFP_DMA32 is similar to %GFP_DMA except that the caller requires a 32-bit
+ * address.
+ *
+ * %GFP_HIGHUSER is for userspace allocations that may be mapped to userspace,
+ * do not need to be directly accessible by the kernel but that cannot
+ * move once in use. An example may be a hardware allocation that maps
+ * data directly into userspace but has no addressing limitations.
+ *
+ * %GFP_HIGHUSER_MOVABLE is for userspace allocations that the kernel does not
+ * need direct access to but can use kmap() when access is required. They
+ * are expected to be movable via page reclaim or page migration. Typically,
+ * pages on the LRU would also be allocated with %GFP_HIGHUSER_MOVABLE.
+ *
+ * %GFP_TRANSHUGE and %GFP_TRANSHUGE_LIGHT are used for THP allocations. They
+ * are compound allocations that will generally fail quickly if memory is not
+ * available and will not wake kswapd/kcompactd on failure. The _LIGHT
+ * version does not attempt reclaim/compaction at all and is by default used
+ * in page fault path, while the non-light is used by khugepaged.
*/
#define GFP_ATOMIC (__GFP_HIGH|__GFP_ATOMIC|__GFP_KSWAPD_RECLAIM)
#define GFP_KERNEL (__GFP_RECLAIM | __GFP_IO | __GFP_FS)
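
A small sketch of the scope-based pattern the %GFP_NOIO text above recommends, marking a region with memalloc_noio_{save,restore} instead of passing the flag to each allocation:

static void *example_alloc_in_noio_scope(size_t len)
{
        unsigned int noio_flags;
        void *buf;

        noio_flags = memalloc_noio_save();
        buf = kmalloc(len, GFP_KERNEL);  /* implicitly behaves as GFP_NOIO in this scope */
        memalloc_noio_restore(noio_flags);

        return buf;
}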
diff --git a/include/linux/gnss.h b/include/linux/gnss.h
new file mode 100644
index 000000000000..43546977098c
--- /dev/null
+++ b/include/linux/gnss.h
@@ -0,0 +1,75 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * GNSS receiver support
+ *
+ * Copyright (C) 2018 Johan Hovold <johan@kernel.org>
+ */
+
+#ifndef _LINUX_GNSS_H
+#define _LINUX_GNSS_H
+
+#include <linux/cdev.h>
+#include <linux/device.h>
+#include <linux/kfifo.h>
+#include <linux/mutex.h>
+#include <linux/rwsem.h>
+#include <linux/types.h>
+#include <linux/wait.h>
+
+struct gnss_device;
+
+enum gnss_type {
+ GNSS_TYPE_NMEA = 0,
+ GNSS_TYPE_SIRF,
+ GNSS_TYPE_UBX,
+
+ GNSS_TYPE_COUNT
+};
+
+struct gnss_operations {
+ int (*open)(struct gnss_device *gdev);
+ void (*close)(struct gnss_device *gdev);
+ int (*write_raw)(struct gnss_device *gdev, const unsigned char *buf,
+ size_t count);
+};
+
+struct gnss_device {
+ struct device dev;
+ struct cdev cdev;
+ int id;
+
+ enum gnss_type type;
+ unsigned long flags;
+
+ struct rw_semaphore rwsem;
+ const struct gnss_operations *ops;
+ unsigned int count;
+ unsigned int disconnected:1;
+
+ struct mutex read_mutex;
+ struct kfifo read_fifo;
+ wait_queue_head_t read_queue;
+
+ struct mutex write_mutex;
+ char *write_buf;
+};
+
+struct gnss_device *gnss_allocate_device(struct device *parent);
+void gnss_put_device(struct gnss_device *gdev);
+int gnss_register_device(struct gnss_device *gdev);
+void gnss_deregister_device(struct gnss_device *gdev);
+
+int gnss_insert_raw(struct gnss_device *gdev, const unsigned char *buf,
+ size_t count);
+
+static inline void gnss_set_drvdata(struct gnss_device *gdev, void *data)
+{
+ dev_set_drvdata(&gdev->dev, data);
+}
+
+static inline void *gnss_get_drvdata(struct gnss_device *gdev)
+{
+ return dev_get_drvdata(&gdev->dev);
+}
+
+#endif /* _LINUX_GNSS_H */
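
A hypothetical consumer of the new GNSS core API might look roughly like this; all example_gnss_* names are invented, and a real driver would additionally feed received bytes to gnss_insert_raw() from its receive path:

static int example_gnss_open(struct gnss_device *gdev)
{
        return 0;       /* power up / start the receiver here */
}

static void example_gnss_close(struct gnss_device *gdev)
{
}

static int example_gnss_write_raw(struct gnss_device *gdev,
                                  const unsigned char *buf, size_t count)
{
        return count;   /* push bytes to the receiver, return bytes written */
}

static const struct gnss_operations example_gnss_ops = {
        .open           = example_gnss_open,
        .close          = example_gnss_close,
        .write_raw      = example_gnss_write_raw,
};

static int example_gnss_probe(struct device *parent)
{
        struct gnss_device *gdev;
        int ret;

        gdev = gnss_allocate_device(parent);
        if (!gdev)
                return -ENOMEM;

        gdev->type = GNSS_TYPE_NMEA;
        gdev->ops = &example_gnss_ops;

        ret = gnss_register_device(gdev);
        if (ret)
                gnss_put_device(gdev);
        return ret;
}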
diff --git a/include/linux/goldfish.h b/include/linux/goldfish.h
index 2835c150c3ff..265a099cd3b8 100644
--- a/include/linux/goldfish.h
+++ b/include/linux/goldfish.h
@@ -2,14 +2,20 @@
#ifndef __LINUX_GOLDFISH_H
#define __LINUX_GOLDFISH_H
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/io.h>
+
/* Helpers for Goldfish virtual platform */
static inline void gf_write_ptr(const void *ptr, void __iomem *portl,
void __iomem *porth)
{
- writel((u32)(unsigned long)ptr, portl);
+ const unsigned long addr = (unsigned long)ptr;
+
+ writel(lower_32_bits(addr), portl);
#ifdef CONFIG_64BIT
- writel((unsigned long)ptr >> 32, porth);
+ writel(upper_32_bits(addr), porth);
#endif
}
@@ -17,9 +23,9 @@ static inline void gf_write_dma_addr(const dma_addr_t addr,
void __iomem *portl,
void __iomem *porth)
{
- writel((u32)addr, portl);
+ writel(lower_32_bits(addr), portl);
#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
- writel(addr >> 32, porth);
+ writel(upper_32_bits(addr), porth);
#endif
}
diff --git a/include/linux/gpio.h b/include/linux/gpio.h
index 91ed23468530..39745b8bdd65 100644
--- a/include/linux/gpio.h
+++ b/include/linux/gpio.h
@@ -14,7 +14,7 @@
#include <linux/errno.h>
-/* see Documentation/gpio/gpio-legacy.txt */
+/* see Documentation/driver-api/gpio/legacy.rst */
/* make these flag values available regardless of GPIO kconfig options */
#define GPIOF_DIR_OUT (0 << 0)
diff --git a/include/linux/gpio/aspeed.h b/include/linux/gpio/aspeed.h
new file mode 100644
index 000000000000..1bfb3cdc86d0
--- /dev/null
+++ b/include/linux/gpio/aspeed.h
@@ -0,0 +1,15 @@
+#ifndef __GPIO_ASPEED_H
+#define __GPIO_ASPEED_H
+
+struct aspeed_gpio_copro_ops {
+ int (*request_access)(void *data);
+ int (*release_access)(void *data);
+};
+
+int aspeed_gpio_copro_grab_gpio(struct gpio_desc *desc,
+ u16 *vreg_offset, u16 *dreg_offset, u8 *bit);
+int aspeed_gpio_copro_release_gpio(struct gpio_desc *desc);
+int aspeed_gpio_copro_set_ops(const struct aspeed_gpio_copro_ops *ops, void *data);
+
+
+#endif /* __GPIO_ASPEED_H */
diff --git a/include/linux/gpio/consumer.h b/include/linux/gpio/consumer.h
index 243112c7fa7d..21ddbe440030 100644
--- a/include/linux/gpio/consumer.h
+++ b/include/linux/gpio/consumer.h
@@ -41,11 +41,8 @@ enum gpiod_flags {
GPIOD_OUT_LOW = GPIOD_FLAGS_BIT_DIR_SET | GPIOD_FLAGS_BIT_DIR_OUT,
GPIOD_OUT_HIGH = GPIOD_FLAGS_BIT_DIR_SET | GPIOD_FLAGS_BIT_DIR_OUT |
GPIOD_FLAGS_BIT_DIR_VAL,
- GPIOD_OUT_LOW_OPEN_DRAIN = GPIOD_FLAGS_BIT_DIR_SET |
- GPIOD_FLAGS_BIT_DIR_OUT | GPIOD_FLAGS_BIT_OPEN_DRAIN,
- GPIOD_OUT_HIGH_OPEN_DRAIN = GPIOD_FLAGS_BIT_DIR_SET |
- GPIOD_FLAGS_BIT_DIR_OUT | GPIOD_FLAGS_BIT_DIR_VAL |
- GPIOD_FLAGS_BIT_OPEN_DRAIN,
+ GPIOD_OUT_LOW_OPEN_DRAIN = GPIOD_OUT_LOW | GPIOD_FLAGS_BIT_OPEN_DRAIN,
+ GPIOD_OUT_HIGH_OPEN_DRAIN = GPIOD_OUT_HIGH | GPIOD_FLAGS_BIT_OPEN_DRAIN,
};
#ifdef CONFIG_GPIOLIB
@@ -145,6 +142,7 @@ int gpiod_is_active_low(const struct gpio_desc *desc);
int gpiod_cansleep(const struct gpio_desc *desc);
int gpiod_to_irq(const struct gpio_desc *desc);
+void gpiod_set_consumer_name(struct gpio_desc *desc, const char *name);
/* Convert between the old gpio_ and new gpiod_ interfaces */
struct gpio_desc *gpio_to_desc(unsigned gpio);
@@ -467,6 +465,12 @@ static inline int gpiod_to_irq(const struct gpio_desc *desc)
return -EINVAL;
}
+static inline void gpiod_set_consumer_name(struct gpio_desc *desc, const char *name)
+{
+ /* GPIO can never have been requested */
+ WARN_ON(1);
+}
+
static inline struct gpio_desc *gpio_to_desc(unsigned gpio)
{
return ERR_PTR(-EINVAL);
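
Illustrative use of the simplified open-drain flags together with the new gpiod_set_consumer_name() (the "reset" con_id and the name string are made up):

static int example_claim_reset(struct device *dev)
{
        struct gpio_desc *reset;

        reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW_OPEN_DRAIN);
        if (IS_ERR(reset))
                return PTR_ERR(reset);

        gpiod_set_consumer_name(reset, "examplechip-reset");
        return 0;
}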
diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h
index 5382b5183b7e..0ea328e71ec9 100644
--- a/include/linux/gpio/driver.h
+++ b/include/linux/gpio/driver.h
@@ -201,6 +201,8 @@ static inline struct gpio_irq_chip *to_gpio_irq_chip(struct irq_chip *chip)
* @reg_set: output set register (out=high) for generic GPIO
* @reg_clr: output clear register (out=low) for generic GPIO
* @reg_dir: direction setting register for generic GPIO
+ * @bgpio_dir_inverted: indicates that the direction register is inverted
+ * (gpiolib private state variable)
* @bgpio_bits: number of register bits used for a generic GPIO i.e.
* <register width> * 8
* @bgpio_lock: used to lock chip->bgpio_data. Also, this is needed to keep
@@ -267,6 +269,7 @@ struct gpio_chip {
void __iomem *reg_set;
void __iomem *reg_clr;
void __iomem *reg_dir;
+ bool bgpio_dir_inverted;
int bgpio_bits;
spinlock_t bgpio_lock;
unsigned long bgpio_data;
diff --git a/include/linux/gpio_keys.h b/include/linux/gpio_keys.h
index 7160df54a6fe..3f84aeb81e48 100644
--- a/include/linux/gpio_keys.h
+++ b/include/linux/gpio_keys.h
@@ -2,6 +2,8 @@
#ifndef _GPIO_KEYS_H
#define _GPIO_KEYS_H
+#include <linux/types.h>
+
struct device;
/**
diff --git a/include/linux/hid.h b/include/linux/hid.h
index 41a3d5775394..834e6461a690 100644
--- a/include/linux/hid.h
+++ b/include/linux/hid.h
@@ -190,6 +190,12 @@ struct hid_item {
* http://www.usb.org/developers/hidpage/HUTRR40RadioHIDUsagesFinal.pdf
*/
#define HID_GD_WIRELESS_RADIO_CTLS 0x0001000c
+/*
+ * System Multi-Axis, see:
+ * http://www.usb.org/developers/hidpage/HUTRR62_-_Generic_Desktop_CA_for_System_Multi-Axis_Controllers.txt
+ */
+#define HID_GD_SYSTEM_MULTIAXIS 0x0001000e
+
#define HID_GD_X 0x00010030
#define HID_GD_Y 0x00010031
#define HID_GD_Z 0x00010032
@@ -511,6 +517,7 @@ struct hid_output_fifo {
#define HID_STAT_ADDED BIT(0)
#define HID_STAT_PARSED BIT(1)
#define HID_STAT_DUP_DETECTED BIT(2)
+#define HID_STAT_REPROBED BIT(3)
struct hid_input {
struct list_head list;
@@ -579,7 +586,7 @@ struct hid_device { /* device report descriptor */
bool battery_avoid_query;
#endif
- unsigned int status; /* see STAT flags above */
+ unsigned long status; /* see STAT flags above */
unsigned claimed; /* Claimed by hidinput, hiddev? */
unsigned quirks; /* Various quirks the device can pull on us */
bool io_started; /* If IO has started */
@@ -637,12 +644,13 @@ static inline void hid_set_drvdata(struct hid_device *hdev, void *data)
struct hid_parser {
struct hid_global global;
struct hid_global global_stack[HID_GLOBAL_STACK_SIZE];
- unsigned global_stack_ptr;
+ unsigned int global_stack_ptr;
struct hid_local local;
- unsigned collection_stack[HID_COLLECTION_STACK_SIZE];
- unsigned collection_stack_ptr;
+ unsigned int *collection_stack;
+ unsigned int collection_stack_ptr;
+ unsigned int collection_stack_size;
struct hid_device *device;
- unsigned scan_flags;
+ unsigned int scan_flags;
};
struct hid_class_descriptor {
@@ -893,6 +901,8 @@ const struct hid_device_id *hid_match_id(const struct hid_device *hdev,
const struct hid_device_id *id);
const struct hid_device_id *hid_match_device(struct hid_device *hdev,
struct hid_driver *hdrv);
+bool hid_compare_device_paths(struct hid_device *hdev_a,
+ struct hid_device *hdev_b, char separator);
s32 hid_snto32(__u32 value, unsigned n);
__u32 hid_field_extract(const struct hid_device *hid, __u8 *report,
unsigned offset, unsigned n);
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index a8a126259bc4..27e3e32135a8 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -6,7 +6,7 @@
#include <linux/fs.h> /* only for vma_is_dax() */
-extern int do_huge_pmd_anonymous_page(struct vm_fault *vmf);
+extern vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf);
extern int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
struct vm_area_struct *vma);
@@ -23,7 +23,7 @@ static inline void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud)
}
#endif
-extern int do_huge_pmd_wp_page(struct vm_fault *vmf, pmd_t orig_pmd);
+extern vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf, pmd_t orig_pmd);
extern struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
unsigned long addr,
pmd_t *pmd,
@@ -216,7 +216,7 @@ struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr,
pud_t *pud, int flags);
-extern int do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t orig_pmd);
+extern vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t orig_pmd);
extern struct page *huge_zero_page;
@@ -321,7 +321,8 @@ static inline spinlock_t *pud_trans_huge_lock(pud_t *pud,
return NULL;
}
-static inline int do_huge_pmd_numa_page(struct vm_fault *vmf, pmd_t orig_pmd)
+static inline vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf,
+ pmd_t orig_pmd)
{
return 0;
}
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 36fa6a2a82e3..6b68e345f0ca 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -105,7 +105,7 @@ void hugetlb_report_meminfo(struct seq_file *);
int hugetlb_report_node_meminfo(int, char *);
void hugetlb_show_meminfo(void);
unsigned long hugetlb_total_pages(void);
-int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
+vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long address, unsigned int flags);
int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm, pte_t *dst_pte,
struct vm_area_struct *dst_vma,
@@ -348,9 +348,6 @@ struct hstate {
struct huge_bootmem_page {
struct list_head list;
struct hstate *hstate;
-#ifdef CONFIG_HIGHMEM
- phys_addr_t phys;
-#endif
};
struct page *alloc_huge_page(struct vm_area_struct *vma,
diff --git a/include/linux/hwmon.h b/include/linux/hwmon.h
index e5fd2707b6df..9493d4a388db 100644
--- a/include/linux/hwmon.h
+++ b/include/linux/hwmon.h
@@ -93,6 +93,7 @@ enum hwmon_temp_attributes {
#define HWMON_T_MIN_ALARM BIT(hwmon_temp_min_alarm)
#define HWMON_T_MAX_ALARM BIT(hwmon_temp_max_alarm)
#define HWMON_T_CRIT_ALARM BIT(hwmon_temp_crit_alarm)
+#define HWMON_T_LCRIT_ALARM BIT(hwmon_temp_lcrit_alarm)
#define HWMON_T_EMERGENCY_ALARM BIT(hwmon_temp_emergency_alarm)
#define HWMON_T_FAULT BIT(hwmon_temp_fault)
#define HWMON_T_OFFSET BIT(hwmon_temp_offset)
@@ -187,12 +188,16 @@ enum hwmon_power_attributes {
hwmon_power_cap_hyst,
hwmon_power_cap_max,
hwmon_power_cap_min,
+ hwmon_power_min,
hwmon_power_max,
hwmon_power_crit,
+ hwmon_power_lcrit,
hwmon_power_label,
hwmon_power_alarm,
hwmon_power_cap_alarm,
+ hwmon_power_min_alarm,
hwmon_power_max_alarm,
+ hwmon_power_lcrit_alarm,
hwmon_power_crit_alarm,
};
@@ -213,12 +218,16 @@ enum hwmon_power_attributes {
#define HWMON_P_CAP_HYST BIT(hwmon_power_cap_hyst)
#define HWMON_P_CAP_MAX BIT(hwmon_power_cap_max)
#define HWMON_P_CAP_MIN BIT(hwmon_power_cap_min)
+#define HWMON_P_MIN BIT(hwmon_power_min)
#define HWMON_P_MAX BIT(hwmon_power_max)
+#define HWMON_P_LCRIT BIT(hwmon_power_lcrit)
#define HWMON_P_CRIT BIT(hwmon_power_crit)
#define HWMON_P_LABEL BIT(hwmon_power_label)
#define HWMON_P_ALARM BIT(hwmon_power_alarm)
#define HWMON_P_CAP_ALARM BIT(hwmon_power_cap_alarm)
+#define HWMON_P_MIN_ALARM BIT(hwmon_power_min_alarm)
#define HWMON_P_MAX_ALARM BIT(hwmon_power_max_alarm)
+#define HWMON_P_LCRIT_ALARM BIT(hwmon_power_lcrit_alarm)
#define HWMON_P_CRIT_ALARM BIT(hwmon_power_crit_alarm)
enum hwmon_energy_attributes {
@@ -389,4 +398,27 @@ devm_hwmon_device_register_with_info(struct device *dev,
void hwmon_device_unregister(struct device *dev);
void devm_hwmon_device_unregister(struct device *dev);
+/**
+ * hwmon_is_bad_char - Is the char invalid in a hwmon name
+ * @ch: the char to be considered
+ *
+ * hwmon_is_bad_char() can be used to determine if the given character
+ * may not be used in a hwmon name.
+ *
+ * Returns true if the char is invalid, false otherwise.
+ */
+static inline bool hwmon_is_bad_char(const char ch)
+{
+ switch (ch) {
+ case '-':
+ case '*':
+ case ' ':
+ case '\t':
+ case '\n':
+ return true;
+ default:
+ return false;
+ }
+}
+
#endif
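
A trivial sketch of how a caller might use the new helper to scrub a candidate hwmon name before registration:

static void example_sanitize_hwmon_name(char *name)
{
        char *p;

        for (p = name; *p; p++)
                if (hwmon_is_bad_char(*p))
                        *p = '_';
}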
diff --git a/include/linux/hwspinlock.h b/include/linux/hwspinlock.h
index 57537e67b468..0afe693be5f4 100644
--- a/include/linux/hwspinlock.h
+++ b/include/linux/hwspinlock.h
@@ -52,7 +52,7 @@ struct hwspinlock_pdata {
int base_id;
};
-#if defined(CONFIG_HWSPINLOCK) || defined(CONFIG_HWSPINLOCK_MODULE)
+#ifdef CONFIG_HWSPINLOCK
int hwspin_lock_register(struct hwspinlock_device *bank, struct device *dev,
const struct hwspinlock_ops *ops, int base_id, int num_locks);
@@ -66,6 +66,17 @@ int __hwspin_lock_timeout(struct hwspinlock *, unsigned int, int,
unsigned long *);
int __hwspin_trylock(struct hwspinlock *, int, unsigned long *);
void __hwspin_unlock(struct hwspinlock *, int, unsigned long *);
+int of_hwspin_lock_get_id_byname(struct device_node *np, const char *name);
+int devm_hwspin_lock_free(struct device *dev, struct hwspinlock *hwlock);
+struct hwspinlock *devm_hwspin_lock_request(struct device *dev);
+struct hwspinlock *devm_hwspin_lock_request_specific(struct device *dev,
+ unsigned int id);
+int devm_hwspin_lock_unregister(struct device *dev,
+ struct hwspinlock_device *bank);
+int devm_hwspin_lock_register(struct device *dev,
+ struct hwspinlock_device *bank,
+ const struct hwspinlock_ops *ops,
+ int base_id, int num_locks);
#else /* !CONFIG_HWSPINLOCK */
@@ -125,6 +136,30 @@ static inline int hwspin_lock_get_id(struct hwspinlock *hwlock)
return 0;
}
+static inline
+int of_hwspin_lock_get_id_byname(struct device_node *np, const char *name)
+{
+ return 0;
+}
+
+static inline
+int devm_hwspin_lock_free(struct device *dev, struct hwspinlock *hwlock)
+{
+ return 0;
+}
+
+static inline struct hwspinlock *devm_hwspin_lock_request(struct device *dev)
+{
+ return ERR_PTR(-ENODEV);
+}
+
+static inline
+struct hwspinlock *devm_hwspin_lock_request_specific(struct device *dev,
+ unsigned int id)
+{
+ return ERR_PTR(-ENODEV);
+}
+
#endif /* !CONFIG_HWSPINLOCK */
/**
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index 3a3012f57be4..efda23cf32c7 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -89,18 +89,33 @@ struct hv_ring_buffer {
u32 interrupt_mask;
/*
- * Win8 uses some of the reserved bits to implement
- * interrupt driven flow management. On the send side
- * we can request that the receiver interrupt the sender
- * when the ring transitions from being full to being able
- * to handle a message of size "pending_send_sz".
+ * WS2012/Win8 and later versions of Hyper-V implement interrupt
+ * driven flow management. The feature bit feat_pending_send_sz
+ * is set by the host on the host->guest ring buffer, and by the
+ * guest on the guest->host ring buffer.
*
- * Add necessary state for this enhancement.
+ * The meaning of the feature bit is a bit complex in that it has
+ * semantics that apply to both ring buffers. If the guest sets
+ * the feature bit in the guest->host ring buffer, the guest is
+ * telling the host that:
+ * 1) It will set the pending_send_sz field in the guest->host ring
+ * buffer when it is waiting for space to become available, and
+ * 2) It will read the pending_send_sz field in the host->guest
+ * ring buffer and interrupt the host when it frees enough space
+ *
+ * Similarly, if the host sets the feature bit in the host->guest
+ * ring buffer, the host is telling the guest that:
+ * 1) It will set the pending_send_sz field in the host->guest ring
+ * buffer when it is waiting for space to become available, and
+ * 2) It will read the pending_send_sz field in the guest->host
+ * ring buffer and interrupt the guest when it frees enough space
+ *
+ * If either the guest or host does not set the feature bit that it
+ * owns, that guest or host must do polling if it encounters a full
+ * ring buffer, and not signal the other end with an interrupt.
*/
u32 pending_send_sz;
-
u32 reserved1[12];
-
union {
struct {
u32 feat_pending_send_sz:1;
@@ -1046,6 +1061,8 @@ extern int vmbus_establish_gpadl(struct vmbus_channel *channel,
extern int vmbus_teardown_gpadl(struct vmbus_channel *channel,
u32 gpadl_handle);
+void vmbus_reset_channel_cb(struct vmbus_channel *channel);
+
extern int vmbus_recvpacket(struct vmbus_channel *channel,
void *buffer,
u32 bufferlen,
diff --git a/include/linux/i2c.h b/include/linux/i2c.h
index 254cd34eeae2..b79387fd57da 100644
--- a/include/linux/i2c.h
+++ b/include/linux/i2c.h
@@ -140,9 +140,14 @@ extern int __i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
and probably just as fast.
Note that we use i2c_adapter here, because you do not need a specific
smbus adapter to call this function. */
-extern s32 i2c_smbus_xfer(struct i2c_adapter *adapter, u16 addr,
- unsigned short flags, char read_write, u8 command,
- int size, union i2c_smbus_data *data);
+s32 i2c_smbus_xfer(struct i2c_adapter *adapter, u16 addr,
+ unsigned short flags, char read_write, u8 command,
+ int protocol, union i2c_smbus_data *data);
+
+/* Unlocked flavor */
+s32 __i2c_smbus_xfer(struct i2c_adapter *adapter, u16 addr,
+ unsigned short flags, char read_write, u8 command,
+ int protocol, union i2c_smbus_data *data);
/* Now follow the 'nice' access routines. These also document the calling
conventions of i2c_smbus_xfer. */
@@ -226,7 +231,6 @@ enum i2c_alert_protocol {
/**
* struct i2c_driver - represent an I2C device driver
* @class: What kind of i2c device we instantiate (for detect)
- * @attach_adapter: Callback for bus addition (deprecated)
* @probe: Callback for device binding - soon to be deprecated
* @probe_new: New callback for device binding
* @remove: Callback for device unbinding
@@ -263,11 +267,6 @@ enum i2c_alert_protocol {
struct i2c_driver {
unsigned int class;
- /* Notifies the driver that a new bus has appeared. You should avoid
- * using this, it will be removed in a near future.
- */
- int (*attach_adapter)(struct i2c_adapter *) __deprecated;
-
/* Standard driver model interfaces */
int (*probe)(struct i2c_client *, const struct i2c_device_id *);
int (*remove)(struct i2c_client *);
@@ -559,6 +558,7 @@ struct i2c_lock_operations {
* @scl_fall_ns: time SCL signal takes to fall in ns; t(f) in the I2C specification
* @scl_int_delay_ns: time IP core additionally needs to setup SCL in ns
* @sda_fall_ns: time SDA signal takes to fall in ns; t(f) in the I2C specification
+ * @sda_hold_ns: time IP core additionally needs to hold SDA in ns
*/
struct i2c_timings {
u32 bus_freq_hz;
@@ -566,6 +566,7 @@ struct i2c_timings {
u32 scl_fall_ns;
u32 scl_int_delay_ns;
u32 sda_fall_ns;
+ u32 sda_hold_ns;
};
/**
@@ -576,12 +577,14 @@ struct i2c_timings {
* recovery. Populated internally for generic GPIO recovery.
* @set_scl: This sets/clears the SCL line. Mandatory for generic SCL recovery.
* Populated internally for generic GPIO recovery.
- * @get_sda: This gets current value of SDA line. Optional for generic SCL
- * recovery. Populated internally, if sda_gpio is a valid GPIO, for generic
- * GPIO recovery.
- * @set_sda: This sets/clears the SDA line. Optional for generic SCL recovery.
- * Populated internally, if sda_gpio is a valid GPIO, for generic GPIO
- * recovery.
+ * @get_sda: This gets current value of SDA line. This or set_sda() is mandatory
+ * for generic SCL recovery. Populated internally, if sda_gpio is a valid
+ * GPIO, for generic GPIO recovery.
+ * @set_sda: This sets/clears the SDA line. This or get_sda() is mandatory for
+ * generic SCL recovery. Populated internally, if sda_gpio is a valid GPIO,
+ * for generic GPIO recovery.
+ * @get_bus_free: Returns the bus free state as seen from the IP core in case it
+ * has a more complex internal logic than just reading SDA. Optional.
* @prepare_recovery: This will be called before starting recovery. Platform may
* configure padmux here for SDA/SCL line or something else they want.
* @unprepare_recovery: This will be called after completing recovery. Platform
@@ -596,6 +599,7 @@ struct i2c_bus_recovery_info {
void (*set_scl)(struct i2c_adapter *adap, int val);
int (*get_sda)(struct i2c_adapter *adap);
void (*set_sda)(struct i2c_adapter *adap, int val);
+ int (*get_bus_free)(struct i2c_adapter *adap);
void (*prepare_recovery)(struct i2c_adapter *adap);
void (*unprepare_recovery)(struct i2c_adapter *adap);
@@ -653,6 +657,10 @@ struct i2c_adapter_quirks {
I2C_AQ_COMB_READ_SECOND | I2C_AQ_COMB_SAME_ADDR)
/* clock stretching is not supported */
#define I2C_AQ_NO_CLK_STRETCH BIT(4)
+/* message cannot have length of 0 */
+#define I2C_AQ_NO_ZERO_LEN_READ BIT(5)
+#define I2C_AQ_NO_ZERO_LEN_WRITE BIT(6)
+#define I2C_AQ_NO_ZERO_LEN (I2C_AQ_NO_ZERO_LEN_READ | I2C_AQ_NO_ZERO_LEN_WRITE)
/*
* i2c_adapter is the structure used to identify a physical i2c bus along
@@ -754,18 +762,6 @@ i2c_unlock_bus(struct i2c_adapter *adapter, unsigned int flags)
adapter->lock_ops->unlock_bus(adapter, flags);
}
-static inline void
-i2c_lock_adapter(struct i2c_adapter *adapter)
-{
- i2c_lock_bus(adapter, I2C_LOCK_ROOT_ADAPTER);
-}
-
-static inline void
-i2c_unlock_adapter(struct i2c_adapter *adapter)
-{
- i2c_unlock_bus(adapter, I2C_LOCK_ROOT_ADAPTER);
-}
-
/*flags for the client struct: */
#define I2C_CLIENT_PEC 0x04 /* Use Packet Error Checking */
#define I2C_CLIENT_TEN 0x10 /* we have a ten bit chip address */
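The new unlocked __i2c_smbus_xfer() is meant to be paired with explicit i2c_lock_bus()/i2c_unlock_bus() calls now that the i2c_lock_adapter()/i2c_unlock_adapter() helpers are gone. A minimal sketch of that pattern, assuming a 7-bit client address and the SMBus byte-data protocol; the function name is illustrative only:

static s32 example_read_byte_locked(struct i2c_adapter *adap, u16 addr, u8 command)
{
        union i2c_smbus_data data;
        s32 ret;

        /* take the root adapter lock explicitly, as the removed helpers did */
        i2c_lock_bus(adap, I2C_LOCK_ROOT_ADAPTER);
        ret = __i2c_smbus_xfer(adap, addr, 0, I2C_SMBUS_READ, command,
                               I2C_SMBUS_BYTE_DATA, &data);
        i2c_unlock_bus(adap, I2C_LOCK_ROOT_ADAPTER);

        return ret < 0 ? ret : data.byte;
}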
diff --git a/include/linux/idle_inject.h b/include/linux/idle_inject.h
new file mode 100644
index 000000000000..bdc0293fb6cb
--- /dev/null
+++ b/include/linux/idle_inject.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 Linaro Ltd
+ *
+ * Author: Daniel Lezcano <daniel.lezcano@linaro.org>
+ *
+ */
+#ifndef __IDLE_INJECT_H__
+#define __IDLE_INJECT_H__
+
+/* private idle injection device structure */
+struct idle_inject_device;
+
+struct idle_inject_device *idle_inject_register(struct cpumask *cpumask);
+
+void idle_inject_unregister(struct idle_inject_device *ii_dev);
+
+int idle_inject_start(struct idle_inject_device *ii_dev);
+
+void idle_inject_stop(struct idle_inject_device *ii_dev);
+
+void idle_inject_set_duration(struct idle_inject_device *ii_dev,
+ unsigned int run_duration_ms,
+ unsigned int idle_duration_ms);
+
+void idle_inject_get_duration(struct idle_inject_device *ii_dev,
+ unsigned int *run_duration_ms,
+ unsigned int *idle_duration_ms);
+#endif /* __IDLE_INJECT_H__ */
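A hedged sketch of how this interface might be driven end to end, assuming the caller owns the cpumask and that idle_inject_register() returns NULL on failure; the function name and the 40 ms run / 10 ms idle cycle are illustrative:

static int example_idle_inject(struct cpumask *mask)
{
        struct idle_inject_device *ii_dev;
        int ret;

        ii_dev = idle_inject_register(mask);
        if (!ii_dev)
                return -ENOMEM;

        /* run 40 ms, then idle 10 ms, repeated until stopped */
        idle_inject_set_duration(ii_dev, 40, 10);

        ret = idle_inject_start(ii_dev);
        if (ret) {
                idle_inject_unregister(ii_dev);
                return ret;
        }

        /* ... later, when injection is no longer needed ... */
        idle_inject_stop(ii_dev);
        idle_inject_unregister(ii_dev);

        return 0;
}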
diff --git a/include/linux/idr.h b/include/linux/idr.h
index e856f4e0ab35..3e8215b2c371 100644
--- a/include/linux/idr.h
+++ b/include/linux/idr.h
@@ -98,6 +98,17 @@ static inline void idr_set_cursor(struct idr *idr, unsigned int val)
* period).
*/
+#define idr_lock(idr) xa_lock(&(idr)->idr_rt)
+#define idr_unlock(idr) xa_unlock(&(idr)->idr_rt)
+#define idr_lock_bh(idr) xa_lock_bh(&(idr)->idr_rt)
+#define idr_unlock_bh(idr) xa_unlock_bh(&(idr)->idr_rt)
+#define idr_lock_irq(idr) xa_lock_irq(&(idr)->idr_rt)
+#define idr_unlock_irq(idr) xa_unlock_irq(&(idr)->idr_rt)
+#define idr_lock_irqsave(idr, flags) \
+ xa_lock_irqsave(&(idr)->idr_rt, flags)
+#define idr_unlock_irqrestore(idr, flags) \
+ xa_unlock_irqrestore(&(idr)->idr_rt, flags)
+
void idr_preload(gfp_t gfp_mask);
int idr_alloc(struct idr *, void *ptr, int start, int end, gfp_t);
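The new idr_lock*() wrappers expose the xa_lock of the underlying radix tree. A minimal sketch combining them with the usual idr_preload() pattern; the function name is illustrative and error handling is left to the caller:

static int example_alloc_id(struct idr *idr, void *ptr)
{
        int id;

        idr_preload(GFP_KERNEL);
        idr_lock(idr);
        /* allocation under the lock must not sleep, hence GFP_NOWAIT */
        id = idr_alloc(idr, ptr, 0, 0, GFP_NOWAIT);
        idr_unlock(idr);
        idr_preload_end();

        return id;
}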
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
index 8fe7e4306816..9c03a7d5e400 100644
--- a/include/linux/ieee80211.h
+++ b/include/linux/ieee80211.h
@@ -1433,11 +1433,13 @@ struct ieee80211_ht_operation {
#define IEEE80211_DELBA_PARAM_INITIATOR_MASK 0x0800
/*
- * A-PMDU buffer sizes
- * According to IEEE802.11n spec size varies from 8K to 64K (in powers of 2)
+ * A-MPDU buffer sizes
+ * According to HT size varies from 8 to 64 frames
+ * HE adds the ability to have up to 256 frames.
*/
-#define IEEE80211_MIN_AMPDU_BUF 0x8
-#define IEEE80211_MAX_AMPDU_BUF 0x40
+#define IEEE80211_MIN_AMPDU_BUF 0x8
+#define IEEE80211_MAX_AMPDU_BUF_HT 0x40
+#define IEEE80211_MAX_AMPDU_BUF 0x100
/* Spatial Multiplexing Power Save Modes (for capability) */
@@ -1539,6 +1541,106 @@ struct ieee80211_vht_operation {
__le16 basic_mcs_set;
} __packed;
+/**
+ * struct ieee80211_he_cap_elem - HE capabilities element
+ *
+ * This structure is the "HE capabilities element" fixed fields as
+ * described in P802.11ax_D2.0 section 9.4.2.237.2 and 9.4.2.237.3
+ */
+struct ieee80211_he_cap_elem {
+ u8 mac_cap_info[5];
+ u8 phy_cap_info[9];
+} __packed;
+
+#define IEEE80211_TX_RX_MCS_NSS_DESC_MAX_LEN 5
+
+/**
+ * enum ieee80211_he_mcs_support - HE MCS support definitions
+ * @IEEE80211_HE_MCS_SUPPORT_0_7: MCSes 0-7 are supported for the
+ * number of streams
+ * @IEEE80211_HE_MCS_SUPPORT_0_9: MCSes 0-9 are supported
+ * @IEEE80211_HE_MCS_SUPPORT_0_11: MCSes 0-11 are supported
+ * @IEEE80211_HE_MCS_NOT_SUPPORTED: This number of streams isn't supported
+ *
+ * These definitions are used in each 2-bit subfield of the rx_mcs_*
+ * and tx_mcs_* fields of &struct ieee80211_he_mcs_nss_supp, which are
+ * both split into 8 subfields by number of streams. These values indicate
+ * which MCSes are supported for the number of streams the value appears
+ * for.
+ */
+enum ieee80211_he_mcs_support {
+ IEEE80211_HE_MCS_SUPPORT_0_7 = 0,
+ IEEE80211_HE_MCS_SUPPORT_0_9 = 1,
+ IEEE80211_HE_MCS_SUPPORT_0_11 = 2,
+ IEEE80211_HE_MCS_NOT_SUPPORTED = 3,
+};
+
+/**
+ * struct ieee80211_he_mcs_nss_supp - HE Tx/Rx HE MCS NSS Support Field
+ *
+ * This structure holds the data required for the Tx/Rx HE MCS NSS Support Field
+ * described in P802.11ax_D2.0 section 9.4.2.237.4
+ *
+ * @rx_mcs_80: Rx MCS map 2 bits for each stream, total 8 streams, for channel
+ * widths less than 80MHz.
+ * @tx_mcs_80: Tx MCS map 2 bits for each stream, total 8 streams, for channel
+ * widths less than 80MHz.
+ * @rx_mcs_160: Rx MCS map 2 bits for each stream, total 8 streams, for channel
+ * width 160MHz.
+ * @tx_mcs_160: Tx MCS map 2 bits for each stream, total 8 streams, for channel
+ * width 160MHz.
+ * @rx_mcs_80p80: Rx MCS map 2 bits for each stream, total 8 streams, for
+ * channel width 80p80MHz.
+ * @tx_mcs_80p80: Tx MCS map 2 bits for each stream, total 8 streams, for
+ * channel width 80p80MHz.
+ */
+struct ieee80211_he_mcs_nss_supp {
+ __le16 rx_mcs_80;
+ __le16 tx_mcs_80;
+ __le16 rx_mcs_160;
+ __le16 tx_mcs_160;
+ __le16 rx_mcs_80p80;
+ __le16 tx_mcs_80p80;
+} __packed;
+
+/**
+ * struct ieee80211_he_operation - HE operation element
+ *
+ * This structure is the "HE operation element" fields as
+ * described in P802.11ax_D2.0 section 9.4.2.238
+ */
+struct ieee80211_he_operation {
+ __le32 he_oper_params;
+ __le16 he_mcs_nss_set;
+ /* Optional 0,1,3 or 4 bytes: depends on @he_oper_params */
+ u8 optional[0];
+} __packed;
+
+/**
+ * struct ieee80211_he_mu_edca_param_ac_rec - MU AC Parameter Record field
+ *
+ * This structure is the "MU AC Parameter Record" fields as
+ * described in P802.11ax_D2.0 section 9.4.2.240
+ */
+struct ieee80211_he_mu_edca_param_ac_rec {
+ u8 aifsn;
+ u8 ecw_min_max;
+ u8 mu_edca_timer;
+} __packed;
+
+/**
+ * struct ieee80211_mu_edca_param_set - MU EDCA Parameter Set element
+ *
+ * This structure is the "MU EDCA Parameter Set element" fields as
+ * described in P802.11ax_D2.0 section 9.4.2.240
+ */
+struct ieee80211_mu_edca_param_set {
+ u8 mu_qos_info;
+ struct ieee80211_he_mu_edca_param_ac_rec ac_be;
+ struct ieee80211_he_mu_edca_param_ac_rec ac_bk;
+ struct ieee80211_he_mu_edca_param_ac_rec ac_vi;
+ struct ieee80211_he_mu_edca_param_ac_rec ac_vo;
+} __packed;
/* 802.11ac VHT Capabilities */
#define IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_3895 0x00000000
@@ -1577,6 +1679,328 @@ struct ieee80211_vht_operation {
#define IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN 0x10000000
#define IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN 0x20000000
+/* 802.11ax HE MAC capabilities */
+#define IEEE80211_HE_MAC_CAP0_HTC_HE 0x01
+#define IEEE80211_HE_MAC_CAP0_TWT_REQ 0x02
+#define IEEE80211_HE_MAC_CAP0_TWT_RES 0x04
+#define IEEE80211_HE_MAC_CAP0_DYNAMIC_FRAG_NOT_SUPP 0x00
+#define IEEE80211_HE_MAC_CAP0_DYNAMIC_FRAG_LEVEL_1 0x08
+#define IEEE80211_HE_MAC_CAP0_DYNAMIC_FRAG_LEVEL_2 0x10
+#define IEEE80211_HE_MAC_CAP0_DYNAMIC_FRAG_LEVEL_3 0x18
+#define IEEE80211_HE_MAC_CAP0_DYNAMIC_FRAG_MASK 0x18
+#define IEEE80211_HE_MAC_CAP0_MAX_NUM_FRAG_MSDU_1 0x00
+#define IEEE80211_HE_MAC_CAP0_MAX_NUM_FRAG_MSDU_2 0x20
+#define IEEE80211_HE_MAC_CAP0_MAX_NUM_FRAG_MSDU_4 0x40
+#define IEEE80211_HE_MAC_CAP0_MAX_NUM_FRAG_MSDU_8 0x60
+#define IEEE80211_HE_MAC_CAP0_MAX_NUM_FRAG_MSDU_16 0x80
+#define IEEE80211_HE_MAC_CAP0_MAX_NUM_FRAG_MSDU_32 0xa0
+#define IEEE80211_HE_MAC_CAP0_MAX_NUM_FRAG_MSDU_64 0xc0
+#define IEEE80211_HE_MAC_CAP0_MAX_NUM_FRAG_MSDU_UNLIMITED 0xe0
+#define IEEE80211_HE_MAC_CAP0_MAX_NUM_FRAG_MSDU_MASK 0xe0
+
+#define IEEE80211_HE_MAC_CAP1_MIN_FRAG_SIZE_UNLIMITED 0x00
+#define IEEE80211_HE_MAC_CAP1_MIN_FRAG_SIZE_128 0x01
+#define IEEE80211_HE_MAC_CAP1_MIN_FRAG_SIZE_256 0x02
+#define IEEE80211_HE_MAC_CAP1_MIN_FRAG_SIZE_512 0x03
+#define IEEE80211_HE_MAC_CAP1_MIN_FRAG_SIZE_MASK 0x03
+#define IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_0US 0x00
+#define IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_8US 0x04
+#define IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US 0x08
+#define IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_MASK 0x0c
+#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_QOS_1 0x00
+#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_QOS_2 0x10
+#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_QOS_3 0x20
+#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_QOS_4 0x30
+#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_QOS_5 0x40
+#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_QOS_6 0x50
+#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_QOS_7 0x60
+#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_QOS_8 0x70
+#define IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_QOS_MASK 0x70
+
+/* Link adaptation is split between byte HE_MAC_CAP1 and
+ * HE_MAC_CAP2. It should be set only if IEEE80211_HE_MAC_CAP0_HTC_HE is
+ * set, in which case the following values apply:
+ * 0 = No feedback.
+ * 1 = Reserved.
+ * 2 = Unsolicited feedback.
+ * 3 = Both.
+ */
+#define IEEE80211_HE_MAC_CAP1_LINK_ADAPTATION 0x80
+
+#define IEEE80211_HE_MAC_CAP2_LINK_ADAPTATION 0x01
+#define IEEE80211_HE_MAC_CAP2_ALL_ACK 0x02
+#define IEEE80211_HE_MAC_CAP2_UL_MU_RESP_SCHED 0x04
+#define IEEE80211_HE_MAC_CAP2_BSR 0x08
+#define IEEE80211_HE_MAC_CAP2_BCAST_TWT 0x10
+#define IEEE80211_HE_MAC_CAP2_32BIT_BA_BITMAP 0x20
+#define IEEE80211_HE_MAC_CAP2_MU_CASCADING 0x40
+#define IEEE80211_HE_MAC_CAP2_ACK_EN 0x80
+
+#define IEEE80211_HE_MAC_CAP3_GRP_ADDR_MULTI_STA_BA_DL_MU 0x01
+#define IEEE80211_HE_MAC_CAP3_OMI_CONTROL 0x02
+#define IEEE80211_HE_MAC_CAP3_OFDMA_RA 0x04
+
+/* The maximum length of an A-MPDU is defined by the combination of the Maximum
+ * A-MPDU Length Exponent field in the HT capabilities, VHT capabilities and the
+ * same field in the HE capabilities.
+ */
+#define IEEE80211_HE_MAC_CAP3_MAX_A_AMPDU_LEN_EXP_USE_VHT 0x00
+#define IEEE80211_HE_MAC_CAP3_MAX_A_AMPDU_LEN_EXP_VHT_1 0x08
+#define IEEE80211_HE_MAC_CAP3_MAX_A_AMPDU_LEN_EXP_VHT_2 0x10
+#define IEEE80211_HE_MAC_CAP3_MAX_A_AMPDU_LEN_EXP_RESERVED 0x18
+#define IEEE80211_HE_MAC_CAP3_MAX_A_AMPDU_LEN_EXP_MASK 0x18
+#define IEEE80211_HE_MAC_CAP3_A_AMSDU_FRAG 0x20
+#define IEEE80211_HE_MAC_CAP3_FLEX_TWT_SCHED 0x40
+#define IEEE80211_HE_MAC_CAP3_RX_CTRL_FRAME_TO_MULTIBSS 0x80
+
+#define IEEE80211_HE_MAC_CAP4_BSRP_BQRP_A_MPDU_AGG 0x01
+#define IEEE80211_HE_MAC_CAP4_QTP 0x02
+#define IEEE80211_HE_MAC_CAP4_BQR 0x04
+#define IEEE80211_HE_MAC_CAP4_SR_RESP 0x08
+#define IEEE80211_HE_MAC_CAP4_NDP_FB_REP 0x10
+#define IEEE80211_HE_MAC_CAP4_OPS 0x20
+#define IEEE80211_HE_MAC_CAP4_AMDSU_IN_AMPDU 0x40
+
+/* 802.11ax HE PHY capabilities */
+#define IEEE80211_HE_PHY_CAP0_DUAL_BAND 0x01
+#define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G 0x02
+#define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G 0x04
+#define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G 0x08
+#define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G 0x10
+#define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_RU_MAPPING_IN_2G 0x20
+#define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_RU_MAPPING_IN_5G 0x40
+#define IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_MASK 0xfe
+
+#define IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_80MHZ_ONLY_SECOND_20MHZ 0x01
+#define IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_80MHZ_ONLY_SECOND_40MHZ 0x02
+#define IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_160MHZ_ONLY_SECOND_20MHZ 0x04
+#define IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_160MHZ_ONLY_SECOND_40MHZ 0x08
+#define IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_MASK 0x0f
+#define IEEE80211_HE_PHY_CAP1_DEVICE_CLASS_A 0x10
+#define IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD 0x20
+#define IEEE80211_HE_PHY_CAP1_HE_LTF_AND_GI_FOR_HE_PPDUS_0_8US 0x40
+/* Midamble RX Max NSTS is split between byte #2 and byte #3 */
+#define IEEE80211_HE_PHY_CAP1_MIDAMBLE_RX_MAX_NSTS 0x80
+
+#define IEEE80211_HE_PHY_CAP2_MIDAMBLE_RX_MAX_NSTS 0x01
+#define IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US 0x02
+#define IEEE80211_HE_PHY_CAP2_STBC_TX_UNDER_80MHZ 0x04
+#define IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ 0x08
+#define IEEE80211_HE_PHY_CAP2_DOPPLER_TX 0x10
+#define IEEE80211_HE_PHY_CAP2_DOPPLER_RX 0x20
+
+/* Note that the meaning of UL MU below is different between an AP and a non-AP
+ * sta, where in the AP case it indicates support for Rx and in the non-AP sta
+ * case it indicates support for Tx.
+ */
+#define IEEE80211_HE_PHY_CAP2_UL_MU_FULL_MU_MIMO 0x40
+#define IEEE80211_HE_PHY_CAP2_UL_MU_PARTIAL_MU_MIMO 0x80
+
+#define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_NO_DCM 0x00
+#define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_BPSK 0x01
+#define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_QPSK 0x02
+#define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_16_QAM 0x03
+#define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_MASK 0x03
+#define IEEE80211_HE_PHY_CAP3_DCM_MAX_TX_NSS_1 0x00
+#define IEEE80211_HE_PHY_CAP3_DCM_MAX_TX_NSS_2 0x04
+#define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_NO_DCM 0x00
+#define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_BPSK 0x08
+#define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_QPSK 0x10
+#define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_16_QAM 0x18
+#define IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_MASK 0x18
+#define IEEE80211_HE_PHY_CAP3_DCM_MAX_RX_NSS_1 0x00
+#define IEEE80211_HE_PHY_CAP3_DCM_MAX_RX_NSS_2 0x20
+#define IEEE80211_HE_PHY_CAP3_RX_HE_MU_PPDU_FROM_NON_AP_STA 0x40
+#define IEEE80211_HE_PHY_CAP3_SU_BEAMFORMER 0x80
+
+#define IEEE80211_HE_PHY_CAP4_SU_BEAMFORMEE 0x01
+#define IEEE80211_HE_PHY_CAP4_MU_BEAMFORMER 0x02
+
+/* Minimal allowed value of Max STS under 80MHz is 3 */
+#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_4 0x0c
+#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_5 0x10
+#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_6 0x14
+#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_7 0x18
+#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_8 0x1c
+#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_MASK 0x1c
+
+/* Minimal allowed value of Max STS above 80MHz is 3 */
+#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_4 0x60
+#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_5 0x80
+#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_6 0xa0
+#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_7 0xc0
+#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_8 0xe0
+#define IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_MASK 0xe0
+
+#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_1 0x00
+#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_2 0x01
+#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_3 0x02
+#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_4 0x03
+#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_5 0x04
+#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_6 0x05
+#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_7 0x06
+#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_8 0x07
+#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_MASK 0x07
+
+#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_1 0x00
+#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_2 0x08
+#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_3 0x10
+#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_4 0x18
+#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_5 0x20
+#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_6 0x28
+#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_7 0x30
+#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_8 0x38
+#define IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_MASK 0x38
+
+#define IEEE80211_HE_PHY_CAP5_NG16_SU_FEEDBACK 0x40
+#define IEEE80211_HE_PHY_CAP5_NG16_MU_FEEDBACK 0x80
+
+#define IEEE80211_HE_PHY_CAP6_CODEBOOK_SIZE_42_SU 0x01
+#define IEEE80211_HE_PHY_CAP6_CODEBOOK_SIZE_75_MU 0x02
+#define IEEE80211_HE_PHY_CAP6_TRIG_SU_BEAMFORMER_FB 0x04
+#define IEEE80211_HE_PHY_CAP6_TRIG_MU_BEAMFORMER_FB 0x08
+#define IEEE80211_HE_PHY_CAP6_TRIG_CQI_FB 0x10
+#define IEEE80211_HE_PHY_CAP6_PARTIAL_BW_EXT_RANGE 0x20
+#define IEEE80211_HE_PHY_CAP6_PARTIAL_BANDWIDTH_DL_MUMIMO 0x40
+#define IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT 0x80
+
+#define IEEE80211_HE_PHY_CAP7_SRP_BASED_SR 0x01
+#define IEEE80211_HE_PHY_CAP7_POWER_BOOST_FACTOR_AR 0x02
+#define IEEE80211_HE_PHY_CAP7_HE_SU_MU_PPDU_4XLTF_AND_08_US_GI 0x04
+#define IEEE80211_HE_PHY_CAP7_MAX_NC_1 0x08
+#define IEEE80211_HE_PHY_CAP7_MAX_NC_2 0x10
+#define IEEE80211_HE_PHY_CAP7_MAX_NC_3 0x18
+#define IEEE80211_HE_PHY_CAP7_MAX_NC_4 0x20
+#define IEEE80211_HE_PHY_CAP7_MAX_NC_5 0x28
+#define IEEE80211_HE_PHY_CAP7_MAX_NC_6 0x30
+#define IEEE80211_HE_PHY_CAP7_MAX_NC_7 0x38
+#define IEEE80211_HE_PHY_CAP7_MAX_NC_MASK 0x38
+#define IEEE80211_HE_PHY_CAP7_STBC_TX_ABOVE_80MHZ 0x40
+#define IEEE80211_HE_PHY_CAP7_STBC_RX_ABOVE_80MHZ 0x80
+
+#define IEEE80211_HE_PHY_CAP8_HE_ER_SU_PPDU_4XLTF_AND_08_US_GI 0x01
+#define IEEE80211_HE_PHY_CAP8_20MHZ_IN_40MHZ_HE_PPDU_IN_2G 0x02
+#define IEEE80211_HE_PHY_CAP8_20MHZ_IN_160MHZ_HE_PPDU 0x04
+#define IEEE80211_HE_PHY_CAP8_80MHZ_IN_160MHZ_HE_PPDU 0x08
+#define IEEE80211_HE_PHY_CAP8_HE_ER_SU_1XLTF_AND_08_US_GI 0x10
+#define IEEE80211_HE_PHY_CAP8_MIDAMBLE_RX_2X_AND_1XLTF 0x20
+
+/* 802.11ax HE TX/RX MCS NSS Support */
+#define IEEE80211_TX_RX_MCS_NSS_SUPP_HIGHEST_MCS_POS (3)
+#define IEEE80211_TX_RX_MCS_NSS_SUPP_TX_BITMAP_POS (6)
+#define IEEE80211_TX_RX_MCS_NSS_SUPP_RX_BITMAP_POS (11)
+#define IEEE80211_TX_RX_MCS_NSS_SUPP_TX_BITMAP_MASK 0x07c0
+#define IEEE80211_TX_RX_MCS_NSS_SUPP_RX_BITMAP_MASK 0xf800
+
+/* TX/RX HE MCS Support field Highest MCS subfield encoding */
+enum ieee80211_he_highest_mcs_supported_subfield_enc {
+ HIGHEST_MCS_SUPPORTED_MCS7 = 0,
+ HIGHEST_MCS_SUPPORTED_MCS8,
+ HIGHEST_MCS_SUPPORTED_MCS9,
+ HIGHEST_MCS_SUPPORTED_MCS10,
+ HIGHEST_MCS_SUPPORTED_MCS11,
+};
+
+/* Calculate 802.11ax HE capabilities IE Tx/Rx HE MCS NSS Support Field size */
+static inline u8
+ieee80211_he_mcs_nss_size(const struct ieee80211_he_cap_elem *he_cap)
+{
+ u8 count = 4;
+
+ if (he_cap->phy_cap_info[0] &
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G)
+ count += 4;
+
+ if (he_cap->phy_cap_info[0] &
+ IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G)
+ count += 4;
+
+ return count;
+}
+
+/* 802.11ax HE PPE Thresholds */
+#define IEEE80211_PPE_THRES_NSS_SUPPORT_2NSS (1)
+#define IEEE80211_PPE_THRES_NSS_POS (0)
+#define IEEE80211_PPE_THRES_NSS_MASK (7)
+#define IEEE80211_PPE_THRES_RU_INDEX_BITMASK_2x966_AND_966_RU \
+ (BIT(5) | BIT(6))
+#define IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK 0x78
+#define IEEE80211_PPE_THRES_RU_INDEX_BITMASK_POS (3)
+#define IEEE80211_PPE_THRES_INFO_PPET_SIZE (3)
+
+/*
+ * Calculate 802.11ax HE capabilities IE PPE field size
+ * Input: Header byte of ppe_thres (first byte), and HE capa IE's PHY cap u8*
+ */
+static inline u8
+ieee80211_he_ppe_size(u8 ppe_thres_hdr, const u8 *phy_cap_info)
+{
+ u8 n;
+
+ if ((phy_cap_info[6] &
+ IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT) == 0)
+ return 0;
+
+ n = hweight8(ppe_thres_hdr &
+ IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK);
+ n *= (1 + ((ppe_thres_hdr & IEEE80211_PPE_THRES_NSS_MASK) >>
+ IEEE80211_PPE_THRES_NSS_POS));
+
+ /*
+ * Each pair is 6 bits, and we need to add the 7 "header" bits to the
+ * total size.
+ */
+ n = (n * IEEE80211_PPE_THRES_INFO_PPET_SIZE * 2) + 7;
+ n = DIV_ROUND_UP(n, 8);
+
+ return n;
+}
+
+/* HE Operation defines */
+#define IEEE80211_HE_OPERATION_BSS_COLOR_MASK 0x0000003f
+#define IEEE80211_HE_OPERATION_DFLT_PE_DURATION_MASK 0x000001c0
+#define IEEE80211_HE_OPERATION_DFLT_PE_DURATION_OFFSET 6
+#define IEEE80211_HE_OPERATION_TWT_REQUIRED 0x00000200
+#define IEEE80211_HE_OPERATION_RTS_THRESHOLD_MASK 0x000ffc00
+#define IEEE80211_HE_OPERATION_RTS_THRESHOLD_OFFSET 10
+#define IEEE80211_HE_OPERATION_PARTIAL_BSS_COLOR 0x000100000
+#define IEEE80211_HE_OPERATION_VHT_OPER_INFO 0x000200000
+#define IEEE80211_HE_OPERATION_MULTI_BSSID_AP 0x10000000
+#define IEEE80211_HE_OPERATION_TX_BSSID_INDICATOR 0x20000000
+#define IEEE80211_HE_OPERATION_BSS_COLOR_DISABLED 0x40000000
+
+/*
+ * ieee80211_he_oper_size - calculate 802.11ax HE Operations IE size
+ * @he_oper_ie: byte data of the HE Operation IE, starting from the byte
+ * after the ext ID byte. It is assumed that he_oper_ie has at least
+ * sizeof(struct ieee80211_he_operation) bytes, checked already in
+ * ieee802_11_parse_elems_crc()
+ * @return the actual size of the IE data (not including header), or 0 on error
+ */
+static inline u8
+ieee80211_he_oper_size(const u8 *he_oper_ie)
+{
+ struct ieee80211_he_operation *he_oper = (void *)he_oper_ie;
+ u8 oper_len = sizeof(struct ieee80211_he_operation);
+ u32 he_oper_params;
+
+ /* Make sure the input is not NULL */
+ if (!he_oper_ie)
+ return 0;
+
+ /* Calc required length */
+ he_oper_params = le32_to_cpu(he_oper->he_oper_params);
+ if (he_oper_params & IEEE80211_HE_OPERATION_VHT_OPER_INFO)
+ oper_len += 3;
+ if (he_oper_params & IEEE80211_HE_OPERATION_MULTI_BSSID_AP)
+ oper_len++;
+
+ /* Add the first byte (extension ID) to the total length */
+ oper_len++;
+
+ return oper_len;
+}
+
/* Authentication algorithms */
#define WLAN_AUTH_OPEN 0
#define WLAN_AUTH_SHARED_KEY 1
@@ -1992,6 +2416,11 @@ enum ieee80211_eid_ext {
WLAN_EID_EXT_FILS_WRAPPED_DATA = 8,
WLAN_EID_EXT_FILS_PUBLIC_KEY = 12,
WLAN_EID_EXT_FILS_NONCE = 13,
+ WLAN_EID_EXT_FUTURE_CHAN_GUIDANCE = 14,
+ WLAN_EID_EXT_HE_CAPABILITY = 35,
+ WLAN_EID_EXT_HE_OPERATION = 36,
+ WLAN_EID_EXT_UORA = 37,
+ WLAN_EID_EXT_HE_MU_EDCA = 38,
};
/* Action category code */
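A hedged sketch tying the HE size helpers above together: computing the total length of an HE capabilities element body from its fixed fields, assuming the buffer has already been length-checked by the caller; the function name is illustrative:

static inline int example_he_cap_len(const u8 *data)
{
        const struct ieee80211_he_cap_elem *he_cap = (const void *)data;
        u8 mcs_nss_size = ieee80211_he_mcs_nss_size(he_cap);
        u8 ppe_size = 0;

        /* PPE thresholds follow the MCS/NSS set only when advertised */
        if (he_cap->phy_cap_info[6] &
            IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT)
                ppe_size = ieee80211_he_ppe_size(data[sizeof(*he_cap) +
                                                      mcs_nss_size],
                                                 he_cap->phy_cap_info);

        return sizeof(*he_cap) + mcs_nss_size + ppe_size;
}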
diff --git a/include/linux/if_bridge.h b/include/linux/if_bridge.h
index 7843b98e1c6e..c20c7e197d07 100644
--- a/include/linux/if_bridge.h
+++ b/include/linux/if_bridge.h
@@ -105,13 +105,13 @@ static inline bool br_vlan_enabled(const struct net_device *dev)
static inline int br_vlan_get_pvid(const struct net_device *dev, u16 *p_pvid)
{
- return -1;
+ return -EINVAL;
}
static inline int br_vlan_get_info(const struct net_device *dev, u16 vid,
struct bridge_vlan_info *p_vinfo)
{
- return -1;
+ return -EINVAL;
}
#endif
diff --git a/include/linux/if_team.h b/include/linux/if_team.h
index d95cae09dea0..ac42da56f7a2 100644
--- a/include/linux/if_team.h
+++ b/include/linux/if_team.h
@@ -74,6 +74,11 @@ struct team_port {
long mode_priv[0];
};
+static inline struct team_port *team_port_get_rcu(const struct net_device *dev)
+{
+ return rcu_dereference(dev->rx_handler_data);
+}
+
static inline bool team_port_enabled(struct team_port *port)
{
return port->index != -1;
@@ -84,6 +89,19 @@ static inline bool team_port_txable(struct team_port *port)
return port->linkup && team_port_enabled(port);
}
+static inline bool team_port_dev_txable(const struct net_device *port_dev)
+{
+ struct team_port *port;
+ bool txable;
+
+ rcu_read_lock();
+ port = team_port_get_rcu(port_dev);
+ txable = port ? team_port_txable(port) : false;
+ rcu_read_unlock();
+
+ return txable;
+}
+
#ifdef CONFIG_NET_POLL_CONTROLLER
static inline void team_netpoll_send_skb(struct team_port *port,
struct sk_buff *skb)
diff --git a/include/linux/igmp.h b/include/linux/igmp.h
index f8231854b5d6..119f53941c12 100644
--- a/include/linux/igmp.h
+++ b/include/linux/igmp.h
@@ -109,6 +109,8 @@ struct ip_mc_list {
extern int ip_check_mc_rcu(struct in_device *dev, __be32 mc_addr, __be32 src_addr, u8 proto);
extern int igmp_rcv(struct sk_buff *);
extern int ip_mc_join_group(struct sock *sk, struct ip_mreqn *imr);
+extern int ip_mc_join_group_ssm(struct sock *sk, struct ip_mreqn *imr,
+ unsigned int mode);
extern int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr);
extern void ip_mc_drop_socket(struct sock *sk);
extern int ip_mc_source(int add, int omode, struct sock *sk,
diff --git a/include/linux/iio/buffer-dma.h b/include/linux/iio/buffer-dma.h
index 767467d886de..67c75372b691 100644
--- a/include/linux/iio/buffer-dma.h
+++ b/include/linux/iio/buffer-dma.h
@@ -141,7 +141,7 @@ int iio_dma_buffer_read(struct iio_buffer *buffer, size_t n,
char __user *user_buffer);
size_t iio_dma_buffer_data_available(struct iio_buffer *buffer);
int iio_dma_buffer_set_bytes_per_datum(struct iio_buffer *buffer, size_t bpd);
-int iio_dma_buffer_set_length(struct iio_buffer *buffer, int length);
+int iio_dma_buffer_set_length(struct iio_buffer *buffer, unsigned int length);
int iio_dma_buffer_request_update(struct iio_buffer *buffer);
int iio_dma_buffer_init(struct iio_dma_buffer_queue *queue,
diff --git a/include/linux/ima.h b/include/linux/ima.h
index 0e4647e0eb60..97914a2833d1 100644
--- a/include/linux/ima.h
+++ b/include/linux/ima.h
@@ -11,14 +11,16 @@
#define _LINUX_IMA_H
#include <linux/fs.h>
+#include <linux/security.h>
#include <linux/kexec.h>
struct linux_binprm;
#ifdef CONFIG_IMA
extern int ima_bprm_check(struct linux_binprm *bprm);
-extern int ima_file_check(struct file *file, int mask, int opened);
+extern int ima_file_check(struct file *file, int mask);
extern void ima_file_free(struct file *file);
extern int ima_file_mmap(struct file *file, unsigned long prot);
+extern int ima_load_data(enum kernel_load_data_id id);
extern int ima_read_file(struct file *file, enum kernel_read_file_id id);
extern int ima_post_read_file(struct file *file, void *buf, loff_t size,
enum kernel_read_file_id id);
@@ -34,7 +36,7 @@ static inline int ima_bprm_check(struct linux_binprm *bprm)
return 0;
}
-static inline int ima_file_check(struct file *file, int mask, int opened)
+static inline int ima_file_check(struct file *file, int mask)
{
return 0;
}
@@ -49,6 +51,11 @@ static inline int ima_file_mmap(struct file *file, unsigned long prot)
return 0;
}
+static inline int ima_load_data(enum kernel_load_data_id id)
+{
+ return 0;
+}
+
static inline int ima_read_file(struct file *file, enum kernel_read_file_id id)
{
return 0;
diff --git a/include/linux/inetdevice.h b/include/linux/inetdevice.h
index 27650f1bff3d..c759d1cbcedd 100644
--- a/include/linux/inetdevice.h
+++ b/include/linux/inetdevice.h
@@ -93,6 +93,7 @@ static inline void ipv4_devconf_setall(struct in_device *in_dev)
#define IN_DEV_FORWARD(in_dev) IN_DEV_CONF_GET((in_dev), FORWARDING)
#define IN_DEV_MFORWARD(in_dev) IN_DEV_ANDCONF((in_dev), MC_FORWARDING)
+#define IN_DEV_BFORWARD(in_dev) IN_DEV_ANDCONF((in_dev), BC_FORWARDING)
#define IN_DEV_RPFILTER(in_dev) IN_DEV_MAXCONF((in_dev), RP_FILTER)
#define IN_DEV_SRC_VMARK(in_dev) IN_DEV_ORCONF((in_dev), SRC_VMARK)
#define IN_DEV_SOURCE_ROUTE(in_dev) IN_DEV_ANDCONF((in_dev), \
diff --git a/include/linux/init.h b/include/linux/init.h
index bc27cf03c41e..2538d176dd1f 100644
--- a/include/linux/init.h
+++ b/include/linux/init.h
@@ -116,8 +116,24 @@
typedef int (*initcall_t)(void);
typedef void (*exitcall_t)(void);
-extern initcall_t __con_initcall_start[], __con_initcall_end[];
-extern initcall_t __security_initcall_start[], __security_initcall_end[];
+#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS
+typedef int initcall_entry_t;
+
+static inline initcall_t initcall_from_entry(initcall_entry_t *entry)
+{
+ return offset_to_ptr(entry);
+}
+#else
+typedef initcall_t initcall_entry_t;
+
+static inline initcall_t initcall_from_entry(initcall_entry_t *entry)
+{
+ return *entry;
+}
+#endif
+
+extern initcall_entry_t __con_initcall_start[], __con_initcall_end[];
+extern initcall_entry_t __security_initcall_start[], __security_initcall_end[];
/* Used for contructor calls. */
typedef void (*ctor_fn_t)(void);
@@ -167,9 +183,20 @@ extern bool initcall_debug;
* as KEEP() in the linker script.
*/
-#define __define_initcall(fn, id) \
+#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS
+#define ___define_initcall(fn, id, __sec) \
+ __ADDRESSABLE(fn) \
+ asm(".section \"" #__sec ".init\", \"a\" \n" \
+ "__initcall_" #fn #id ": \n" \
+ ".long " #fn " - . \n" \
+ ".previous \n");
+#else
+#define ___define_initcall(fn, id, __sec) \
static initcall_t __initcall_##fn##id __used \
- __attribute__((__section__(".initcall" #id ".init"))) = fn;
+ __attribute__((__section__(#__sec ".init"))) = fn;
+#endif
+
+#define __define_initcall(fn, id) ___define_initcall(fn, id, .initcall##id)
/*
* Early initcalls run before initializing SMP.
@@ -208,13 +235,8 @@ extern bool initcall_debug;
#define __exitcall(fn) \
static exitcall_t __exitcall_##fn __exit_call = fn
-#define console_initcall(fn) \
- static initcall_t __initcall_##fn \
- __used __section(.con_initcall.init) = fn
-
-#define security_initcall(fn) \
- static initcall_t __initcall_##fn \
- __used __section(.security_initcall.init) = fn
+#define console_initcall(fn) ___define_initcall(fn,, .con_initcall)
+#define security_initcall(fn) ___define_initcall(fn,, .security_initcall)
struct obs_kernel_param {
const char *str;
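A hedged sketch of how a table of initcall_entry_t is now walked: with CONFIG_HAVE_ARCH_PREL32_RELOCATIONS each entry is a 32-bit offset turned back into a pointer by initcall_from_entry(), otherwise the entry is the function pointer itself. The function name is illustrative:

static void __init example_run_console_initcalls(void)
{
        initcall_entry_t *entry;

        for (entry = __con_initcall_start; entry < __con_initcall_end; entry++)
                initcall_from_entry(entry)();
}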
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index a454b8aeb938..a7083a45a26c 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -46,15 +46,6 @@ extern struct cred init_cred;
#define INIT_CPU_TIMERS(s)
#endif
-#define INIT_PID_LINK(type) \
-{ \
- .node = { \
- .next = NULL, \
- .pprev = NULL, \
- }, \
- .pid = &init_struct_pid, \
-}
-
#define INIT_TASK_COMM "swapper"
/* Attach to the init_task data structure for proper alignment */
diff --git a/include/linux/inotify.h b/include/linux/inotify.h
index 44f9ffe72c87..6a24905f6e1e 100644
--- a/include/linux/inotify.h
+++ b/include/linux/inotify.h
@@ -18,6 +18,6 @@ extern struct ctl_table inotify_table[]; /* for sysctl */
IN_DELETE_SELF | IN_MOVE_SELF | IN_UNMOUNT | \
IN_Q_OVERFLOW | IN_IGNORED | IN_ONLYDIR | \
IN_DONT_FOLLOW | IN_EXCL_UNLINK | IN_MASK_ADD | \
- IN_ISDIR | IN_ONESHOT)
+ IN_MASK_CREATE | IN_ISDIR | IN_ONESHOT)
#endif /* _LINUX_INOTIFY_H */
diff --git a/include/linux/input/mt.h b/include/linux/input/mt.h
index d7188de4db96..3f4bf60b0bb5 100644
--- a/include/linux/input/mt.h
+++ b/include/linux/input/mt.h
@@ -100,7 +100,7 @@ static inline bool input_is_mt_axis(int axis)
return axis == ABS_MT_SLOT || input_is_mt_value(axis);
}
-void input_mt_report_slot_state(struct input_dev *dev,
+bool input_mt_report_slot_state(struct input_dev *dev,
unsigned int tool_type, bool active);
void input_mt_report_finger_count(struct input_dev *dev, int count);
diff --git a/include/linux/integrity.h b/include/linux/integrity.h
index 858d3f4a2241..54c853ec2fd1 100644
--- a/include/linux/integrity.h
+++ b/include/linux/integrity.h
@@ -44,4 +44,17 @@ static inline void integrity_load_keys(void)
}
#endif /* CONFIG_INTEGRITY */
+#ifdef CONFIG_INTEGRITY_ASYMMETRIC_KEYS
+
+extern int integrity_kernel_module_request(char *kmod_name);
+
+#else
+
+static inline int integrity_kernel_module_request(char *kmod_name)
+{
+ return 0;
+}
+
+#endif /* CONFIG_INTEGRITY_ASYMMETRIC_KEYS */
+
#endif /* _LINUX_INTEGRITY_H */
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
index 1df940196ab2..28004d74ae04 100644
--- a/include/linux/intel-iommu.h
+++ b/include/linux/intel-iommu.h
@@ -31,6 +31,7 @@
#include <linux/list.h>
#include <linux/iommu.h>
#include <linux/io-64-nonatomic-lo-hi.h>
+#include <linux/dmar.h>
#include <asm/cacheflush.h>
#include <asm/iommu.h>
@@ -114,6 +115,7 @@
* Extended Capability Register
*/
+#define ecap_dit(e) ((e >> 41) & 0x1)
#define ecap_pasid(e) ((e >> 40) & 0x1)
#define ecap_pss(e) ((e >> 35) & 0x1f)
#define ecap_eafs(e) ((e >> 34) & 0x1)
@@ -121,6 +123,7 @@
#define ecap_srs(e) ((e >> 31) & 0x1)
#define ecap_ers(e) ((e >> 30) & 0x1)
#define ecap_prs(e) ((e >> 29) & 0x1)
+#define ecap_broken_pasid(e) ((e >> 28) & 0x1)
#define ecap_dis(e) ((e >> 27) & 0x1)
#define ecap_nest(e) ((e >> 26) & 0x1)
#define ecap_mts(e) ((e >> 25) & 0x1)
@@ -283,6 +286,7 @@ enum {
#define QI_DEV_IOTLB_SID(sid) ((u64)((sid) & 0xffff) << 32)
#define QI_DEV_IOTLB_QDEP(qdep) (((qdep) & 0x1f) << 16)
#define QI_DEV_IOTLB_ADDR(addr) ((u64)(addr) & VTD_PAGE_MASK)
+#define QI_DEV_IOTLB_PFSID(pfsid) (((u64)(pfsid & 0xf) << 12) | ((u64)(pfsid & 0xfff) << 52))
#define QI_DEV_IOTLB_SIZE 1
#define QI_DEV_IOTLB_MAX_INVS 32
@@ -307,6 +311,7 @@ enum {
#define QI_DEV_EIOTLB_PASID(p) (((u64)p) << 32)
#define QI_DEV_EIOTLB_SID(sid) ((u64)((sid) & 0xffff) << 16)
#define QI_DEV_EIOTLB_QDEP(qd) ((u64)((qd) & 0x1f) << 4)
+#define QI_DEV_EIOTLB_PFSID(pfsid) (((u64)(pfsid & 0xf) << 12) | ((u64)(pfsid & 0xfff) << 52))
#define QI_DEV_EIOTLB_MAX_INVS 32
#define QI_PGRP_IDX(idx) (((u64)(idx)) << 55)
@@ -384,6 +389,42 @@ struct pasid_entry;
struct pasid_state_entry;
struct page_req_dsc;
+struct dmar_domain {
+ int nid; /* node id */
+
+ unsigned iommu_refcnt[DMAR_UNITS_SUPPORTED];
+ /* Refcount of devices per iommu */
+
+
+ u16 iommu_did[DMAR_UNITS_SUPPORTED];
+ /* Domain ids per IOMMU. Use u16 since
+ * domain ids are 16 bit wide according
+ * to VT-d spec, section 9.3 */
+
+ bool has_iotlb_device;
+ struct list_head devices; /* all devices' list */
+ struct iova_domain iovad; /* iova's that belong to this domain */
+
+ struct dma_pte *pgd; /* virtual address */
+ int gaw; /* max guest address width */
+
+ /* adjusted guest address width, 0 is level 2 30-bit */
+ int agaw;
+
+ int flags; /* flags to find out type of domain */
+
+ int iommu_coherency;/* indicate coherency of iommu access */
+ int iommu_snooping; /* indicate snooping control feature*/
+ int iommu_count; /* reference count of iommu */
+ int iommu_superpage;/* Level of superpages supported:
+ 0 == 4KiB (no superpages), 1 == 2MiB,
+ 2 == 1GiB, 3 == 512GiB, 4 == 1TiB */
+ u64 max_addr; /* maximum mapped address */
+
+ struct iommu_domain domain; /* generic domain data structure for
+ iommu core */
+};
+
struct intel_iommu {
void __iomem *reg; /* Pointer to hardware regs, virtual addr */
u64 reg_phys; /* physical address of hw register set */
@@ -413,11 +454,9 @@ struct intel_iommu {
* devices away to userspace processes (e.g. for DPDK) and don't
* want to trust that userspace will use *only* the PASID it was
* told to. But while it's all driver-arbitrated, we're fine. */
- struct pasid_entry *pasid_table;
struct pasid_state_entry *pasid_state_table;
struct page_req_dsc *prq;
unsigned char prq_name[16]; /* Name for PRQ interrupt */
- struct idr pasid_idr;
u32 pasid_max;
#endif
struct q_inval *qi; /* Queued invalidation info */
@@ -433,6 +472,27 @@ struct intel_iommu {
u32 flags; /* Software defined flags */
};
+/* PCI domain-device relationship */
+struct device_domain_info {
+ struct list_head link; /* link to domain siblings */
+ struct list_head global; /* link to global list */
+ struct list_head table; /* link to pasid table */
+ u8 bus; /* PCI bus number */
+ u8 devfn; /* PCI devfn number */
+ u16 pfsid; /* SRIOV physical function source ID */
+ u8 pasid_supported:3;
+ u8 pasid_enabled:1;
+ u8 pri_supported:1;
+ u8 pri_enabled:1;
+ u8 ats_supported:1;
+ u8 ats_enabled:1;
+ u8 ats_qdep;
+ struct device *dev; /* it's NULL for PCIe-to-PCI bridge */
+ struct intel_iommu *iommu; /* IOMMU used by this device */
+ struct dmar_domain *domain; /* pointer to domain */
+ struct pasid_table *pasid_table; /* pasid table */
+};
+
static inline void __iommu_flush_cache(
struct intel_iommu *iommu, void *addr, int size)
{
@@ -452,16 +512,22 @@ extern void qi_flush_context(struct intel_iommu *iommu, u16 did, u16 sid,
u8 fm, u64 type);
extern void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
unsigned int size_order, u64 type);
-extern void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 qdep,
- u64 addr, unsigned mask);
-
+extern void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid,
+ u16 qdep, u64 addr, unsigned mask);
extern int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu);
extern int dmar_ir_support(void);
+struct dmar_domain *get_valid_domain_for_dev(struct device *dev);
+void *alloc_pgtable_page(int node);
+void free_pgtable_page(void *vaddr);
+struct intel_iommu *domain_get_iommu(struct dmar_domain *domain);
+int for_each_device_domain(int (*fn)(struct device_domain_info *info,
+ void *data), void *data);
+
#ifdef CONFIG_INTEL_IOMMU_SVM
-extern int intel_svm_alloc_pasid_tables(struct intel_iommu *iommu);
-extern int intel_svm_free_pasid_tables(struct intel_iommu *iommu);
+int intel_svm_init(struct intel_iommu *iommu);
+int intel_svm_exit(struct intel_iommu *iommu);
extern int intel_svm_enable_prq(struct intel_iommu *iommu);
extern int intel_svm_finish_prq(struct intel_iommu *iommu);
@@ -485,6 +551,7 @@ struct intel_svm {
int flags;
int pasid;
struct list_head devs;
+ struct list_head list;
};
extern int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct intel_svm_dev *sdev);
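A hedged sketch of a callback for the new for_each_device_domain() iterator, assuming a zero return lets the walk continue; the names are illustrative:

static int example_count_pasid_devs(struct device_domain_info *info, void *data)
{
        int *count = data;

        if (info->pasid_supported)
                (*count)++;

        return 0;
}

/* usage: int n = 0; for_each_device_domain(example_count_pasid_devs, &n); */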
diff --git a/include/linux/iomap.h b/include/linux/iomap.h
index a044a824da85..3555d54bf79a 100644
--- a/include/linux/iomap.h
+++ b/include/linux/iomap.h
@@ -2,6 +2,9 @@
#ifndef LINUX_IOMAP_H
#define LINUX_IOMAP_H 1
+#include <linux/atomic.h>
+#include <linux/bitmap.h>
+#include <linux/mm.h>
#include <linux/types.h>
struct address_space;
@@ -9,6 +12,7 @@ struct fiemap_extent_info;
struct inode;
struct iov_iter;
struct kiocb;
+struct page;
struct vm_area_struct;
struct vm_fault;
@@ -29,6 +33,7 @@ struct vm_fault;
*/
#define IOMAP_F_NEW 0x01 /* blocks have been newly allocated */
#define IOMAP_F_DIRTY 0x02 /* uncommitted metadata */
+#define IOMAP_F_BUFFER_HEAD 0x04 /* file system requires buffer heads */
/*
* Flags that only need to be reported for IOMAP_REPORT requests:
@@ -55,6 +60,16 @@ struct iomap {
u16 flags; /* flags for mapping */
struct block_device *bdev; /* block device for I/O */
struct dax_device *dax_dev; /* dax_dev for dax operations */
+ void *inline_data;
+ void *private; /* filesystem private */
+
+ /*
+ * Called when finished processing a page in the mapping returned in
+ * this iomap. At least for now this is only supported in the buffered
+ * write path.
+ */
+ void (*page_done)(struct inode *inode, loff_t pos, unsigned copied,
+ struct page *page, struct iomap *iomap);
};
/*
@@ -86,8 +101,40 @@ struct iomap_ops {
ssize_t written, unsigned flags, struct iomap *iomap);
};
+/*
+ * Structure allocated for each page when block size < PAGE_SIZE to track
+ * sub-page uptodate status and I/O completions.
+ */
+struct iomap_page {
+ atomic_t read_count;
+ atomic_t write_count;
+ DECLARE_BITMAP(uptodate, PAGE_SIZE / 512);
+};
+
+static inline struct iomap_page *to_iomap_page(struct page *page)
+{
+ if (page_has_private(page))
+ return (struct iomap_page *)page_private(page);
+ return NULL;
+}
+
ssize_t iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *from,
const struct iomap_ops *ops);
+int iomap_readpage(struct page *page, const struct iomap_ops *ops);
+int iomap_readpages(struct address_space *mapping, struct list_head *pages,
+ unsigned nr_pages, const struct iomap_ops *ops);
+int iomap_set_page_dirty(struct page *page);
+int iomap_is_partially_uptodate(struct page *page, unsigned long from,
+ unsigned long count);
+int iomap_releasepage(struct page *page, gfp_t gfp_mask);
+void iomap_invalidatepage(struct page *page, unsigned int offset,
+ unsigned int len);
+#ifdef CONFIG_MIGRATION
+int iomap_migrate_page(struct address_space *mapping, struct page *newpage,
+ struct page *page, enum migrate_mode mode);
+#else
+#define iomap_migrate_page NULL
+#endif
int iomap_file_dirty(struct inode *inode, loff_t pos, loff_t len,
const struct iomap_ops *ops);
int iomap_zero_range(struct inode *inode, loff_t pos, loff_t len,
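A hedged sketch of a filesystem-supplied ->page_done hook matching the new callback member of struct iomap; per the comment above, it runs after the buffered write path has finished with a page. The function name and the action taken are illustrative:

static void example_iomap_page_done(struct inode *inode, loff_t pos,
                                    unsigned copied, struct page *page,
                                    struct iomap *iomap)
{
        if (!copied)
                return;

        /* e.g. record that post-write metadata updates are needed */
        mark_inode_dirty(inode);
}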
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index 19938ee6eb31..87994c265bf5 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -166,8 +166,6 @@ struct iommu_resv_region {
* @detach_dev: detach device from an iommu domain
* @map: map a physically contiguous memory region to an iommu domain
* @unmap: unmap a physically contiguous memory region from an iommu domain
- * @map_sg: map a scatter-gather list of physically contiguous memory chunks
- * to an iommu domain
* @flush_tlb_all: Synchronously flush all hardware TLBs for this domain
* @tlb_range_add: Add a given iova range to the flush queue for this domain
* @tlb_sync: Flush all queued ranges from the hardware TLBs and empty flush
@@ -201,8 +199,6 @@ struct iommu_ops {
phys_addr_t paddr, size_t size, int prot);
size_t (*unmap)(struct iommu_domain *domain, unsigned long iova,
size_t size);
- size_t (*map_sg)(struct iommu_domain *domain, unsigned long iova,
- struct scatterlist *sg, unsigned int nents, int prot);
void (*flush_iotlb_all)(struct iommu_domain *domain);
void (*iotlb_range_add)(struct iommu_domain *domain,
unsigned long iova, size_t size);
@@ -303,9 +299,8 @@ extern size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova,
size_t size);
extern size_t iommu_unmap_fast(struct iommu_domain *domain,
unsigned long iova, size_t size);
-extern size_t default_iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
- struct scatterlist *sg,unsigned int nents,
- int prot);
+extern size_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
+ struct scatterlist *sg,unsigned int nents, int prot);
extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova);
extern void iommu_set_fault_handler(struct iommu_domain *domain,
iommu_fault_handler_t handler, void *token);
@@ -378,13 +373,6 @@ static inline void iommu_tlb_sync(struct iommu_domain *domain)
domain->ops->iotlb_sync(domain);
}
-static inline size_t iommu_map_sg(struct iommu_domain *domain,
- unsigned long iova, struct scatterlist *sg,
- unsigned int nents, int prot)
-{
- return domain->ops->map_sg(domain, iova, sg, nents, prot);
-}
-
/* PCI device grouping function */
extern struct iommu_group *pci_device_group(struct device *dev);
/* Generic device grouping function */
@@ -698,4 +686,11 @@ const struct iommu_ops *iommu_ops_from_fwnode(struct fwnode_handle *fwnode)
#endif /* CONFIG_IOMMU_API */
+#ifdef CONFIG_IOMMU_DEBUGFS
+extern struct dentry *iommu_debugfs_dir;
+void iommu_debugfs_setup(void);
+#else
+static inline void iommu_debugfs_setup(void) {}
+#endif
+
#endif /* __LINUX_IOMMU_H */
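With the per-driver ->map_sg callback gone, callers use the core iommu_map_sg() directly. A hedged sketch with an illustrative function name and error path; since the exact return convention on a short mapping is not spelled out here, both a zero and a partial count are handled:

static int example_map_sg(struct iommu_domain *domain, unsigned long iova,
                          struct scatterlist *sg, unsigned int nents,
                          size_t total_len)
{
        size_t mapped;

        mapped = iommu_map_sg(domain, iova, sg, nents,
                              IOMMU_READ | IOMMU_WRITE);
        if (mapped < total_len) {
                if (mapped)
                        iommu_unmap(domain, iova, mapped);
                return -ENOMEM;
        }

        return 0;
}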
diff --git a/include/linux/ipc.h b/include/linux/ipc.h
index 6cc2df7f7ac9..e1c9eea6015b 100644
--- a/include/linux/ipc.h
+++ b/include/linux/ipc.h
@@ -4,7 +4,7 @@
#include <linux/spinlock.h>
#include <linux/uidgid.h>
-#include <linux/rhashtable.h>
+#include <linux/rhashtable-types.h>
#include <uapi/linux/ipc.h>
#include <linux/refcount.h>
diff --git a/include/linux/ipc_namespace.h b/include/linux/ipc_namespace.h
index b5630c8eb2f3..6ab8c1bada3f 100644
--- a/include/linux/ipc_namespace.h
+++ b/include/linux/ipc_namespace.h
@@ -9,17 +9,16 @@
#include <linux/nsproxy.h>
#include <linux/ns_common.h>
#include <linux/refcount.h>
-#include <linux/rhashtable.h>
+#include <linux/rhashtable-types.h>
struct user_namespace;
struct ipc_ids {
int in_use;
unsigned short seq;
- bool tables_initialized;
struct rw_semaphore rwsem;
struct idr ipcs_idr;
- int max_id;
+ int max_idx;
#ifdef CONFIG_CHECKPOINT_RESTORE
int next_id;
#endif
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 4bd2f34947f4..201de12a9957 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -503,6 +503,7 @@ struct irq_chip {
* IRQCHIP_SKIP_SET_WAKE: Skip chip.irq_set_wake(), for this irq chip
* IRQCHIP_ONESHOT_SAFE: One shot does not require mask/unmask
* IRQCHIP_EOI_THREADED: Chip requires eoi() on unmask in threaded mode
+ * IRQCHIP_SUPPORTS_LEVEL_MSI: Chip can provide two doorbells for Level MSIs
*/
enum {
IRQCHIP_SET_TYPE_MASKED = (1 << 0),
diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h
index cbb872c1b607..8bdbb5f29494 100644
--- a/include/linux/irqchip/arm-gic-v3.h
+++ b/include/linux/irqchip/arm-gic-v3.h
@@ -61,6 +61,16 @@
#define GICD_CTLR_ENABLE_G1A (1U << 1)
#define GICD_CTLR_ENABLE_G1 (1U << 0)
+#define GICD_IIDR_IMPLEMENTER_SHIFT 0
+#define GICD_IIDR_IMPLEMENTER_MASK (0xfff << GICD_IIDR_IMPLEMENTER_SHIFT)
+#define GICD_IIDR_REVISION_SHIFT 12
+#define GICD_IIDR_REVISION_MASK (0xf << GICD_IIDR_REVISION_SHIFT)
+#define GICD_IIDR_VARIANT_SHIFT 16
+#define GICD_IIDR_VARIANT_MASK (0xf << GICD_IIDR_VARIANT_SHIFT)
+#define GICD_IIDR_PRODUCT_ID_SHIFT 24
+#define GICD_IIDR_PRODUCT_ID_MASK (0xff << GICD_IIDR_PRODUCT_ID_SHIFT)
+
+
/*
* In systems with a single security state (what we emulate in KVM)
* the meaning of the interrupt group enable bits is slightly different
@@ -73,6 +83,7 @@
#define GICD_TYPER_MBIS (1U << 16)
#define GICD_TYPER_ID_BITS(typer) ((((typer) >> 19) & 0x1f) + 1)
+#define GICD_TYPER_NUM_LPIS(typer) ((((typer) >> 11) & 0x1f) + 1)
#define GICD_TYPER_IRQS(typer) ((((typer) & 0x1f) + 1) * 32)
#define GICD_IROUTER_SPI_MODE_ONE (0U << 31)
@@ -576,8 +587,8 @@ struct rdists {
phys_addr_t phys_base;
} __percpu *rdist;
struct page *prop_page;
- int id_bits;
u64 flags;
+ u32 gicd_typer;
bool has_vlpis;
bool has_direct_lpi;
};
diff --git a/include/linux/irqchip/arm-gic.h b/include/linux/irqchip/arm-gic.h
index 68d8b1f73682..6c4aaf04046c 100644
--- a/include/linux/irqchip/arm-gic.h
+++ b/include/linux/irqchip/arm-gic.h
@@ -71,6 +71,16 @@
(GICD_INT_DEF_PRI << 8) |\
GICD_INT_DEF_PRI)
+#define GICD_IIDR_IMPLEMENTER_SHIFT 0
+#define GICD_IIDR_IMPLEMENTER_MASK (0xfff << GICD_IIDR_IMPLEMENTER_SHIFT)
+#define GICD_IIDR_REVISION_SHIFT 12
+#define GICD_IIDR_REVISION_MASK (0xf << GICD_IIDR_REVISION_SHIFT)
+#define GICD_IIDR_VARIANT_SHIFT 16
+#define GICD_IIDR_VARIANT_MASK (0xf << GICD_IIDR_VARIANT_SHIFT)
+#define GICD_IIDR_PRODUCT_ID_SHIFT 24
+#define GICD_IIDR_PRODUCT_ID_MASK (0xff << GICD_IIDR_PRODUCT_ID_SHIFT)
+
+
#define GICH_HCR 0x0
#define GICH_VTR 0x4
#define GICH_VMCR 0x8
@@ -94,6 +104,7 @@
#define GICH_LR_PENDING_BIT (1 << 28)
#define GICH_LR_ACTIVE_BIT (1 << 29)
#define GICH_LR_EOI (1 << 19)
+#define GICH_LR_GROUP1 (1 << 30)
#define GICH_LR_HW (1 << 31)
#define GICH_VMCR_ENABLE_GRP0_SHIFT 0
diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h
index 25b33b664537..dd1e40ddac7d 100644
--- a/include/linux/irqdesc.h
+++ b/include/linux/irqdesc.h
@@ -145,11 +145,6 @@ static inline void *irq_desc_get_handler_data(struct irq_desc *desc)
return desc->irq_common_data.handler_data;
}
-static inline struct msi_desc *irq_desc_get_msi_desc(struct irq_desc *desc)
-{
- return desc->irq_common_data.msi_desc;
-}
-
/*
* Architectures call this to let the generic IRQ layer
* handle an interrupt.
diff --git a/include/linux/irqflags.h b/include/linux/irqflags.h
index 9700f00bbc04..21619c92c377 100644
--- a/include/linux/irqflags.h
+++ b/include/linux/irqflags.h
@@ -15,9 +15,20 @@
#include <linux/typecheck.h>
#include <asm/irqflags.h>
-#ifdef CONFIG_TRACE_IRQFLAGS
+/* Currently trace_softirqs_on/off is used only by lockdep */
+#ifdef CONFIG_PROVE_LOCKING
extern void trace_softirqs_on(unsigned long ip);
extern void trace_softirqs_off(unsigned long ip);
+ extern void lockdep_hardirqs_on(unsigned long ip);
+ extern void lockdep_hardirqs_off(unsigned long ip);
+#else
+ static inline void trace_softirqs_on(unsigned long ip) { }
+ static inline void trace_softirqs_off(unsigned long ip) { }
+ static inline void lockdep_hardirqs_on(unsigned long ip) { }
+ static inline void lockdep_hardirqs_off(unsigned long ip) { }
+#endif
+
+#ifdef CONFIG_TRACE_IRQFLAGS
extern void trace_hardirqs_on(void);
extern void trace_hardirqs_off(void);
# define trace_hardirq_context(p) ((p)->hardirq_context)
@@ -43,8 +54,6 @@ do { \
#else
# define trace_hardirqs_on() do { } while (0)
# define trace_hardirqs_off() do { } while (0)
-# define trace_softirqs_on(ip) do { } while (0)
-# define trace_softirqs_off(ip) do { } while (0)
# define trace_hardirq_context(p) 0
# define trace_softirq_context(p) 0
# define trace_hardirqs_enabled(p) 0
diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h
index a27cf6652327..fa928242567d 100644
--- a/include/linux/jiffies.h
+++ b/include/linux/jiffies.h
@@ -447,6 +447,11 @@ static inline clock_t jiffies_delta_to_clock_t(long delta)
return jiffies_to_clock_t(max(0L, delta));
}
+static inline unsigned int jiffies_delta_to_msecs(long delta)
+{
+ return jiffies_to_msecs(max(0L, delta));
+}
+
extern unsigned long clock_t_to_jiffies(unsigned long x);
extern u64 jiffies_64_to_clock_t(u64 x);
extern u64 nsec_to_clock_t(u64 x);
diff --git a/include/linux/joystick.h b/include/linux/joystick.h
index cbf2aa9e93b9..5153f5b9294c 100644
--- a/include/linux/joystick.h
+++ b/include/linux/joystick.h
@@ -17,10 +17,6 @@
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * Should you need to contact me, the author, you can do so either by
- * e-mail - mail your message to <vojtech@suse.cz>, or by paper mail:
- * Vojtech Pavlik, Ucitelska 1576, Prague 8, 182 00 Czech Republic
*/
#ifndef _LINUX_JOYSTICK_H
#define _LINUX_JOYSTICK_H
diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h
index b46b541c67c4..1a0b6f17a5d6 100644
--- a/include/linux/jump_label.h
+++ b/include/linux/jump_label.h
@@ -299,12 +299,18 @@ struct static_key_false {
#define DEFINE_STATIC_KEY_TRUE(name) \
struct static_key_true name = STATIC_KEY_TRUE_INIT
+#define DEFINE_STATIC_KEY_TRUE_RO(name) \
+ struct static_key_true name __ro_after_init = STATIC_KEY_TRUE_INIT
+
#define DECLARE_STATIC_KEY_TRUE(name) \
extern struct static_key_true name
#define DEFINE_STATIC_KEY_FALSE(name) \
struct static_key_false name = STATIC_KEY_FALSE_INIT
+#define DEFINE_STATIC_KEY_FALSE_RO(name) \
+ struct static_key_false name __ro_after_init = STATIC_KEY_FALSE_INIT
+
#define DECLARE_STATIC_KEY_FALSE(name) \
extern struct static_key_false name
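A hedged sketch of the intended use of the new _RO variants: a key that is decided once during early boot (before the ro_after_init section is sealed) and never flipped again. The key name, boot parameter, and condition are illustrative:

DEFINE_STATIC_KEY_FALSE_RO(example_feature_key);

static int __init example_feature_setup(char *str)
{
        /* must run before ro_after_init data becomes read-only */
        static_branch_enable(&example_feature_key);
        return 1;
}
__setup("example_feature", example_feature_setup);

static inline bool example_feature_enabled(void)
{
        return static_branch_unlikely(&example_feature_key);
}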
diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index de784fd11d12..46aae129917c 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -20,7 +20,7 @@ extern pmd_t kasan_zero_pmd[PTRS_PER_PMD];
extern pud_t kasan_zero_pud[PTRS_PER_PUD];
extern p4d_t kasan_zero_p4d[MAX_PTRS_PER_P4D];
-void kasan_populate_zero_shadow(const void *shadow_start,
+int kasan_populate_zero_shadow(const void *shadow_start,
const void *shadow_end);
static inline void *kasan_mem_to_shadow(const void *addr)
@@ -71,6 +71,9 @@ struct kasan_cache {
int kasan_module_alloc(void *addr, size_t size);
void kasan_free_shadow(const struct vm_struct *vm);
+int kasan_add_zero_shadow(void *start, unsigned long size);
+void kasan_remove_zero_shadow(void *start, unsigned long size);
+
size_t ksize(const void *);
static inline void kasan_unpoison_slab(const void *ptr) { ksize(ptr); }
size_t kasan_metadata_size(struct kmem_cache *cache);
@@ -124,6 +127,14 @@ static inline bool kasan_slab_free(struct kmem_cache *s, void *object,
static inline int kasan_module_alloc(void *addr, size_t size) { return 0; }
static inline void kasan_free_shadow(const struct vm_struct *vm) {}
+static inline int kasan_add_zero_shadow(void *start, unsigned long size)
+{
+ return 0;
+}
+static inline void kasan_remove_zero_shadow(void *start,
+ unsigned long size)
+{}
+
static inline void kasan_unpoison_slab(const void *ptr) { }
static inline size_t kasan_metadata_size(struct kmem_cache *cache) { return 0; }
diff --git a/include/linux/kcore.h b/include/linux/kcore.h
index 8de55e4b5ee9..c20f296438fb 100644
--- a/include/linux/kcore.h
+++ b/include/linux/kcore.h
@@ -35,7 +35,7 @@ struct vmcoredd_node {
};
#ifdef CONFIG_PROC_KCORE
-extern void kclist_add(struct kcore_list *, void *, size_t, int type);
+void __init kclist_add(struct kcore_list *, void *, size_t, int type);
#else
static inline
void kclist_add(struct kcore_list *new, void *addr, size_t size, int type)
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index d23123238534..d6aac75b51ba 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -85,7 +85,23 @@
* arguments just once each.
*/
#define __round_mask(x, y) ((__typeof__(x))((y)-1))
+/**
+ * round_up - round up to the next multiple of the specified power of 2
+ * @x: the value to round
+ * @y: multiple to round up to (must be a power of 2)
+ *
+ * Rounds @x up to next multiple of @y (which must be a power of 2).
+ * To perform arbitrary rounding up, use roundup() below.
+ */
#define round_up(x, y) ((((x)-1) | __round_mask(x, y))+1)
+/**
+ * round_down - round down to the next multiple of the specified power of 2
+ * @x: the value to round
+ * @y: multiple to round down to (must be a power of 2)
+ *
+ * Rounds @x down to next multiple of @y (which must be a power of 2).
+ * To perform arbitrary rounding down, use rounddown() below.
+ */
#define round_down(x, y) ((x) & ~__round_mask(x, y))
/**
@@ -110,13 +126,30 @@
# define DIV_ROUND_UP_SECTOR_T(ll,d) DIV_ROUND_UP(ll,d)
#endif
-/* The `const' in roundup() prevents gcc-3.3 from calling __divdi3 */
+/**
+ * roundup - round up to the next specified multiple
+ * @x: the value to round up
+ * @y: multiple to round up to
+ *
+ * Rounds @x up to next multiple of @y. If @y will always be a power
+ * of 2, consider using the faster round_up().
+ *
+ * The `const' here prevents gcc-3.3 from calling __divdi3
+ */
#define roundup(x, y) ( \
{ \
const typeof(y) __y = y; \
(((x) + (__y - 1)) / __y) * __y; \
} \
)
+/**
+ * rounddown - round down to next specified multiple
+ * @x: the value to round
+ * @y: multiple to round down to
+ *
+ * Rounds @x down to next multiple of @y. If @y will always be a power
+ * of 2, consider using the faster round_down().
+ */
#define rounddown(x, y) ( \
{ \
typeof(x) __x = (x); \
@@ -666,7 +699,7 @@ do { \
* your code. (Extra memory is used for special buffers that are
* allocated when trace_printk() is used.)
*
- * A little optization trick is done here. If there's only one
+ * A little optimization trick is done here. If there's only one
* argument, there's no need to scan the string for printf formats.
* The trace_puts() will suffice. But how can we take advantage of
* using trace_puts() when trace_printk() has only one argument?
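Worked examples for the four rounding helpers documented above; the values in the comments are what each macro evaluates to, and the function exists only to host them:

static inline void example_rounding(void)
{
        size_t a = round_up(10, 8);     /* 16: power-of-2 variant, mask based */
        size_t b = round_down(10, 8);   /* 8 */
        size_t c = roundup(10, 6);      /* 12: arbitrary multiple, divides */
        size_t d = rounddown(10, 6);    /* 6 */

        (void)a; (void)b; (void)c; (void)d;
}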
diff --git a/include/linux/kernfs.h b/include/linux/kernfs.h
index ab25c8b6d9e3..814643f7ee52 100644
--- a/include/linux/kernfs.h
+++ b/include/linux/kernfs.h
@@ -15,6 +15,7 @@
#include <linux/lockdep.h>
#include <linux/rbtree.h>
#include <linux/atomic.h>
+#include <linux/uidgid.h>
#include <linux/wait.h>
struct file;
@@ -325,12 +326,14 @@ void kernfs_destroy_root(struct kernfs_root *root);
struct kernfs_node *kernfs_create_dir_ns(struct kernfs_node *parent,
const char *name, umode_t mode,
+ kuid_t uid, kgid_t gid,
void *priv, const void *ns);
struct kernfs_node *kernfs_create_empty_dir(struct kernfs_node *parent,
const char *name);
struct kernfs_node *__kernfs_create_file(struct kernfs_node *parent,
- const char *name,
- umode_t mode, loff_t size,
+ const char *name, umode_t mode,
+ kuid_t uid, kgid_t gid,
+ loff_t size,
const struct kernfs_ops *ops,
void *priv, const void *ns,
struct lock_class_key *key);
@@ -415,12 +418,14 @@ static inline void kernfs_destroy_root(struct kernfs_root *root) { }
static inline struct kernfs_node *
kernfs_create_dir_ns(struct kernfs_node *parent, const char *name,
- umode_t mode, void *priv, const void *ns)
+ umode_t mode, kuid_t uid, kgid_t gid,
+ void *priv, const void *ns)
{ return ERR_PTR(-ENOSYS); }
static inline struct kernfs_node *
__kernfs_create_file(struct kernfs_node *parent, const char *name,
- umode_t mode, loff_t size, const struct kernfs_ops *ops,
+ umode_t mode, kuid_t uid, kgid_t gid,
+ loff_t size, const struct kernfs_ops *ops,
void *priv, const void *ns, struct lock_class_key *key)
{ return ERR_PTR(-ENOSYS); }
@@ -498,12 +503,15 @@ static inline struct kernfs_node *
kernfs_create_dir(struct kernfs_node *parent, const char *name, umode_t mode,
void *priv)
{
- return kernfs_create_dir_ns(parent, name, mode, priv, NULL);
+ return kernfs_create_dir_ns(parent, name, mode,
+ GLOBAL_ROOT_UID, GLOBAL_ROOT_GID,
+ priv, NULL);
}
static inline struct kernfs_node *
kernfs_create_file_ns(struct kernfs_node *parent, const char *name,
- umode_t mode, loff_t size, const struct kernfs_ops *ops,
+ umode_t mode, kuid_t uid, kgid_t gid,
+ loff_t size, const struct kernfs_ops *ops,
void *priv, const void *ns)
{
struct lock_class_key *key = NULL;
@@ -511,15 +519,17 @@ kernfs_create_file_ns(struct kernfs_node *parent, const char *name,
#ifdef CONFIG_DEBUG_LOCK_ALLOC
key = (struct lock_class_key *)&ops->lockdep_key;
#endif
- return __kernfs_create_file(parent, name, mode, size, ops, priv, ns,
- key);
+ return __kernfs_create_file(parent, name, mode, uid, gid,
+ size, ops, priv, ns, key);
}
static inline struct kernfs_node *
kernfs_create_file(struct kernfs_node *parent, const char *name, umode_t mode,
loff_t size, const struct kernfs_ops *ops, void *priv)
{
- return kernfs_create_file_ns(parent, name, mode, size, ops, priv, NULL);
+ return kernfs_create_file_ns(parent, name, mode,
+ GLOBAL_ROOT_UID, GLOBAL_ROOT_GID,
+ size, ops, priv, NULL);
}
static inline int kernfs_remove_by_name(struct kernfs_node *parent,
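As a hedged illustration of the widened signatures, a caller that wants non-root ownership might do something like the following; parent, priv and the 1000/1000 ids are assumptions, not taken from this patch:

	struct kernfs_node *kn;

	kn = kernfs_create_dir_ns(parent, "example", 0755,
				  make_kuid(&init_user_ns, 1000),
				  make_kgid(&init_user_ns, 1000),
				  priv, NULL);
	if (IS_ERR(kn))
		return PTR_ERR(kn);

Callers that do not care about ownership keep the old behaviour by passing GLOBAL_ROOT_UID/GLOBAL_ROOT_GID, exactly as the inline wrappers above do.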
diff --git a/include/linux/kobject.h b/include/linux/kobject.h
index 7f6f93c3df9c..1ab0d624fb36 100644
--- a/include/linux/kobject.h
+++ b/include/linux/kobject.h
@@ -26,6 +26,7 @@
#include <linux/wait.h>
#include <linux/atomic.h>
#include <linux/workqueue.h>
+#include <linux/uidgid.h>
#define UEVENT_HELPER_PATH_LEN 256
#define UEVENT_NUM_ENVP 32 /* number of env pointers */
@@ -114,14 +115,34 @@ extern struct kobject * __must_check kobject_get_unless_zero(
extern void kobject_put(struct kobject *kobj);
extern const void *kobject_namespace(struct kobject *kobj);
+extern void kobject_get_ownership(struct kobject *kobj,
+ kuid_t *uid, kgid_t *gid);
extern char *kobject_get_path(struct kobject *kobj, gfp_t flag);
+/**
+ * kobject_has_children - Returns whether a kobject has children.
+ * @kobj: the object to test
+ *
+ * This will return whether a kobject has other kobjects as children.
+ *
+ * It does NOT account for the presence of attribute files, only
+ * subdirectories. It also assumes there is no concurrent addition or
+ * removal of such children, and thus relies on external locking.
+ */
+static inline bool kobject_has_children(struct kobject *kobj)
+{
+ WARN_ON_ONCE(kref_read(&kobj->kref) == 0);
+
+ return kobj->sd && kobj->sd->dir.subdirs;
+}
+
struct kobj_type {
void (*release)(struct kobject *kobj);
const struct sysfs_ops *sysfs_ops;
struct attribute **default_attrs;
const struct kobj_ns_type_operations *(*child_ns_type)(struct kobject *kobj);
const void *(*namespace)(struct kobject *kobj);
+ void (*get_ownership)(struct kobject *kobj, kuid_t *uid, kgid_t *gid);
};
struct kobj_uevent_env {
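A minimal sketch of wiring up the new get_ownership() callback in a driver's kobj_type; the callback body and the group id are purely illustrative assumptions:

	static void example_get_ownership(struct kobject *kobj,
					  kuid_t *uid, kgid_t *gid)
	{
		if (gid)
			*gid = make_kgid(&init_user_ns, 1000);	/* assumed gid */
	}

	static struct kobj_type example_ktype = {
		.sysfs_ops	= &kobj_sysfs_ops,
		.get_ownership	= example_get_ownership,
	};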
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
index 9440a2fc8893..e909413e4e38 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -63,7 +63,6 @@ struct pt_regs;
struct kretprobe;
struct kretprobe_instance;
typedef int (*kprobe_pre_handler_t) (struct kprobe *, struct pt_regs *);
-typedef int (*kprobe_break_handler_t) (struct kprobe *, struct pt_regs *);
typedef void (*kprobe_post_handler_t) (struct kprobe *, struct pt_regs *,
unsigned long flags);
typedef int (*kprobe_fault_handler_t) (struct kprobe *, struct pt_regs *,
@@ -101,12 +100,6 @@ struct kprobe {
*/
kprobe_fault_handler_t fault_handler;
- /*
- * ... called if breakpoint trap occurs in probe handler.
- * Return 1 if it handled break, otherwise kernel will see it.
- */
- kprobe_break_handler_t break_handler;
-
/* Saved opcode (which has been replaced with breakpoint) */
kprobe_opcode_t opcode;
@@ -155,24 +148,6 @@ static inline int kprobe_ftrace(struct kprobe *p)
}
/*
- * Special probe type that uses setjmp-longjmp type tricks to resume
- * execution at a specified entry with a matching prototype corresponding
- * to the probed function - a trick to enable arguments to become
- * accessible seamlessly by probe handling logic.
- * Note:
- * Because of the way compilers allocate stack space for local variables
- * etc upfront, regardless of sub-scopes within a function, this mirroring
- * principle currently works only for probes placed on function entry points.
- */
-struct jprobe {
- struct kprobe kp;
- void *entry; /* probe handling code to jump to */
-};
-
-/* For backward compatibility with old code using JPROBE_ENTRY() */
-#define JPROBE_ENTRY(handler) (handler)
-
-/*
* Function-return probe -
* Note:
* User needs to provide a handler function, and initialize maxactive.
@@ -389,9 +364,6 @@ int register_kprobe(struct kprobe *p);
void unregister_kprobe(struct kprobe *p);
int register_kprobes(struct kprobe **kps, int num);
void unregister_kprobes(struct kprobe **kps, int num);
-int setjmp_pre_handler(struct kprobe *, struct pt_regs *);
-int longjmp_break_handler(struct kprobe *, struct pt_regs *);
-void jprobe_return(void);
unsigned long arch_deref_entry_point(void *);
int register_kretprobe(struct kretprobe *rp);
@@ -439,9 +411,6 @@ static inline void unregister_kprobe(struct kprobe *p)
static inline void unregister_kprobes(struct kprobe **kps, int num)
{
}
-static inline void jprobe_return(void)
-{
-}
static inline int register_kretprobe(struct kretprobe *rp)
{
return -ENOSYS;
@@ -468,20 +437,6 @@ static inline int enable_kprobe(struct kprobe *kp)
return -ENOSYS;
}
#endif /* CONFIG_KPROBES */
-static inline int register_jprobe(struct jprobe *p)
-{
- return -ENOSYS;
-}
-static inline int register_jprobes(struct jprobe **jps, int num)
-{
- return -ENOSYS;
-}
-static inline void unregister_jprobe(struct jprobe *p)
-{
-}
-static inline void unregister_jprobes(struct jprobe **jps, int num)
-{
-}
static inline int disable_kretprobe(struct kretprobe *rp)
{
return disable_kprobe(&rp->kp);
@@ -490,14 +445,6 @@ static inline int enable_kretprobe(struct kretprobe *rp)
{
return enable_kprobe(&rp->kp);
}
-static inline int disable_jprobe(struct jprobe *jp)
-{
- return -ENOSYS;
-}
-static inline int enable_jprobe(struct jprobe *jp)
-{
- return -ENOSYS;
-}
#ifndef CONFIG_KPROBES
static inline bool is_kprobe_insn_slot(unsigned long addr)
diff --git a/include/linux/kthread.h b/include/linux/kthread.h
index 2803264c512f..c1961761311d 100644
--- a/include/linux/kthread.h
+++ b/include/linux/kthread.h
@@ -62,7 +62,6 @@ void *kthread_probe_data(struct task_struct *k);
int kthread_park(struct task_struct *k);
void kthread_unpark(struct task_struct *k);
void kthread_parkme(void);
-void kthread_park_complete(struct task_struct *k);
int kthreadd(void *unused);
extern struct task_struct *kthreadd_task;
diff --git a/include/linux/ktime.h b/include/linux/ktime.h
index 5b9fddbaac41..b2bb44f87f5a 100644
--- a/include/linux/ktime.h
+++ b/include/linux/ktime.h
@@ -93,8 +93,11 @@ static inline ktime_t timeval_to_ktime(struct timeval tv)
/* Map the ktime_t to timeval conversion to ns_to_timeval function */
#define ktime_to_timeval(kt) ns_to_timeval((kt))
-/* Convert ktime_t to nanoseconds - NOP in the scalar storage format: */
-#define ktime_to_ns(kt) (kt)
+/* Convert ktime_t to nanoseconds */
+static inline s64 ktime_to_ns(const ktime_t kt)
+{
+ return kt;
+}
/**
* ktime_compare - Compares two ktime_t variables for less, greater or equal
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 4ee7bc548a83..0205aee44ded 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -130,7 +130,7 @@ static inline bool is_error_page(struct page *page)
#define KVM_REQUEST_ARCH_BASE 8
#define KVM_ARCH_REQ_FLAGS(nr, flags) ({ \
- BUILD_BUG_ON((unsigned)(nr) >= 32 - KVM_REQUEST_ARCH_BASE); \
+ BUILD_BUG_ON((unsigned)(nr) >= (FIELD_SIZEOF(struct kvm_vcpu, requests) * 8) - KVM_REQUEST_ARCH_BASE); \
(unsigned)(((nr) + KVM_REQUEST_ARCH_BASE) | (flags)); \
})
#define KVM_ARCH_REQ(nr) KVM_ARCH_REQ_FLAGS(nr, 0)
@@ -224,7 +224,7 @@ struct kvm_vcpu {
int vcpu_id;
int srcu_idx;
int mode;
- unsigned long requests;
+ u64 requests;
unsigned long guest_debug;
int pre_pcpu;
@@ -309,6 +309,13 @@ static inline unsigned long kvm_dirty_bitmap_bytes(struct kvm_memory_slot *memsl
return ALIGN(memslot->npages, BITS_PER_LONG) / 8;
}
+static inline unsigned long *kvm_second_dirty_bitmap(struct kvm_memory_slot *memslot)
+{
+ unsigned long len = kvm_dirty_bitmap_bytes(memslot);
+
+ return memslot->dirty_bitmap + len / sizeof(*memslot->dirty_bitmap);
+}
+
struct kvm_s390_adapter_int {
u64 ind_addr;
u64 summary_addr;
@@ -827,6 +834,13 @@ static inline void kvm_arch_free_vm(struct kvm *kvm)
}
#endif
+#ifndef __KVM_HAVE_ARCH_FLUSH_REMOTE_TLB
+static inline int kvm_arch_flush_remote_tlb(struct kvm *kvm)
+{
+ return -ENOTSUPP;
+}
+#endif
+
#ifdef __KVM_HAVE_ARCH_NONCOHERENT_DMA
void kvm_arch_register_noncoherent_dma(struct kvm *kvm);
void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm);
@@ -1124,7 +1138,7 @@ static inline void kvm_make_request(int req, struct kvm_vcpu *vcpu)
* caller. Paired with the smp_mb__after_atomic in kvm_check_request.
*/
smp_wmb();
- set_bit(req & KVM_REQUEST_MASK, &vcpu->requests);
+ set_bit(req & KVM_REQUEST_MASK, (void *)&vcpu->requests);
}
static inline bool kvm_request_pending(struct kvm_vcpu *vcpu)
@@ -1134,12 +1148,12 @@ static inline bool kvm_request_pending(struct kvm_vcpu *vcpu)
static inline bool kvm_test_request(int req, struct kvm_vcpu *vcpu)
{
- return test_bit(req & KVM_REQUEST_MASK, &vcpu->requests);
+ return test_bit(req & KVM_REQUEST_MASK, (void *)&vcpu->requests);
}
static inline void kvm_clear_request(int req, struct kvm_vcpu *vcpu)
{
- clear_bit(req & KVM_REQUEST_MASK, &vcpu->requests);
+ clear_bit(req & KVM_REQUEST_MASK, (void *)&vcpu->requests);
}
static inline bool kvm_check_request(int req, struct kvm_vcpu *vcpu)
@@ -1275,8 +1289,8 @@ static inline long kvm_arch_vcpu_async_ioctl(struct file *filp,
}
#endif /* CONFIG_HAVE_KVM_VCPU_ASYNC_IOCTL */
-void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
- unsigned long start, unsigned long end);
+int kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
+ unsigned long start, unsigned long end, bool blockable);
#ifdef CONFIG_HAVE_KVM_VCPU_RUN_PID_CHANGE
int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu);
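For illustration, an architecture-defined request uses the same helpers against the wider u64 request word; the request name and handler below are hypothetical:

	#define KVM_REQ_EXAMPLE		KVM_ARCH_REQ(0)

	kvm_make_request(KVM_REQ_EXAMPLE, vcpu);
	if (kvm_check_request(KVM_REQ_EXAMPLE, vcpu))
		handle_example_request(vcpu);	/* hypothetical handler */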
diff --git a/include/linux/leds.h b/include/linux/leds.h
index b7e82550e655..834683d603f9 100644
--- a/include/linux/leds.h
+++ b/include/linux/leds.h
@@ -253,7 +253,7 @@ static inline bool led_sysfs_is_disabled(struct led_classdev *led_cdev)
struct led_trigger {
/* Trigger Properties */
const char *name;
- void (*activate)(struct led_classdev *led_cdev);
+ int (*activate)(struct led_classdev *led_cdev);
void (*deactivate)(struct led_classdev *led_cdev);
/* LEDs under control by this trigger (for simple triggers) */
@@ -262,8 +262,19 @@ struct led_trigger {
/* Link to next registered trigger */
struct list_head next_trig;
+
+ const struct attribute_group **groups;
};
+/*
+ * Currently the attributes in struct led_trigger::groups are added directly to
+ * the LED device. As this might change in the future, the following
+ * macros abstract getting the LED device and its trigger_data from the dev
+ * parameter passed to the attribute accessor functions.
+ */
+#define led_trigger_get_led(dev) ((struct led_classdev *)dev_get_drvdata((dev)))
+#define led_trigger_get_drvdata(dev) (led_get_trigger_data(led_trigger_get_led(dev)))
+
ssize_t led_trigger_store(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count);
ssize_t led_trigger_show(struct device *dev, struct device_attribute *attr,
@@ -288,10 +299,16 @@ extern void led_trigger_blink_oneshot(struct led_trigger *trigger,
unsigned long *delay_off,
int invert);
extern void led_trigger_set_default(struct led_classdev *led_cdev);
-extern void led_trigger_set(struct led_classdev *led_cdev,
- struct led_trigger *trigger);
+extern int led_trigger_set(struct led_classdev *led_cdev,
+ struct led_trigger *trigger);
extern void led_trigger_remove(struct led_classdev *led_cdev);
+static inline void led_set_trigger_data(struct led_classdev *led_cdev,
+ void *trigger_data)
+{
+ led_cdev->trigger_data = trigger_data;
+}
+
static inline void *led_get_trigger_data(struct led_classdev *led_cdev)
{
return led_cdev->trigger_data;
@@ -315,6 +332,10 @@ static inline void *led_get_trigger_data(struct led_classdev *led_cdev)
extern void led_trigger_rename_static(const char *name,
struct led_trigger *trig);
+#define module_led_trigger(__led_trigger) \
+ module_driver(__led_trigger, led_trigger_register, \
+ led_trigger_unregister)
+
#else
/* Trigger has no members */
@@ -334,9 +355,14 @@ static inline void led_trigger_blink_oneshot(struct led_trigger *trigger,
unsigned long *delay_off,
int invert) {}
static inline void led_trigger_set_default(struct led_classdev *led_cdev) {}
-static inline void led_trigger_set(struct led_classdev *led_cdev,
- struct led_trigger *trigger) {}
+static inline int led_trigger_set(struct led_classdev *led_cdev,
+ struct led_trigger *trigger)
+{
+ return 0;
+}
+
static inline void led_trigger_remove(struct led_classdev *led_cdev) {}
+static inline void led_set_trigger_data(struct led_classdev *led_cdev, void *trigger_data) {}
static inline void *led_get_trigger_data(struct led_classdev *led_cdev)
{
return NULL;
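A hedged sketch of how a simple trigger might use the new accessor macros and the int-returning activate(); struct my_trig_data and its interval field are assumptions for illustration:

	static ssize_t interval_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
	{
		struct my_trig_data *data = led_trigger_get_drvdata(dev);

		return sprintf(buf, "%u\n", data->interval);
	}

	static int my_trig_activate(struct led_classdev *led_cdev)
	{
		struct my_trig_data *data = kzalloc(sizeof(*data), GFP_KERNEL);

		if (!data)
			return -ENOMEM;		/* activate() can now fail */
		led_set_trigger_data(led_cdev, data);
		return 0;
	}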
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 80c2bd202367..38c95d66ab12 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -210,6 +210,7 @@ enum {
ATA_FLAG_SLAVE_POSS = (1 << 0), /* host supports slave dev */
/* (doesn't imply presence) */
ATA_FLAG_SATA = (1 << 1),
+ ATA_FLAG_NO_LPM = (1 << 2), /* host not happy with LPM */
ATA_FLAG_NO_LOG_PAGE = (1 << 5), /* do not issue log page read */
ATA_FLAG_NO_ATAPI = (1 << 6), /* No ATAPI support */
ATA_FLAG_PIO_DMA = (1 << 7), /* PIO cmds via DMA */
@@ -1111,6 +1112,8 @@ extern struct ata_host *ata_host_alloc(struct device *dev, int max_ports);
extern struct ata_host *ata_host_alloc_pinfo(struct device *dev,
const struct ata_port_info * const * ppi, int n_ports);
extern int ata_slave_link_init(struct ata_port *ap);
+extern void ata_host_get(struct ata_host *host);
+extern void ata_host_put(struct ata_host *host);
extern int ata_host_start(struct ata_host *host);
extern int ata_host_register(struct ata_host *host,
struct scsi_host_template *sht);
@@ -1496,6 +1499,29 @@ static inline bool ata_tag_valid(unsigned int tag)
return tag < ATA_MAX_QUEUE || ata_tag_internal(tag);
}
+#define __ata_qc_for_each(ap, qc, tag, max_tag, fn) \
+ for ((tag) = 0; (tag) < (max_tag) && \
+ ({ qc = fn((ap), (tag)); 1; }); (tag)++) \
+
+/*
+ * Internal use only, iterate commands ignoring error handling and
+ * status of 'qc'.
+ */
+#define ata_qc_for_each_raw(ap, qc, tag) \
+ __ata_qc_for_each(ap, qc, tag, ATA_MAX_QUEUE, __ata_qc_from_tag)
+
+/*
+ * Iterate all potential commands that can be queued
+ */
+#define ata_qc_for_each(ap, qc, tag) \
+ __ata_qc_for_each(ap, qc, tag, ATA_MAX_QUEUE, ata_qc_from_tag)
+
+/*
+ * Like ata_qc_for_each, but with the internal tag included
+ */
+#define ata_qc_for_each_with_internal(ap, qc, tag) \
+ __ata_qc_for_each(ap, qc, tag, ATA_MAX_QUEUE + 1, ata_qc_from_tag)
+
/*
* device helpers
*/
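Illustrative use of the new iterators (the loop body is a placeholder):

	struct ata_queued_cmd *qc;
	unsigned int tag;

	ata_qc_for_each(ap, qc, tag) {
		if (!qc)
			continue;	/* ata_qc_from_tag() may return NULL */
		/* inspect or complete qc here */
	}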
diff --git a/include/linux/list.h b/include/linux/list.h
index 4b129df4d46b..de04cc5ed536 100644
--- a/include/linux/list.h
+++ b/include/linux/list.h
@@ -285,6 +285,36 @@ static inline void list_cut_position(struct list_head *list,
__list_cut_position(list, head, entry);
}
+/**
+ * list_cut_before - cut a list into two, before given entry
+ * @list: a new list to add all removed entries
+ * @head: a list with entries
+ * @entry: an entry within head, could be the head itself
+ *
+ * This helper moves the initial part of @head, up to but
+ * excluding @entry, from @head to @list. You should pass
+ * in @entry an element you know is on @head. @list should
+ * be an empty list or a list you do not care about losing
+ * its data.
+ * If @entry == @head, all entries on @head are moved to
+ * @list.
+ */
+static inline void list_cut_before(struct list_head *list,
+ struct list_head *head,
+ struct list_head *entry)
+{
+ if (head->next == entry) {
+ INIT_LIST_HEAD(list);
+ return;
+ }
+ list->next = head->next;
+ list->next->prev = list;
+ list->prev = entry->prev;
+ list->prev->next = list;
+ head->next = entry;
+ entry->prev = head;
+}
+
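For illustration (pending and pos are assumed to be an existing list head and an entry on it):

	LIST_HEAD(done);

	list_cut_before(&done, &pending, pos);	/* pos itself stays on pending */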
static inline void __list_splice(const struct list_head *list,
struct list_head *prev,
struct list_head *next)
diff --git a/include/linux/list_lru.h b/include/linux/list_lru.h
index 96def9d15b1b..aa5efd9351eb 100644
--- a/include/linux/list_lru.h
+++ b/include/linux/list_lru.h
@@ -42,7 +42,7 @@ struct list_lru_node {
spinlock_t lock;
/* global list, used for the root cgroup in cgroup aware lrus */
struct list_lru_one lru;
-#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
+#ifdef CONFIG_MEMCG_KMEM
/* for cgroup aware lrus points to per cgroup lists, otherwise NULL */
struct list_lru_memcg __rcu *memcg_lrus;
#endif
@@ -51,21 +51,25 @@ struct list_lru_node {
struct list_lru {
struct list_lru_node *node;
-#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
+#ifdef CONFIG_MEMCG_KMEM
struct list_head list;
+ int shrinker_id;
#endif
};
void list_lru_destroy(struct list_lru *lru);
int __list_lru_init(struct list_lru *lru, bool memcg_aware,
- struct lock_class_key *key);
+ struct lock_class_key *key, struct shrinker *shrinker);
-#define list_lru_init(lru) __list_lru_init((lru), false, NULL)
-#define list_lru_init_key(lru, key) __list_lru_init((lru), false, (key))
-#define list_lru_init_memcg(lru) __list_lru_init((lru), true, NULL)
+#define list_lru_init(lru) \
+ __list_lru_init((lru), false, NULL, NULL)
+#define list_lru_init_key(lru, key) \
+ __list_lru_init((lru), false, (key), NULL)
+#define list_lru_init_memcg(lru, shrinker) \
+ __list_lru_init((lru), true, NULL, shrinker)
int memcg_update_all_list_lrus(int num_memcgs);
-void memcg_drain_all_list_lrus(int src_idx, int dst_idx);
+void memcg_drain_all_list_lrus(int src_idx, struct mem_cgroup *dst_memcg);
/**
* list_lru_add: add an element to the lru list's tail
@@ -162,6 +166,23 @@ unsigned long list_lru_walk_one(struct list_lru *lru,
int nid, struct mem_cgroup *memcg,
list_lru_walk_cb isolate, void *cb_arg,
unsigned long *nr_to_walk);
+/**
+ * list_lru_walk_one_irq: walk a list_lru, isolating and disposing freeable items.
+ * @lru: the lru pointer.
+ * @nid: the node id to scan from.
+ * @memcg: the cgroup to scan from.
+ * @isolate: callback function that is responsible for deciding what to do with
+ * the item currently being scanned
+ * @cb_arg: opaque type that will be passed to @isolate
+ * @nr_to_walk: how many items to scan.
+ *
+ * Same as @list_lru_walk_one except that the spinlock is acquired with
+ * spin_lock_irq().
+ */
+unsigned long list_lru_walk_one_irq(struct list_lru *lru,
+ int nid, struct mem_cgroup *memcg,
+ list_lru_walk_cb isolate, void *cb_arg,
+ unsigned long *nr_to_walk);
unsigned long list_lru_walk_node(struct list_lru *lru, int nid,
list_lru_walk_cb isolate, void *cb_arg,
unsigned long *nr_to_walk);
@@ -175,6 +196,14 @@ list_lru_shrink_walk(struct list_lru *lru, struct shrink_control *sc,
}
static inline unsigned long
+list_lru_shrink_walk_irq(struct list_lru *lru, struct shrink_control *sc,
+ list_lru_walk_cb isolate, void *cb_arg)
+{
+ return list_lru_walk_one_irq(lru, sc->nid, sc->memcg, isolate, cb_arg,
+ &sc->nr_to_scan);
+}
+
+static inline unsigned long
list_lru_walk(struct list_lru *lru, list_lru_walk_cb isolate,
void *cb_arg, unsigned long nr_to_walk)
{
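A hedged sketch of the new pairing between a memcg-aware list_lru and its shrinker; the names are hypothetical and the exact registration order is left to the subsystem:

	static struct shrinker my_shrinker = {
		.count_objects	= my_count,
		.scan_objects	= my_scan,
		.flags		= SHRINKER_MEMCG_AWARE,
	};

	err = register_shrinker(&my_shrinker);
	if (!err)
		err = list_lru_init_memcg(&my_lru, &my_shrinker);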
diff --git a/include/linux/lockd/lockd.h b/include/linux/lockd/lockd.h
index 4fd95dbeb52f..b065ef406770 100644
--- a/include/linux/lockd/lockd.h
+++ b/include/linux/lockd/lockd.h
@@ -299,7 +299,7 @@ int nlmsvc_unlock_all_by_ip(struct sockaddr *server_addr);
static inline struct inode *nlmsvc_file_inode(struct nlm_file *file)
{
- return file_inode(file->f_file);
+ return locks_inode(file->f_file);
}
static inline int __nlm_privileged_request4(const struct sockaddr *sap)
@@ -359,7 +359,7 @@ static inline int nlm_privileged_requester(const struct svc_rqst *rqstp)
static inline int nlm_compare_locks(const struct file_lock *fl1,
const struct file_lock *fl2)
{
- return file_inode(fl1->fl_file) == file_inode(fl2->fl_file)
+ return locks_inode(fl1->fl_file) == locks_inode(fl2->fl_file)
&& fl1->fl_pid == fl2->fl_pid
&& fl1->fl_owner == fl2->fl_owner
&& fl1->fl_start == fl2->fl_start
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 6fc77d4dbdcd..b0d0b51c4d85 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -266,7 +266,7 @@ struct held_lock {
/*
* Initialization, self-test and debugging-output methods:
*/
-extern void lockdep_info(void);
+extern void lockdep_init(void);
extern void lockdep_reset(void);
extern void lockdep_reset_lock(struct lockdep_map *lock);
extern void lockdep_free_key_range(void *start, unsigned long size);
@@ -406,7 +406,7 @@ static inline void lockdep_on(void)
# define lock_downgrade(l, i) do { } while (0)
# define lock_set_class(l, n, k, s, i) do { } while (0)
# define lock_set_subclass(l, s, i) do { } while (0)
-# define lockdep_info() do { } while (0)
+# define lockdep_init() do { } while (0)
# define lockdep_init_map(lock, name, key, sub) \
do { (void)(name); (void)(key); } while (0)
# define lockdep_set_class(lock, key) do { (void)(key); } while (0)
@@ -532,7 +532,7 @@ do { \
#endif /* CONFIG_LOCKDEP */
-#ifdef CONFIG_TRACE_IRQFLAGS
+#ifdef CONFIG_PROVE_LOCKING
extern void print_irqtrace_events(struct task_struct *curr);
#else
static inline void print_irqtrace_events(struct task_struct *curr)
diff --git a/include/linux/lsm_hooks.h b/include/linux/lsm_hooks.h
index 8f1131c8dd54..97a020c616ad 100644
--- a/include/linux/lsm_hooks.h
+++ b/include/linux/lsm_hooks.h
@@ -576,6 +576,10 @@
* userspace to load a kernel module with the given name.
* @kmod_name name of the module requested by the kernel
* Return 0 if successful.
+ * @kernel_load_data:
+ * Load data provided by userspace.
+ * @id kernel load data identifier
+ * Return 0 if permission is granted.
* @kernel_read_file:
* Read a file specified by userspace.
* @file contains the file structure pointing to the file being read
@@ -1569,7 +1573,7 @@ union security_list_options {
int (*file_send_sigiotask)(struct task_struct *tsk,
struct fown_struct *fown, int sig);
int (*file_receive)(struct file *file);
- int (*file_open)(struct file *file, const struct cred *cred);
+ int (*file_open)(struct file *file);
int (*task_alloc)(struct task_struct *task, unsigned long clone_flags);
void (*task_free)(struct task_struct *task);
@@ -1582,6 +1586,7 @@ union security_list_options {
int (*kernel_act_as)(struct cred *new, u32 secid);
int (*kernel_create_files_as)(struct cred *new, struct inode *inode);
int (*kernel_module_request)(char *kmod_name);
+ int (*kernel_load_data)(enum kernel_load_data_id id);
int (*kernel_read_file)(struct file *file, enum kernel_read_file_id id);
int (*kernel_post_read_file)(struct file *file, char *buf, loff_t size,
enum kernel_read_file_id id);
@@ -1872,6 +1877,7 @@ struct security_hook_heads {
struct hlist_head cred_getsecid;
struct hlist_head kernel_act_as;
struct hlist_head kernel_create_files_as;
+ struct hlist_head kernel_load_data;
struct hlist_head kernel_read_file;
struct hlist_head kernel_post_read_file;
struct hlist_head kernel_module_request;
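A minimal sketch of an LSM providing the new hook; the policy here is a placeholder, not a real security module:

	static int example_kernel_load_data(enum kernel_load_data_id id)
	{
		return 0;	/* 0 grants permission, -EPERM would deny */
	}

	static struct security_hook_list example_hooks[] __lsm_ro_after_init = {
		LSM_HOOK_INIT(kernel_load_data, example_kernel_load_data),
	};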
diff --git a/include/linux/mailbox/mtk-cmdq-mailbox.h b/include/linux/mailbox/mtk-cmdq-mailbox.h
new file mode 100644
index 000000000000..ccb73422c2fa
--- /dev/null
+++ b/include/linux/mailbox/mtk-cmdq-mailbox.h
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2018 MediaTek Inc.
+ *
+ */
+
+#ifndef __MTK_CMDQ_MAILBOX_H__
+#define __MTK_CMDQ_MAILBOX_H__
+
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+#define CMDQ_INST_SIZE 8 /* instruction is 64-bit */
+#define CMDQ_SUBSYS_SHIFT 16
+#define CMDQ_OP_CODE_SHIFT 24
+#define CMDQ_JUMP_PASS CMDQ_INST_SIZE
+
+#define CMDQ_WFE_UPDATE BIT(31)
+#define CMDQ_WFE_WAIT BIT(15)
+#define CMDQ_WFE_WAIT_VALUE 0x1
+
+/*
+ * CMDQ_CODE_MASK:
+ * set write mask
+ * format: op mask
+ * CMDQ_CODE_WRITE:
+ * write value into target register
+ * format: op subsys address value
+ * CMDQ_CODE_JUMP:
+ * jump by offset
+ * format: op offset
+ * CMDQ_CODE_WFE:
+ * wait for event and clear
+ * it is just clear if no wait
+ * format: [wait] op event update:1 to_wait:1 wait:1
+ * [clear] op event update:1 to_wait:0 wait:0
+ * CMDQ_CODE_EOC:
+ * end of command
+ * format: op irq_flag
+ */
+enum cmdq_code {
+ CMDQ_CODE_MASK = 0x02,
+ CMDQ_CODE_WRITE = 0x04,
+ CMDQ_CODE_JUMP = 0x10,
+ CMDQ_CODE_WFE = 0x20,
+ CMDQ_CODE_EOC = 0x40,
+};
+
+enum cmdq_cb_status {
+ CMDQ_CB_NORMAL = 0,
+ CMDQ_CB_ERROR
+};
+
+struct cmdq_cb_data {
+ enum cmdq_cb_status sta;
+ void *data;
+};
+
+typedef void (*cmdq_async_flush_cb)(struct cmdq_cb_data data);
+
+struct cmdq_task_cb {
+ cmdq_async_flush_cb cb;
+ void *data;
+};
+
+struct cmdq_pkt {
+ void *va_base;
+ dma_addr_t pa_base;
+ size_t cmd_buf_size; /* command occupied size */
+ size_t buf_size; /* real buffer size */
+ struct cmdq_task_cb cb;
+ struct cmdq_task_cb async_cb;
+ void *cl;
+};
+
+#endif /* __MTK_CMDQ_MAILBOX_H__ */
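Following the CMDQ_CODE_WFE comment above, the operand flags would be combined roughly as below; this is a sketch only, the mailbox driver owns the final instruction layout:

	u32 wfe_wait  = CMDQ_WFE_UPDATE | CMDQ_WFE_WAIT | CMDQ_WFE_WAIT_VALUE;
	u32 wfe_clear = CMDQ_WFE_UPDATE;	/* clear the event without waiting */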
diff --git a/include/linux/marvell_phy.h b/include/linux/marvell_phy.h
index 4f5f8c21e283..1eb6f244588d 100644
--- a/include/linux/marvell_phy.h
+++ b/include/linux/marvell_phy.h
@@ -27,6 +27,8 @@
*/
#define MARVELL_PHY_ID_88E6390 0x01410f90
+#define MARVELL_PHY_FAMILY_ID(id) ((id) >> 4)
+
/* struct phy_device dev_flags definitions */
#define MARVELL_PHY_M1145_FLAGS_RESISTANCE 0x00000001
#define MARVELL_PHY_M1118_DNS323_LEDS 0x00000002
diff --git a/include/linux/memblock.h b/include/linux/memblock.h
index ca59883c8364..516920549378 100644
--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -20,31 +20,60 @@
#define INIT_MEMBLOCK_REGIONS 128
#define INIT_PHYSMEM_REGIONS 4
-/* Definition of memblock flags. */
-enum {
+/**
+ * enum memblock_flags - definition of memory region attributes
+ * @MEMBLOCK_NONE: no special request
+ * @MEMBLOCK_HOTPLUG: hotpluggable region
+ * @MEMBLOCK_MIRROR: mirrored region
+ * @MEMBLOCK_NOMAP: don't add to kernel direct mapping
+ */
+enum memblock_flags {
MEMBLOCK_NONE = 0x0, /* No special request */
MEMBLOCK_HOTPLUG = 0x1, /* hotpluggable region */
MEMBLOCK_MIRROR = 0x2, /* mirrored region */
MEMBLOCK_NOMAP = 0x4, /* don't add to kernel direct mapping */
};
+/**
+ * struct memblock_region - represents a memory region
+ * @base: physical address of the region
+ * @size: size of the region
+ * @flags: memory region attributes
+ * @nid: NUMA node id
+ */
struct memblock_region {
phys_addr_t base;
phys_addr_t size;
- unsigned long flags;
+ enum memblock_flags flags;
#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
int nid;
#endif
};
+/**
+ * struct memblock_type - collection of memory regions of certain type
+ * @cnt: number of regions
+ * @max: size of the allocated array
+ * @total_size: size of all regions
+ * @regions: array of regions
+ * @name: the memory type symbolic name
+ */
struct memblock_type {
- unsigned long cnt; /* number of regions */
- unsigned long max; /* size of the allocated array */
- phys_addr_t total_size; /* size of all regions */
+ unsigned long cnt;
+ unsigned long max;
+ phys_addr_t total_size;
struct memblock_region *regions;
char *name;
};
+/**
+ * struct memblock - memblock allocator metadata
+ * @bottom_up: is bottom up direction?
+ * @current_limit: physical address of the current allocation limit
+ * @memory: usable memory regions
+ * @reserved: reserved memory regions
+ * @physmem: all physical memory
+ */
struct memblock {
bool bottom_up; /* is bottom up direction? */
phys_addr_t current_limit;
@@ -72,7 +101,7 @@ void memblock_discard(void);
phys_addr_t memblock_find_in_range_node(phys_addr_t size, phys_addr_t align,
phys_addr_t start, phys_addr_t end,
- int nid, ulong flags);
+ int nid, enum memblock_flags flags);
phys_addr_t memblock_find_in_range(phys_addr_t start, phys_addr_t end,
phys_addr_t size, phys_addr_t align);
void memblock_allow_resize(void);
@@ -89,19 +118,19 @@ int memblock_clear_hotplug(phys_addr_t base, phys_addr_t size);
int memblock_mark_mirror(phys_addr_t base, phys_addr_t size);
int memblock_mark_nomap(phys_addr_t base, phys_addr_t size);
int memblock_clear_nomap(phys_addr_t base, phys_addr_t size);
-ulong choose_memblock_flags(void);
+enum memblock_flags choose_memblock_flags(void);
/* Low level functions */
int memblock_add_range(struct memblock_type *type,
phys_addr_t base, phys_addr_t size,
- int nid, unsigned long flags);
+ int nid, enum memblock_flags flags);
-void __next_mem_range(u64 *idx, int nid, ulong flags,
+void __next_mem_range(u64 *idx, int nid, enum memblock_flags flags,
struct memblock_type *type_a,
struct memblock_type *type_b, phys_addr_t *out_start,
phys_addr_t *out_end, int *out_nid);
-void __next_mem_range_rev(u64 *idx, int nid, ulong flags,
+void __next_mem_range_rev(u64 *idx, int nid, enum memblock_flags flags,
struct memblock_type *type_a,
struct memblock_type *type_b, phys_addr_t *out_start,
phys_addr_t *out_end, int *out_nid);
@@ -239,7 +268,6 @@ void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn,
/**
* for_each_resv_unavail_range - iterate through reserved and unavailable memory
* @i: u64 used as loop variable
- * @flags: pick from blocks based on memory attributes
* @p_start: ptr to phys_addr_t for start address of the range, can be %NULL
* @p_end: ptr to phys_addr_t for end address of the range, can be %NULL
*
@@ -253,13 +281,13 @@ void __next_mem_pfn_range(int *idx, int nid, unsigned long *out_start_pfn,
NUMA_NO_NODE, MEMBLOCK_NONE, p_start, p_end, NULL)
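Illustrative walk over the reserved-or-unavailable ranges (variable names assumed):

	phys_addr_t start, end;
	u64 i;

	for_each_resv_unavail_range(i, &start, &end)
		pr_debug("resv/unavail: %pa..%pa\n", &start, &end);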
static inline void memblock_set_region_flags(struct memblock_region *r,
- unsigned long flags)
+ enum memblock_flags flags)
{
r->flags |= flags;
}
static inline void memblock_clear_region_flags(struct memblock_region *r,
- unsigned long flags)
+ enum memblock_flags flags)
{
r->flags &= ~flags;
}
@@ -317,10 +345,10 @@ static inline bool memblock_bottom_up(void)
phys_addr_t __init memblock_alloc_range(phys_addr_t size, phys_addr_t align,
phys_addr_t start, phys_addr_t end,
- ulong flags);
+ enum memblock_flags flags);
phys_addr_t memblock_alloc_base_nid(phys_addr_t size,
phys_addr_t align, phys_addr_t max_addr,
- int nid, ulong flags);
+ int nid, enum memblock_flags flags);
phys_addr_t memblock_alloc_base(phys_addr_t size, phys_addr_t align,
phys_addr_t max_addr);
phys_addr_t __memblock_alloc_base(phys_addr_t size, phys_addr_t align,
@@ -367,8 +395,10 @@ phys_addr_t memblock_get_current_limit(void);
*/
/**
- * memblock_region_memory_base_pfn - Return the lowest pfn intersecting with the memory region
+ * memblock_region_memory_base_pfn - get the lowest pfn of the memory region
* @reg: memblock_region structure
+ *
+ * Return: the lowest pfn intersecting with the memory region
*/
static inline unsigned long memblock_region_memory_base_pfn(const struct memblock_region *reg)
{
@@ -376,8 +406,10 @@ static inline unsigned long memblock_region_memory_base_pfn(const struct membloc
}
/**
- * memblock_region_memory_end_pfn - Return the end_pfn this region
+ * memblock_region_memory_end_pfn - get the end pfn of the memory region
* @reg: memblock_region structure
+ *
+ * Return: the end_pfn of the memory region
*/
static inline unsigned long memblock_region_memory_end_pfn(const struct memblock_region *reg)
{
@@ -385,8 +417,10 @@ static inline unsigned long memblock_region_memory_end_pfn(const struct memblock
}
/**
- * memblock_region_reserved_base_pfn - Return the lowest pfn intersecting with the reserved region
+ * memblock_region_reserved_base_pfn - get the lowest pfn of the reserved region
* @reg: memblock_region structure
+ *
+ * Return: the lowest pfn intersecting with the reserved region
*/
static inline unsigned long memblock_region_reserved_base_pfn(const struct memblock_region *reg)
{
@@ -394,8 +428,10 @@ static inline unsigned long memblock_region_reserved_base_pfn(const struct membl
}
/**
- * memblock_region_reserved_end_pfn - Return the end_pfn this region
+ * memblock_region_reserved_end_pfn - get the end pfn of the reserved region
* @reg: memblock_region structure
+ *
+ * Return: the end_pfn of the reserved region
*/
static inline unsigned long memblock_region_reserved_end_pfn(const struct memblock_region *reg)
{
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 6c6fb116e925..652f602167df 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -112,6 +112,15 @@ struct lruvec_stat {
};
/*
+ * Bitmap of shrinker::id corresponding to memcg-aware shrinkers,
+ * which have elements charged to this memcg.
+ */
+struct memcg_shrinker_map {
+ struct rcu_head rcu;
+ unsigned long map[0];
+};
+
+/*
* per-zone information in memory controller.
*/
struct mem_cgroup_per_node {
@@ -124,6 +133,9 @@ struct mem_cgroup_per_node {
struct mem_cgroup_reclaim_iter iter[DEF_PRIORITY + 1];
+#ifdef CONFIG_MEMCG_KMEM
+ struct memcg_shrinker_map __rcu *shrinker_map;
+#endif
struct rb_node tree_node; /* RB tree node */
unsigned long usage_in_excess;/* Set to the value by which */
/* the soft limit is exceeded*/
@@ -213,6 +225,11 @@ struct mem_cgroup {
*/
bool use_hierarchy;
+ /*
+ * Should the OOM killer kill all tasks in this cgroup if it kills one of them?
+ */
+ bool oom_group;
+
/* protected by memcg_oom_lock */
bool oom_lock;
int under_oom;
@@ -271,7 +288,7 @@ struct mem_cgroup {
bool tcpmem_active;
int tcpmem_pressure;
-#ifndef CONFIG_SLOB
+#ifdef CONFIG_MEMCG_KMEM
/* Index in the kmem_cache->memcg_params.memcg_caches array */
int kmemcg_id;
enum memcg_kmem_state kmem_state;
@@ -306,6 +323,11 @@ struct mem_cgroup {
extern struct mem_cgroup *root_mem_cgroup;
+static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
+{
+ return (memcg == root_mem_cgroup);
+}
+
static inline bool mem_cgroup_disabled(void)
{
return !cgroup_subsys_enabled(memory_cgrp_subsys);
@@ -317,6 +339,9 @@ enum mem_cgroup_protection mem_cgroup_protected(struct mem_cgroup *root,
int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
gfp_t gfp_mask, struct mem_cgroup **memcgp,
bool compound);
+int mem_cgroup_try_charge_delay(struct page *page, struct mm_struct *mm,
+ gfp_t gfp_mask, struct mem_cgroup **memcgp,
+ bool compound);
void mem_cgroup_commit_charge(struct page *page, struct mem_cgroup *memcg,
bool lrucare, bool compound);
void mem_cgroup_cancel_charge(struct page *page, struct mem_cgroup *memcg,
@@ -370,11 +395,21 @@ struct lruvec *mem_cgroup_page_lruvec(struct page *, struct pglist_data *);
bool task_in_mem_cgroup(struct task_struct *task, struct mem_cgroup *memcg);
struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);
+struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm);
+
+struct mem_cgroup *get_mem_cgroup_from_page(struct page *page);
+
static inline
struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css){
return css ? container_of(css, struct mem_cgroup, css) : NULL;
}
+static inline void mem_cgroup_put(struct mem_cgroup *memcg)
+{
+ if (memcg)
+ css_put(&memcg->css);
+}
+
#define mem_cgroup_from_counter(counter, member) \
container_of(counter, struct mem_cgroup, member)
@@ -494,16 +529,16 @@ unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg);
void mem_cgroup_print_oom_info(struct mem_cgroup *memcg,
struct task_struct *p);
-static inline void mem_cgroup_oom_enable(void)
+static inline void mem_cgroup_enter_user_fault(void)
{
- WARN_ON(current->memcg_may_oom);
- current->memcg_may_oom = 1;
+ WARN_ON(current->in_user_fault);
+ current->in_user_fault = 1;
}
-static inline void mem_cgroup_oom_disable(void)
+static inline void mem_cgroup_exit_user_fault(void)
{
- WARN_ON(!current->memcg_may_oom);
- current->memcg_may_oom = 0;
+ WARN_ON(!current->in_user_fault);
+ current->in_user_fault = 0;
}
static inline bool task_in_memcg_oom(struct task_struct *p)
@@ -512,6 +547,9 @@ static inline bool task_in_memcg_oom(struct task_struct *p)
}
bool mem_cgroup_oom_synchronize(bool wait);
+struct mem_cgroup *mem_cgroup_get_oom_group(struct task_struct *victim,
+ struct mem_cgroup *oom_domain);
+void mem_cgroup_print_oom_group(struct mem_cgroup *memcg);
#ifdef CONFIG_MEMCG_SWAP
extern int do_swap_account;
@@ -759,6 +797,11 @@ void mem_cgroup_split_huge_fixup(struct page *head);
struct mem_cgroup;
+static inline bool mem_cgroup_is_root(struct mem_cgroup *memcg)
+{
+ return true;
+}
+
static inline bool mem_cgroup_disabled(void)
{
return true;
@@ -789,6 +832,16 @@ static inline int mem_cgroup_try_charge(struct page *page, struct mm_struct *mm,
return 0;
}
+static inline int mem_cgroup_try_charge_delay(struct page *page,
+ struct mm_struct *mm,
+ gfp_t gfp_mask,
+ struct mem_cgroup **memcgp,
+ bool compound)
+{
+ *memcgp = NULL;
+ return 0;
+}
+
static inline void mem_cgroup_commit_charge(struct page *page,
struct mem_cgroup *memcg,
bool lrucare, bool compound)
@@ -837,6 +890,20 @@ static inline bool task_in_mem_cgroup(struct task_struct *task,
return true;
}
+static inline struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
+{
+ return NULL;
+}
+
+static inline struct mem_cgroup *get_mem_cgroup_from_page(struct page *page)
+{
+ return NULL;
+}
+
+static inline void mem_cgroup_put(struct mem_cgroup *memcg)
+{
+}
+
static inline struct mem_cgroup *
mem_cgroup_iter(struct mem_cgroup *root,
struct mem_cgroup *prev,
@@ -924,11 +991,11 @@ static inline void mem_cgroup_handle_over_high(void)
{
}
-static inline void mem_cgroup_oom_enable(void)
+static inline void mem_cgroup_enter_user_fault(void)
{
}
-static inline void mem_cgroup_oom_disable(void)
+static inline void mem_cgroup_exit_user_fault(void)
{
}
@@ -942,6 +1009,16 @@ static inline bool mem_cgroup_oom_synchronize(bool wait)
return false;
}
+static inline struct mem_cgroup *mem_cgroup_get_oom_group(
+ struct task_struct *victim, struct mem_cgroup *oom_domain)
+{
+ return NULL;
+}
+
+static inline void mem_cgroup_print_oom_group(struct mem_cgroup *memcg)
+{
+}
+
static inline unsigned long memcg_page_state(struct mem_cgroup *memcg,
int idx)
{
@@ -1194,7 +1271,7 @@ int memcg_kmem_charge_memcg(struct page *page, gfp_t gfp, int order,
int memcg_kmem_charge(struct page *page, gfp_t gfp, int order);
void memcg_kmem_uncharge(struct page *page, int order);
-#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
+#ifdef CONFIG_MEMCG_KMEM
extern struct static_key_false memcg_kmem_enabled_key;
extern struct workqueue_struct *memcg_kmem_cache_wq;
@@ -1225,6 +1302,10 @@ static inline int memcg_cache_id(struct mem_cgroup *memcg)
return memcg ? memcg->kmemcg_id : -1;
}
+extern int memcg_expand_shrinker_maps(int new_id);
+
+extern void memcg_set_shrinker_bit(struct mem_cgroup *memcg,
+ int nid, int shrinker_id);
#else
#define for_each_memcg_cache_index(_idx) \
for (; NULL; )
@@ -1247,6 +1328,8 @@ static inline void memcg_put_cache_ids(void)
{
}
-#endif /* CONFIG_MEMCG && !CONFIG_SLOB */
+static inline void memcg_set_shrinker_bit(struct mem_cgroup *memcg,
+ int nid, int shrinker_id) { }
+#endif /* CONFIG_MEMCG_KMEM */
#endif /* _LINUX_MEMCONTROL_H */
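For illustration, the new lookup and put helpers pair like any refcounted accessor; mem_cgroup_put() tolerates NULL:

	struct mem_cgroup *memcg = get_mem_cgroup_from_mm(current->mm);

	/* ... charge or inspect under this memcg ... */
	mem_cgroup_put(memcg);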
diff --git a/include/linux/memory.h b/include/linux/memory.h
index 31ca3e28b0eb..a6ddefc60517 100644
--- a/include/linux/memory.h
+++ b/include/linux/memory.h
@@ -38,6 +38,7 @@ struct memory_block {
int arch_get_memory_phys_device(unsigned long start_pfn);
unsigned long memory_block_size_bytes(void);
+int set_memory_block_size_order(unsigned int order);
/* These states are exposed to userspace as text strings in sysfs */
#define MEM_ONLINE (1<<0) /* exposed to userspace */
diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
index 4e9828cda7a2..34a28227068d 100644
--- a/include/linux/memory_hotplug.h
+++ b/include/linux/memory_hotplug.h
@@ -319,6 +319,7 @@ static inline int offline_pages(unsigned long start_pfn, unsigned long nr_pages)
static inline void remove_memory(int nid, u64 start, u64 size) {}
#endif /* CONFIG_MEMORY_HOTREMOVE */
+extern void __ref free_area_init_core_hotplug(int nid);
extern int walk_memory_range(unsigned long start_pfn, unsigned long end_pfn,
void *arg, int (*func)(struct memory_block *, void *));
extern int add_memory(int nid, u64 start, u64 size);
diff --git a/include/linux/mfd/as3722.h b/include/linux/mfd/as3722.h
index 51e6f9414575..b404a5af9bba 100644
--- a/include/linux/mfd/as3722.h
+++ b/include/linux/mfd/as3722.h
@@ -296,6 +296,8 @@
#define AS3722_ADC1_CONV_NOTREADY BIT(7)
#define AS3722_ADC1_SOURCE_SELECT_MASK 0x1F
+#define AS3722_CTRL_SEQU1_AC_OK_PWR_ON BIT(0)
+
/* GPIO modes */
#define AS3722_GPIO_MODE_MASK 0x07
#define AS3722_GPIO_MODE_INPUT 0x00
@@ -391,6 +393,7 @@ struct as3722 {
unsigned long irq_flags;
bool en_intern_int_pullup;
bool en_intern_i2c_pullup;
+ bool en_ac_ok_pwr_on;
struct regmap_irq_chip_data *irq_data;
};
diff --git a/include/linux/mfd/cros_ec.h b/include/linux/mfd/cros_ec.h
index 32421dfeb996..20949dde35cd 100644
--- a/include/linux/mfd/cros_ec.h
+++ b/include/linux/mfd/cros_ec.h
@@ -147,7 +147,7 @@ struct cros_ec_device {
bool mkbp_event_supported;
struct blocking_notifier_head event_notifier;
- struct ec_response_get_next_event event_data;
+ struct ec_response_get_next_event_v1 event_data;
int event_size;
u32 host_event_wake_mask;
};
diff --git a/include/linux/mfd/cros_ec_commands.h b/include/linux/mfd/cros_ec_commands.h
index f2edd9969b40..6e1ab9bead28 100644
--- a/include/linux/mfd/cros_ec_commands.h
+++ b/include/linux/mfd/cros_ec_commands.h
@@ -804,6 +804,8 @@ enum ec_feature_code {
EC_FEATURE_MOTION_SENSE_FIFO = 24,
/* EC has RTC feature that can be controlled by host commands */
EC_FEATURE_RTC = 27,
+ /* EC supports CEC commands */
+ EC_FEATURE_CEC = 35,
};
#define EC_FEATURE_MASK_0(event_code) (1UL << (event_code % 32))
@@ -2078,6 +2080,12 @@ enum ec_mkbp_event {
/* EC sent a sysrq command */
EC_MKBP_EVENT_SYSRQ = 6,
+ /* Notify the AP that something happened on CEC */
+ EC_MKBP_EVENT_CEC_EVENT = 8,
+
+ /* Send an incoming CEC message to the AP */
+ EC_MKBP_EVENT_CEC_MESSAGE = 9,
+
/* Number of MKBP events */
EC_MKBP_EVENT_COUNT,
};
@@ -2093,12 +2101,28 @@ union ec_response_get_next_data {
uint32_t sysrq;
} __packed;
+union ec_response_get_next_data_v1 {
+ uint8_t key_matrix[16];
+ uint32_t host_event;
+ uint32_t buttons;
+ uint32_t switches;
+ uint32_t sysrq;
+ uint32_t cec_events;
+ uint8_t cec_message[16];
+} __packed;
+
struct ec_response_get_next_event {
uint8_t event_type;
/* Followed by event data if any */
union ec_response_get_next_data data;
} __packed;
+struct ec_response_get_next_event_v1 {
+ uint8_t event_type;
+ /* Followed by event data if any */
+ union ec_response_get_next_data_v1 data;
+} __packed;
+
/* Bit indices for buttons and switches.*/
/* Buttons */
#define EC_MKBP_POWER_BUTTON 0
@@ -2593,14 +2617,18 @@ struct ec_params_current_limit {
} __packed;
/*
- * Set maximum external power current.
+ * Set maximum external voltage / current.
*/
-#define EC_CMD_EXT_POWER_CURRENT_LIMIT 0xa2
+#define EC_CMD_EXTERNAL_POWER_LIMIT 0x00A2
-struct ec_params_ext_power_current_limit {
- uint32_t limit; /* in mA */
+/* Command v0 is used only on Spring and is obsolete + unsupported */
+struct ec_params_external_power_limit_v1 {
+ uint16_t current_lim; /* in mA, or EC_POWER_LIMIT_NONE to clear limit */
+ uint16_t voltage_lim; /* in mV, or EC_POWER_LIMIT_NONE to clear limit */
} __packed;
+#define EC_POWER_LIMIT_NONE 0xffff
+
/* Inform the EC when entering a sleep state */
#define EC_CMD_HOST_SLEEP_EVENT 0xa9
@@ -2831,6 +2859,79 @@ struct ec_params_reboot_ec {
/*****************************************************************************/
/*
+ * HDMI CEC commands
+ *
+ * These commands are for sending and receiving messages via HDMI CEC
+ */
+#define EC_MAX_CEC_MSG_LEN 16
+
+/* CEC message from the AP to be written on the CEC bus */
+#define EC_CMD_CEC_WRITE_MSG 0x00B8
+
+/**
+ * struct ec_params_cec_write - Message to write to the CEC bus
+ * @msg: message content to write to the CEC bus
+ */
+struct ec_params_cec_write {
+ uint8_t msg[EC_MAX_CEC_MSG_LEN];
+} __packed;
+
+/* Set various CEC parameters */
+#define EC_CMD_CEC_SET 0x00BA
+
+/**
+ * struct ec_params_cec_set - CEC parameters set
+ * @cmd: parameter type, can be CEC_CMD_ENABLE or CEC_CMD_LOGICAL_ADDRESS
+ * @val: in case cmd is CEC_CMD_ENABLE, this field can be 0 to disable CEC
+ * or 1 to enable CEC functionality, in case cmd is CEC_CMD_LOGICAL_ADDRESS,
+ * this field encodes the requested logical address between 0 and 15
+ * or 0xff to unregister
+ */
+struct ec_params_cec_set {
+ uint8_t cmd; /* enum cec_command */
+ uint8_t val;
+} __packed;
+
+/* Read various CEC parameters */
+#define EC_CMD_CEC_GET 0x00BB
+
+/**
+ * struct ec_params_cec_get - CEC parameters get
+ * @cmd: parameter type, can be CEC_CMD_ENABLE or CEC_CMD_LOGICAL_ADDRESS
+ */
+struct ec_params_cec_get {
+ uint8_t cmd; /* enum cec_command */
+} __packed;
+
+/**
+ * struct ec_response_cec_get - CEC parameters get response
+ * @val: in case cmd was CEC_CMD_ENABLE, this field will be 0 if CEC is
+ * disabled or 1 if CEC functionality is enabled,
+ * in case cmd was CEC_CMD_LOGICAL_ADDRESS, this will encode the
+ * configured logical address between 0 and 15 or 0xff if unregistered
+ */
+struct ec_response_cec_get {
+ uint8_t val;
+} __packed;
+
+/* CEC parameters command */
+enum ec_cec_command {
+ /* CEC reading, writing and events enable */
+ CEC_CMD_ENABLE,
+ /* CEC logical address */
+ CEC_CMD_LOGICAL_ADDRESS,
+};
+
+/* Events from CEC to AP */
+enum mkbp_cec_event {
+ /* Outgoing message was acknowledged by a follower */
+ EC_MKBP_CEC_SEND_OK = BIT(0),
+ /* Outgoing message was not acknowledged */
+ EC_MKBP_CEC_SEND_FAILED = BIT(1),
+};
+
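Illustrative host-side use of the CEC set command; the transport call (e.g. cros_ec_cmd_xfer_status()) is assumed and not shown:

	struct ec_params_cec_set p = {
		.cmd = CEC_CMD_ENABLE,
		.val = 1,		/* enable CEC functionality */
	};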
+/*****************************************************************************/
+/*
* Special commands
*
* These do not follow the normal rules for commands. See each command for
@@ -2974,6 +3075,12 @@ enum usb_chg_type {
USB_CHG_TYPE_VBUS,
USB_CHG_TYPE_UNKNOWN,
};
+enum usb_power_roles {
+ USB_PD_PORT_POWER_DISCONNECTED,
+ USB_PD_PORT_POWER_SOURCE,
+ USB_PD_PORT_POWER_SINK,
+ USB_PD_PORT_POWER_SINK_NOT_CHARGING,
+};
struct usb_chg_measures {
uint16_t voltage_max;
@@ -2991,6 +3098,120 @@ struct ec_response_usb_pd_power_info {
uint32_t max_power;
} __packed;
+struct ec_params_usb_pd_info_request {
+ uint8_t port;
+} __packed;
+
+/* Read USB-PD Device discovery info */
+#define EC_CMD_USB_PD_DISCOVERY 0x0113
+struct ec_params_usb_pd_discovery_entry {
+ uint16_t vid; /* USB-IF VID */
+ uint16_t pid; /* USB-IF PID */
+ uint8_t ptype; /* product type (hub,periph,cable,ama) */
+} __packed;
+
+/* Override default charge behavior */
+#define EC_CMD_PD_CHARGE_PORT_OVERRIDE 0x0114
+
+/* Negative port parameters have special meaning */
+enum usb_pd_override_ports {
+ OVERRIDE_DONT_CHARGE = -2,
+ OVERRIDE_OFF = -1,
+ /* [0, CONFIG_USB_PD_PORT_COUNT): Port# */
+};
+
+struct ec_params_charge_port_override {
+ int16_t override_port; /* Override port# */
+} __packed;
+
+/* Read (and delete) one entry of PD event log */
+#define EC_CMD_PD_GET_LOG_ENTRY 0x0115
+
+struct ec_response_pd_log {
+ uint32_t timestamp; /* relative timestamp in milliseconds */
+ uint8_t type; /* event type : see PD_EVENT_xx below */
+ uint8_t size_port; /* [7:5] port number [4:0] payload size in bytes */
+ uint16_t data; /* type-defined data payload */
+ uint8_t payload[0]; /* optional additional data payload: 0..16 bytes */
+} __packed;
+
+/* The timestamp is the microsecond counter shifted to get about a ms. */
+#define PD_LOG_TIMESTAMP_SHIFT 10 /* 1 LSB = 1024us */
+
+#define PD_LOG_SIZE_MASK 0x1f
+#define PD_LOG_PORT_MASK 0xe0
+#define PD_LOG_PORT_SHIFT 5
+#define PD_LOG_PORT_SIZE(port, size) (((port) << PD_LOG_PORT_SHIFT) | \
+ ((size) & PD_LOG_SIZE_MASK))
+#define PD_LOG_PORT(size_port) ((size_port) >> PD_LOG_PORT_SHIFT)
+#define PD_LOG_SIZE(size_port) ((size_port) & PD_LOG_SIZE_MASK)
+
+/* PD event log : entry types */
+/* PD MCU events */
+#define PD_EVENT_MCU_BASE 0x00
+#define PD_EVENT_MCU_CHARGE (PD_EVENT_MCU_BASE+0)
+#define PD_EVENT_MCU_CONNECT (PD_EVENT_MCU_BASE+1)
+/* Reserved for custom board event */
+#define PD_EVENT_MCU_BOARD_CUSTOM (PD_EVENT_MCU_BASE+2)
+/* PD generic accessory events */
+#define PD_EVENT_ACC_BASE 0x20
+#define PD_EVENT_ACC_RW_FAIL (PD_EVENT_ACC_BASE+0)
+#define PD_EVENT_ACC_RW_ERASE (PD_EVENT_ACC_BASE+1)
+/* PD power supply events */
+#define PD_EVENT_PS_BASE 0x40
+#define PD_EVENT_PS_FAULT (PD_EVENT_PS_BASE+0)
+/* PD video dongles events */
+#define PD_EVENT_VIDEO_BASE 0x60
+#define PD_EVENT_VIDEO_DP_MODE (PD_EVENT_VIDEO_BASE+0)
+#define PD_EVENT_VIDEO_CODEC (PD_EVENT_VIDEO_BASE+1)
+/* Returned in the "type" field, when there is no entry available */
+#define PD_EVENT_NO_ENTRY 0xff
+
+/*
+ * PD_EVENT_MCU_CHARGE event definition :
+ * the payload is "struct usb_chg_measures"
+ * the data field contains the port state flags as defined below :
+ */
+/* Port partner is a dual role device */
+#define CHARGE_FLAGS_DUAL_ROLE BIT(15)
+/* Port is the pending override port */
+#define CHARGE_FLAGS_DELAYED_OVERRIDE BIT(14)
+/* Port is the override port */
+#define CHARGE_FLAGS_OVERRIDE BIT(13)
+/* Charger type */
+#define CHARGE_FLAGS_TYPE_SHIFT 3
+#define CHARGE_FLAGS_TYPE_MASK (0xf << CHARGE_FLAGS_TYPE_SHIFT)
+/* Power delivery role */
+#define CHARGE_FLAGS_ROLE_MASK (7 << 0)
+
+/*
+ * PD_EVENT_PS_FAULT data field flags definition :
+ */
+#define PS_FAULT_OCP 1
+#define PS_FAULT_FAST_OCP 2
+#define PS_FAULT_OVP 3
+#define PS_FAULT_DISCH 4
+
+/*
+ * PD_EVENT_VIDEO_CODEC payload is "struct mcdp_info".
+ */
+struct mcdp_version {
+ uint8_t major;
+ uint8_t minor;
+ uint16_t build;
+} __packed;
+
+struct mcdp_info {
+ uint8_t family[2];
+ uint8_t chipid[2];
+ struct mcdp_version irom;
+ struct mcdp_version fw;
+} __packed;
+
+/* struct mcdp_info field decoding */
+#define MCDP_CHIPID(chipid) ((chipid[0] << 8) | chipid[1])
+#define MCDP_FAMILY(family) ((family[0] << 8) | family[1])
+
/* Get info about USB-C SS muxes */
#define EC_CMD_USB_PD_MUX_INFO 0x11a
diff --git a/include/linux/mfd/da9063/core.h b/include/linux/mfd/da9063/core.h
index f3ae65db4c86..71b09154e2db 100644
--- a/include/linux/mfd/da9063/core.h
+++ b/include/linux/mfd/da9063/core.h
@@ -29,8 +29,11 @@
#define DA9063_DRVNAME_RTC "da9063-rtc"
#define DA9063_DRVNAME_VIBRATION "da9063-vibration"
-enum da9063_models {
- PMIC_DA9063 = 0x61,
+#define PMIC_CHIP_ID_DA9063 0x61
+
+enum da9063_type {
+ PMIC_TYPE_DA9063 = 0,
+ PMIC_TYPE_DA9063L,
};
enum da9063_variant_codes {
@@ -72,13 +75,10 @@ enum da9063_irqs {
DA9063_IRQ_GPI15,
};
-#define DA9063_IRQ_BASE_OFFSET 0
-#define DA9063_NUM_IRQ (DA9063_IRQ_GPI15 + 1 - DA9063_IRQ_BASE_OFFSET)
-
struct da9063 {
/* Device */
struct device *dev;
- unsigned short model;
+ enum da9063_type type;
unsigned char variant_code;
unsigned int flags;
@@ -94,7 +94,4 @@ struct da9063 {
int da9063_device_init(struct da9063 *da9063, unsigned int irq);
int da9063_irq_init(struct da9063 *da9063);
-void da9063_device_exit(struct da9063 *da9063);
-void da9063_irq_exit(struct da9063 *da9063);
-
#endif /* __MFD_DA9063_CORE_H__ */
diff --git a/include/linux/mfd/madera/core.h b/include/linux/mfd/madera/core.h
new file mode 100644
index 000000000000..c332681848ef
--- /dev/null
+++ b/include/linux/mfd/madera/core.h
@@ -0,0 +1,187 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * MFD internals for Cirrus Logic Madera codecs
+ *
+ * Copyright (C) 2015-2018 Cirrus Logic
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; version 2.
+ */
+
+#ifndef MADERA_CORE_H
+#define MADERA_CORE_H
+
+#include <linux/gpio/consumer.h>
+#include <linux/interrupt.h>
+#include <linux/mfd/madera/pdata.h>
+#include <linux/notifier.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+
+enum madera_type {
+ /* 0 is reserved for indicating failure to identify */
+ CS47L35 = 1,
+ CS47L85 = 2,
+ CS47L90 = 3,
+ CS47L91 = 4,
+ WM1840 = 7,
+};
+
+#define MADERA_MAX_CORE_SUPPLIES 2
+#define MADERA_MAX_GPIOS 40
+
+#define CS47L35_NUM_GPIOS 16
+#define CS47L85_NUM_GPIOS 40
+#define CS47L90_NUM_GPIOS 38
+
+#define MADERA_MAX_MICBIAS 4
+
+/* Notifier events */
+#define MADERA_NOTIFY_VOICE_TRIGGER 0x1
+#define MADERA_NOTIFY_HPDET 0x2
+#define MADERA_NOTIFY_MICDET 0x4
+
+/* GPIO Function Definitions */
+#define MADERA_GP_FN_ALTERNATE 0x00
+#define MADERA_GP_FN_GPIO 0x01
+#define MADERA_GP_FN_DSP_GPIO 0x02
+#define MADERA_GP_FN_IRQ1 0x03
+#define MADERA_GP_FN_IRQ2 0x04
+#define MADERA_GP_FN_FLL1_CLOCK 0x10
+#define MADERA_GP_FN_FLL2_CLOCK 0x11
+#define MADERA_GP_FN_FLL3_CLOCK 0x12
+#define MADERA_GP_FN_FLLAO_CLOCK 0x13
+#define MADERA_GP_FN_FLL1_LOCK 0x18
+#define MADERA_GP_FN_FLL2_LOCK 0x19
+#define MADERA_GP_FN_FLL3_LOCK 0x1A
+#define MADERA_GP_FN_FLLAO_LOCK 0x1B
+#define MADERA_GP_FN_OPCLK_OUT 0x40
+#define MADERA_GP_FN_OPCLK_ASYNC_OUT 0x41
+#define MADERA_GP_FN_PWM1 0x48
+#define MADERA_GP_FN_PWM2 0x49
+#define MADERA_GP_FN_SPDIF_OUT 0x4C
+#define MADERA_GP_FN_HEADPHONE_DET 0x50
+#define MADERA_GP_FN_MIC_DET 0x58
+#define MADERA_GP_FN_DRC1_SIGNAL_DETECT 0x80
+#define MADERA_GP_FN_DRC2_SIGNAL_DETECT 0x81
+#define MADERA_GP_FN_ASRC1_IN1_LOCK 0x88
+#define MADERA_GP_FN_ASRC1_IN2_LOCK 0x89
+#define MADERA_GP_FN_ASRC2_IN1_LOCK 0x8A
+#define MADERA_GP_FN_ASRC2_IN2_LOCK 0x8B
+#define MADERA_GP_FN_DSP_IRQ1 0xA0
+#define MADERA_GP_FN_DSP_IRQ2 0xA1
+#define MADERA_GP_FN_DSP_IRQ3 0xA2
+#define MADERA_GP_FN_DSP_IRQ4 0xA3
+#define MADERA_GP_FN_DSP_IRQ5 0xA4
+#define MADERA_GP_FN_DSP_IRQ6 0xA5
+#define MADERA_GP_FN_DSP_IRQ7 0xA6
+#define MADERA_GP_FN_DSP_IRQ8 0xA7
+#define MADERA_GP_FN_DSP_IRQ9 0xA8
+#define MADERA_GP_FN_DSP_IRQ10 0xA9
+#define MADERA_GP_FN_DSP_IRQ11 0xAA
+#define MADERA_GP_FN_DSP_IRQ12 0xAB
+#define MADERA_GP_FN_DSP_IRQ13 0xAC
+#define MADERA_GP_FN_DSP_IRQ14 0xAD
+#define MADERA_GP_FN_DSP_IRQ15 0xAE
+#define MADERA_GP_FN_DSP_IRQ16 0xAF
+#define MADERA_GP_FN_HPOUT1L_SC 0xB0
+#define MADERA_GP_FN_HPOUT1R_SC 0xB1
+#define MADERA_GP_FN_HPOUT2L_SC 0xB2
+#define MADERA_GP_FN_HPOUT2R_SC 0xB3
+#define MADERA_GP_FN_HPOUT3L_SC 0xB4
+#define MADERA_GP_FN_HPOUT4R_SC 0xB5
+#define MADERA_GP_FN_SPKOUTL_SC 0xB6
+#define MADERA_GP_FN_SPKOUTR_SC 0xB7
+#define MADERA_GP_FN_HPOUT1L_ENA 0xC0
+#define MADERA_GP_FN_HPOUT1R_ENA 0xC1
+#define MADERA_GP_FN_HPOUT2L_ENA 0xC2
+#define MADERA_GP_FN_HPOUT2R_ENA 0xC3
+#define MADERA_GP_FN_HPOUT3L_ENA 0xC4
+#define MADERA_GP_FN_HPOUT4R_ENA 0xC5
+#define MADERA_GP_FN_SPKOUTL_ENA 0xC6
+#define MADERA_GP_FN_SPKOUTR_ENA 0xC7
+#define MADERA_GP_FN_HPOUT1L_DIS 0xD0
+#define MADERA_GP_FN_HPOUT1R_DIS 0xD1
+#define MADERA_GP_FN_HPOUT2L_DIS 0xD2
+#define MADERA_GP_FN_HPOUT2R_DIS 0xD3
+#define MADERA_GP_FN_HPOUT3L_DIS 0xD4
+#define MADERA_GP_FN_HPOUT4R_DIS 0xD5
+#define MADERA_GP_FN_SPKOUTL_DIS 0xD6
+#define MADERA_GP_FN_SPKOUTR_DIS 0xD7
+#define MADERA_GP_FN_SPK_SHUTDOWN 0xE0
+#define MADERA_GP_FN_SPK_OVH_SHUTDOWN 0xE1
+#define MADERA_GP_FN_SPK_OVH_WARN 0xE2
+#define MADERA_GP_FN_TIMER1_STATUS 0x140
+#define MADERA_GP_FN_TIMER2_STATUS 0x141
+#define MADERA_GP_FN_TIMER3_STATUS 0x142
+#define MADERA_GP_FN_TIMER4_STATUS 0x143
+#define MADERA_GP_FN_TIMER5_STATUS 0x144
+#define MADERA_GP_FN_TIMER6_STATUS 0x145
+#define MADERA_GP_FN_TIMER7_STATUS 0x146
+#define MADERA_GP_FN_TIMER8_STATUS 0x147
+#define MADERA_GP_FN_EVENTLOG1_FIFO_STS 0x150
+#define MADERA_GP_FN_EVENTLOG2_FIFO_STS 0x151
+#define MADERA_GP_FN_EVENTLOG3_FIFO_STS 0x152
+#define MADERA_GP_FN_EVENTLOG4_FIFO_STS 0x153
+#define MADERA_GP_FN_EVENTLOG5_FIFO_STS 0x154
+#define MADERA_GP_FN_EVENTLOG6_FIFO_STS 0x155
+#define MADERA_GP_FN_EVENTLOG7_FIFO_STS 0x156
+#define MADERA_GP_FN_EVENTLOG8_FIFO_STS 0x157
+
+struct snd_soc_dapm_context;
+
+/*
+ * struct madera - internal data shared by the set of Madera drivers
+ *
+ * This should not be used by anything except child drivers of the Madera MFD
+ *
+ * @regmap: pointer to the regmap instance for 16-bit registers
+ * @regmap_32bit: pointer to the regmap instance for 32-bit registers
+ * @dev: pointer to the MFD device
+ * @type: type of codec
+ * @rev: silicon revision
+ * @type_name: display name of this codec
+ * @num_core_supplies: number of core supply regulators
+ * @core_supplies: list of core supplies that are always required
+ * @dcvdd: pointer to DCVDD regulator
+ * @internal_dcvdd: true if DCVDD is supplied from the internal LDO1
+ * @pdata: our pdata
+ * @irq_dev: the irqchip child driver device
+ * @irq: host irq number from SPI or I2C configuration
+ * @out_clamp: indicates output clamp state for each analogue output
+ * @out_shorted: indicates short circuit state for each analogue output
+ * @hp_ena: bitflags of enable state for the headphone outputs
+ * @num_micbias: number of MICBIAS outputs
+ * @num_childbias: number of child biases for each MICBIAS
+ * @dapm: pointer to codec driver DAPM context
+ * @notifier: notifier for signalling events to ASoC machine driver
+ */
+struct madera {
+ struct regmap *regmap;
+ struct regmap *regmap_32bit;
+
+ struct device *dev;
+
+ enum madera_type type;
+ unsigned int rev;
+ const char *type_name;
+
+ int num_core_supplies;
+ struct regulator_bulk_data core_supplies[MADERA_MAX_CORE_SUPPLIES];
+ struct regulator *dcvdd;
+ bool internal_dcvdd;
+
+ struct madera_pdata pdata;
+
+ struct device *irq_dev;
+ int irq;
+
+ unsigned int num_micbias;
+ unsigned int num_childbias[MADERA_MAX_MICBIAS];
+
+ struct snd_soc_dapm_context *dapm;
+
+ struct blocking_notifier_head notifier;
+};
+#endif
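
A minimal usage sketch (illustrative only, not part of this patch; the probe function is hypothetical): a child driver of the Madera MFD fetches the shared struct madera from its parent device and reads chip registers through the shared 16-bit regmap, assuming the core driver publishes the struct as its drvdata, as Arizona-family MFDs typically do.

#include <linux/mfd/madera/core.h>
#include <linux/mfd/madera/registers.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>

static int example_madera_child_probe(struct platform_device *pdev)
{
	/* The MFD core is assumed to store the shared struct madera in its drvdata */
	struct madera *madera = dev_get_drvdata(pdev->dev.parent);
	unsigned int rev;
	int ret;

	/* 16-bit registers are accessed through the shared regmap */
	ret = regmap_read(madera->regmap, MADERA_HARDWARE_REVISION, &rev);
	if (ret)
		return ret;

	dev_info(&pdev->dev, "Madera silicon revision 0x%x\n",
		 rev & MADERA_HW_REVISION_MASK);

	return 0;
}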
diff --git a/include/linux/mfd/madera/pdata.h b/include/linux/mfd/madera/pdata.h
new file mode 100644
index 000000000000..0b311f39c8f4
--- /dev/null
+++ b/include/linux/mfd/madera/pdata.h
@@ -0,0 +1,59 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Platform data for Cirrus Logic Madera codecs
+ *
+ * Copyright (C) 2015-2018 Cirrus Logic
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; version 2.
+ */
+
+#ifndef MADERA_PDATA_H
+#define MADERA_PDATA_H
+
+#include <linux/kernel.h>
+#include <linux/regulator/arizona-ldo1.h>
+#include <linux/regulator/arizona-micsupp.h>
+#include <linux/regulator/machine.h>
+
+#define MADERA_MAX_MICBIAS 4
+#define MADERA_MAX_CHILD_MICBIAS 4
+
+#define MADERA_MAX_GPSW 2
+
+struct gpio_desc;
+struct pinctrl_map;
+struct madera_irqchip_pdata;
+struct madera_codec_pdata;
+
+/**
+ * struct madera_pdata - Configuration data for Madera devices
+ *
+ * @reset: GPIO controlling /RESET (NULL = none)
+ * @ldo1: Substruct of pdata for the LDO1 regulator
+ * @micvdd: Substruct of pdata for the MICVDD regulator
+ * @irq_flags: Mode for primary IRQ (defaults to active low)
+ * @gpio_base: Base GPIO number
+ * @gpio_configs: Array of GPIO configurations (See Documentation/pinctrl.txt)
+ * @n_gpio_configs: Number of entries in gpio_configs
+ * @gpsw: General purpose switch mode setting. Depends on the external
+ * hardware connected to the switch. (See the SW1_MODE field
+ * in the datasheet for the available values for your codec)
+ */
+struct madera_pdata {
+ struct gpio_desc *reset;
+
+ struct arizona_ldo1_pdata ldo1;
+ struct arizona_micsupp_pdata micvdd;
+
+ unsigned int irq_flags;
+ int gpio_base;
+
+ const struct pinctrl_map *gpio_configs;
+ int n_gpio_configs;
+
+ u32 gpsw[MADERA_MAX_GPSW];
+};
+
+#endif
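
A minimal sketch of board code populating this platform data (illustrative only, not part of this patch; the variable name and field values are hypothetical):

#include <linux/interrupt.h>
#include <linux/mfd/madera/pdata.h>

static const struct madera_pdata example_madera_pdata = {
	.irq_flags = IRQF_TRIGGER_LOW,	/* primary IRQ is active low */
	.gpio_base = -1,		/* let gpiolib choose a base */
	.gpsw	   = { 0, 0 },		/* SW1_MODE values from the datasheet */
};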
diff --git a/include/linux/mfd/madera/registers.h b/include/linux/mfd/madera/registers.h
new file mode 100644
index 000000000000..977e06101711
--- /dev/null
+++ b/include/linux/mfd/madera/registers.h
@@ -0,0 +1,3968 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Madera register definitions
+ *
+ * Copyright (C) 2015-2018 Cirrus Logic
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; version 2.
+ */
+
+#ifndef MADERA_REGISTERS_H
+#define MADERA_REGISTERS_H
+
+/*
+ * Register Addresses.
+ */
+#define MADERA_SOFTWARE_RESET 0x00
+#define MADERA_HARDWARE_REVISION 0x01
+#define MADERA_CTRL_IF_CFG_1 0x08
+#define MADERA_CTRL_IF_CFG_2 0x09
+#define MADERA_CTRL_IF_CFG_3 0x0A
+#define MADERA_WRITE_SEQUENCER_CTRL_0 0x16
+#define MADERA_WRITE_SEQUENCER_CTRL_1 0x17
+#define MADERA_WRITE_SEQUENCER_CTRL_2 0x18
+#define MADERA_TONE_GENERATOR_1 0x20
+#define MADERA_TONE_GENERATOR_2 0x21
+#define MADERA_TONE_GENERATOR_3 0x22
+#define MADERA_TONE_GENERATOR_4 0x23
+#define MADERA_TONE_GENERATOR_5 0x24
+#define MADERA_PWM_DRIVE_1 0x30
+#define MADERA_PWM_DRIVE_2 0x31
+#define MADERA_PWM_DRIVE_3 0x32
+#define MADERA_SEQUENCE_CONTROL 0x41
+#define MADERA_SAMPLE_RATE_SEQUENCE_SELECT_1 0x61
+#define MADERA_SAMPLE_RATE_SEQUENCE_SELECT_2 0x62
+#define MADERA_SAMPLE_RATE_SEQUENCE_SELECT_3 0x63
+#define MADERA_SAMPLE_RATE_SEQUENCE_SELECT_4 0x64
+#define MADERA_ALWAYS_ON_TRIGGERS_SEQUENCE_SELECT_1 0x66
+#define MADERA_ALWAYS_ON_TRIGGERS_SEQUENCE_SELECT_2 0x67
+#define MADERA_HAPTICS_CONTROL_1 0x90
+#define MADERA_HAPTICS_CONTROL_2 0x91
+#define MADERA_HAPTICS_PHASE_1_INTENSITY 0x92
+#define MADERA_HAPTICS_PHASE_1_DURATION 0x93
+#define MADERA_HAPTICS_PHASE_2_INTENSITY 0x94
+#define MADERA_HAPTICS_PHASE_2_DURATION 0x95
+#define MADERA_HAPTICS_PHASE_3_INTENSITY 0x96
+#define MADERA_HAPTICS_PHASE_3_DURATION 0x97
+#define MADERA_HAPTICS_STATUS 0x98
+#define MADERA_COMFORT_NOISE_GENERATOR 0xA0
+#define MADERA_CLOCK_32K_1 0x100
+#define MADERA_SYSTEM_CLOCK_1 0x101
+#define MADERA_SAMPLE_RATE_1 0x102
+#define MADERA_SAMPLE_RATE_2 0x103
+#define MADERA_SAMPLE_RATE_3 0x104
+#define MADERA_SAMPLE_RATE_1_STATUS 0x10A
+#define MADERA_SAMPLE_RATE_2_STATUS 0x10B
+#define MADERA_SAMPLE_RATE_3_STATUS 0x10C
+#define MADERA_ASYNC_CLOCK_1 0x112
+#define MADERA_ASYNC_SAMPLE_RATE_1 0x113
+#define MADERA_ASYNC_SAMPLE_RATE_2 0x114
+#define MADERA_ASYNC_SAMPLE_RATE_1_STATUS 0x11B
+#define MADERA_ASYNC_SAMPLE_RATE_2_STATUS 0x11C
+#define MADERA_DSP_CLOCK_1 0x120
+#define MADERA_DSP_CLOCK_2 0x122
+#define MADERA_OUTPUT_SYSTEM_CLOCK 0x149
+#define MADERA_OUTPUT_ASYNC_CLOCK 0x14A
+#define MADERA_RATE_ESTIMATOR_1 0x152
+#define MADERA_RATE_ESTIMATOR_2 0x153
+#define MADERA_RATE_ESTIMATOR_3 0x154
+#define MADERA_RATE_ESTIMATOR_4 0x155
+#define MADERA_RATE_ESTIMATOR_5 0x156
+#define MADERA_FLL1_CONTROL_1 0x171
+#define MADERA_FLL1_CONTROL_2 0x172
+#define MADERA_FLL1_CONTROL_3 0x173
+#define MADERA_FLL1_CONTROL_4 0x174
+#define MADERA_FLL1_CONTROL_5 0x175
+#define MADERA_FLL1_CONTROL_6 0x176
+#define MADERA_FLL1_LOOP_FILTER_TEST_1 0x177
+#define MADERA_FLL1_NCO_TEST_0 0x178
+#define MADERA_FLL1_CONTROL_7 0x179
+#define MADERA_FLL1_EFS_2 0x17A
+#define CS47L35_FLL1_SYNCHRONISER_1 0x17F
+#define CS47L35_FLL1_SYNCHRONISER_2 0x180
+#define CS47L35_FLL1_SYNCHRONISER_3 0x181
+#define CS47L35_FLL1_SYNCHRONISER_4 0x182
+#define CS47L35_FLL1_SYNCHRONISER_5 0x183
+#define CS47L35_FLL1_SYNCHRONISER_6 0x184
+#define CS47L35_FLL1_SYNCHRONISER_7 0x185
+#define CS47L35_FLL1_SPREAD_SPECTRUM 0x187
+#define CS47L35_FLL1_GPIO_CLOCK 0x188
+#define MADERA_FLL1_SYNCHRONISER_1 0x181
+#define MADERA_FLL1_SYNCHRONISER_2 0x182
+#define MADERA_FLL1_SYNCHRONISER_3 0x183
+#define MADERA_FLL1_SYNCHRONISER_4 0x184
+#define MADERA_FLL1_SYNCHRONISER_5 0x185
+#define MADERA_FLL1_SYNCHRONISER_6 0x186
+#define MADERA_FLL1_SYNCHRONISER_7 0x187
+#define MADERA_FLL1_SPREAD_SPECTRUM 0x189
+#define MADERA_FLL1_GPIO_CLOCK 0x18A
+#define MADERA_FLL2_CONTROL_1 0x191
+#define MADERA_FLL2_CONTROL_2 0x192
+#define MADERA_FLL2_CONTROL_3 0x193
+#define MADERA_FLL2_CONTROL_4 0x194
+#define MADERA_FLL2_CONTROL_5 0x195
+#define MADERA_FLL2_CONTROL_6 0x196
+#define MADERA_FLL2_LOOP_FILTER_TEST_1 0x197
+#define MADERA_FLL2_NCO_TEST_0 0x198
+#define MADERA_FLL2_CONTROL_7 0x199
+#define MADERA_FLL2_EFS_2 0x19A
+#define MADERA_FLL2_SYNCHRONISER_1 0x1A1
+#define MADERA_FLL2_SYNCHRONISER_2 0x1A2
+#define MADERA_FLL2_SYNCHRONISER_3 0x1A3
+#define MADERA_FLL2_SYNCHRONISER_4 0x1A4
+#define MADERA_FLL2_SYNCHRONISER_5 0x1A5
+#define MADERA_FLL2_SYNCHRONISER_6 0x1A6
+#define MADERA_FLL2_SYNCHRONISER_7 0x1A7
+#define MADERA_FLL2_SPREAD_SPECTRUM 0x1A9
+#define MADERA_FLL2_GPIO_CLOCK 0x1AA
+#define MADERA_FLL3_CONTROL_1 0x1B1
+#define MADERA_FLL3_CONTROL_2 0x1B2
+#define MADERA_FLL3_CONTROL_3 0x1B3
+#define MADERA_FLL3_CONTROL_4 0x1B4
+#define MADERA_FLL3_CONTROL_5 0x1B5
+#define MADERA_FLL3_CONTROL_6 0x1B6
+#define MADERA_FLL3_LOOP_FILTER_TEST_1 0x1B7
+#define MADERA_FLL3_NCO_TEST_0 0x1B8
+#define MADERA_FLL3_CONTROL_7 0x1B9
+#define MADERA_FLL3_SYNCHRONISER_1 0x1C1
+#define MADERA_FLL3_SYNCHRONISER_2 0x1C2
+#define MADERA_FLL3_SYNCHRONISER_3 0x1C3
+#define MADERA_FLL3_SYNCHRONISER_4 0x1C4
+#define MADERA_FLL3_SYNCHRONISER_5 0x1C5
+#define MADERA_FLL3_SYNCHRONISER_6 0x1C6
+#define MADERA_FLL3_SYNCHRONISER_7 0x1C7
+#define MADERA_FLL3_SPREAD_SPECTRUM 0x1C9
+#define MADERA_FLL3_GPIO_CLOCK 0x1CA
+#define MADERA_FLLAO_CONTROL_1 0x1D1
+#define MADERA_FLLAO_CONTROL_2 0x1D2
+#define MADERA_FLLAO_CONTROL_3 0x1D3
+#define MADERA_FLLAO_CONTROL_4 0x1D4
+#define MADERA_FLLAO_CONTROL_5 0x1D5
+#define MADERA_FLLAO_CONTROL_6 0x1D6
+#define MADERA_FLLAO_CONTROL_7 0x1D8
+#define MADERA_FLLAO_CONTROL_8 0x1DA
+#define MADERA_FLLAO_CONTROL_9 0x1DB
+#define MADERA_FLLAO_CONTROL_10 0x1DC
+#define MADERA_FLLAO_CONTROL_11 0x1DD
+#define MADERA_MIC_CHARGE_PUMP_1 0x200
+#define MADERA_HP_CHARGE_PUMP_8 0x20B
+#define MADERA_LDO1_CONTROL_1 0x210
+#define MADERA_LDO2_CONTROL_1 0x213
+#define MADERA_MIC_BIAS_CTRL_1 0x218
+#define MADERA_MIC_BIAS_CTRL_2 0x219
+#define MADERA_MIC_BIAS_CTRL_3 0x21A
+#define MADERA_MIC_BIAS_CTRL_4 0x21B
+#define MADERA_MIC_BIAS_CTRL_5 0x21C
+#define MADERA_MIC_BIAS_CTRL_6 0x21E
+#define MADERA_HP_CTRL_1L 0x225
+#define MADERA_HP_CTRL_1R 0x226
+#define MADERA_HP_CTRL_2L 0x227
+#define MADERA_HP_CTRL_2R 0x228
+#define MADERA_HP_CTRL_3L 0x229
+#define MADERA_HP_CTRL_3R 0x22A
+#define MADERA_DCS_HP1L_CONTROL 0x232
+#define MADERA_DCS_HP1R_CONTROL 0x238
+#define MADERA_EDRE_HP_STEREO_CONTROL 0x27E
+#define MADERA_ACCESSORY_DETECT_MODE_1 0x293
+#define MADERA_HEADPHONE_DETECT_0 0x299
+#define MADERA_HEADPHONE_DETECT_1 0x29B
+#define MADERA_HEADPHONE_DETECT_2 0x29C
+#define MADERA_HEADPHONE_DETECT_3 0x29D
+#define MADERA_HEADPHONE_DETECT_4 0x29E
+#define MADERA_HEADPHONE_DETECT_5 0x29F
+#define MADERA_MIC_DETECT_1_CONTROL_0 0x2A2
+#define MADERA_MIC_DETECT_1_CONTROL_1 0x2A3
+#define MADERA_MIC_DETECT_1_CONTROL_2 0x2A4
+#define MADERA_MIC_DETECT_1_CONTROL_3 0x2A5
+#define MADERA_MIC_DETECT_1_LEVEL_1 0x2A6
+#define MADERA_MIC_DETECT_1_LEVEL_2 0x2A7
+#define MADERA_MIC_DETECT_1_LEVEL_3 0x2A8
+#define MADERA_MIC_DETECT_1_LEVEL_4 0x2A9
+#define MADERA_MIC_DETECT_1_CONTROL_4 0x2AB
+#define MADERA_MIC_DETECT_2_CONTROL_0 0x2B2
+#define MADERA_MIC_DETECT_2_CONTROL_1 0x2B3
+#define MADERA_MIC_DETECT_2_CONTROL_2 0x2B4
+#define MADERA_MIC_DETECT_2_CONTROL_3 0x2B5
+#define MADERA_MIC_DETECT_2_LEVEL_1 0x2B6
+#define MADERA_MIC_DETECT_2_LEVEL_2 0x2B7
+#define MADERA_MIC_DETECT_2_LEVEL_3 0x2B8
+#define MADERA_MIC_DETECT_2_LEVEL_4 0x2B9
+#define MADERA_MIC_DETECT_2_CONTROL_4 0x2BB
+#define MADERA_MICD_CLAMP_CONTROL 0x2C6
+#define MADERA_GP_SWITCH_1 0x2C8
+#define MADERA_JACK_DETECT_ANALOGUE 0x2D3
+#define MADERA_INPUT_ENABLES 0x300
+#define MADERA_INPUT_ENABLES_STATUS 0x301
+#define MADERA_INPUT_RATE 0x308
+#define MADERA_INPUT_VOLUME_RAMP 0x309
+#define MADERA_HPF_CONTROL 0x30C
+#define MADERA_IN1L_CONTROL 0x310
+#define MADERA_ADC_DIGITAL_VOLUME_1L 0x311
+#define MADERA_DMIC1L_CONTROL 0x312
+#define MADERA_IN1L_RATE_CONTROL 0x313
+#define MADERA_IN1R_CONTROL 0x314
+#define MADERA_ADC_DIGITAL_VOLUME_1R 0x315
+#define MADERA_DMIC1R_CONTROL 0x316
+#define MADERA_IN1R_RATE_CONTROL 0x317
+#define MADERA_IN2L_CONTROL 0x318
+#define MADERA_ADC_DIGITAL_VOLUME_2L 0x319
+#define MADERA_DMIC2L_CONTROL 0x31A
+#define MADERA_IN2L_RATE_CONTROL 0x31B
+#define MADERA_IN2R_CONTROL 0x31C
+#define MADERA_ADC_DIGITAL_VOLUME_2R 0x31D
+#define MADERA_DMIC2R_CONTROL 0x31E
+#define MADERA_IN2R_RATE_CONTROL 0x31F
+#define MADERA_IN3L_CONTROL 0x320
+#define MADERA_ADC_DIGITAL_VOLUME_3L 0x321
+#define MADERA_DMIC3L_CONTROL 0x322
+#define MADERA_IN3L_RATE_CONTROL 0x323
+#define MADERA_IN3R_CONTROL 0x324
+#define MADERA_ADC_DIGITAL_VOLUME_3R 0x325
+#define MADERA_DMIC3R_CONTROL 0x326
+#define MADERA_IN3R_RATE_CONTROL 0x327
+#define MADERA_IN4L_CONTROL 0x328
+#define MADERA_ADC_DIGITAL_VOLUME_4L 0x329
+#define MADERA_DMIC4L_CONTROL 0x32A
+#define MADERA_IN4L_RATE_CONTROL 0x32B
+#define MADERA_IN4R_CONTROL 0x32C
+#define MADERA_ADC_DIGITAL_VOLUME_4R 0x32D
+#define MADERA_DMIC4R_CONTROL 0x32E
+#define MADERA_IN4R_RATE_CONTROL 0x32F
+#define MADERA_IN5L_CONTROL 0x330
+#define MADERA_ADC_DIGITAL_VOLUME_5L 0x331
+#define MADERA_DMIC5L_CONTROL 0x332
+#define MADERA_IN5L_RATE_CONTROL 0x333
+#define MADERA_IN5R_CONTROL 0x334
+#define MADERA_ADC_DIGITAL_VOLUME_5R 0x335
+#define MADERA_DMIC5R_CONTROL 0x336
+#define MADERA_IN5R_RATE_CONTROL 0x337
+#define MADERA_IN6L_CONTROL 0x338
+#define MADERA_ADC_DIGITAL_VOLUME_6L 0x339
+#define MADERA_DMIC6L_CONTROL 0x33A
+#define MADERA_IN6R_CONTROL 0x33C
+#define MADERA_ADC_DIGITAL_VOLUME_6R 0x33D
+#define MADERA_DMIC6R_CONTROL 0x33E
+#define MADERA_OUTPUT_ENABLES_1 0x400
+#define MADERA_OUTPUT_STATUS_1 0x401
+#define MADERA_RAW_OUTPUT_STATUS_1 0x406
+#define MADERA_OUTPUT_RATE_1 0x408
+#define MADERA_OUTPUT_VOLUME_RAMP 0x409
+#define MADERA_OUTPUT_PATH_CONFIG_1L 0x410
+#define MADERA_DAC_DIGITAL_VOLUME_1L 0x411
+#define MADERA_OUTPUT_PATH_CONFIG_1 0x412
+#define MADERA_NOISE_GATE_SELECT_1L 0x413
+#define MADERA_OUTPUT_PATH_CONFIG_1R 0x414
+#define MADERA_DAC_DIGITAL_VOLUME_1R 0x415
+#define MADERA_NOISE_GATE_SELECT_1R 0x417
+#define MADERA_OUTPUT_PATH_CONFIG_2L 0x418
+#define MADERA_DAC_DIGITAL_VOLUME_2L 0x419
+#define MADERA_OUTPUT_PATH_CONFIG_2 0x41A
+#define MADERA_NOISE_GATE_SELECT_2L 0x41B
+#define MADERA_OUTPUT_PATH_CONFIG_2R 0x41C
+#define MADERA_DAC_DIGITAL_VOLUME_2R 0x41D
+#define MADERA_NOISE_GATE_SELECT_2R 0x41F
+#define MADERA_OUTPUT_PATH_CONFIG_3L 0x420
+#define MADERA_DAC_DIGITAL_VOLUME_3L 0x421
+#define MADERA_NOISE_GATE_SELECT_3L 0x423
+#define MADERA_OUTPUT_PATH_CONFIG_3R 0x424
+#define MADERA_DAC_DIGITAL_VOLUME_3R 0x425
+#define MADERA_NOISE_GATE_SELECT_3R 0x427
+#define MADERA_OUTPUT_PATH_CONFIG_4L 0x428
+#define MADERA_DAC_DIGITAL_VOLUME_4L 0x429
+#define MADERA_NOISE_GATE_SELECT_4L 0x42B
+#define MADERA_OUTPUT_PATH_CONFIG_4R 0x42C
+#define MADERA_DAC_DIGITAL_VOLUME_4R 0x42D
+#define MADERA_NOISE_GATE_SELECT_4R 0x42F
+#define MADERA_OUTPUT_PATH_CONFIG_5L 0x430
+#define MADERA_DAC_DIGITAL_VOLUME_5L 0x431
+#define MADERA_NOISE_GATE_SELECT_5L 0x433
+#define MADERA_OUTPUT_PATH_CONFIG_5R 0x434
+#define MADERA_DAC_DIGITAL_VOLUME_5R 0x435
+#define MADERA_NOISE_GATE_SELECT_5R 0x437
+#define MADERA_OUTPUT_PATH_CONFIG_6L 0x438
+#define MADERA_DAC_DIGITAL_VOLUME_6L 0x439
+#define MADERA_NOISE_GATE_SELECT_6L 0x43B
+#define MADERA_OUTPUT_PATH_CONFIG_6R 0x43C
+#define MADERA_DAC_DIGITAL_VOLUME_6R 0x43D
+#define MADERA_NOISE_GATE_SELECT_6R 0x43F
+#define MADERA_DRE_ENABLE 0x440
+#define MADERA_EDRE_ENABLE 0x448
+#define MADERA_EDRE_MANUAL 0x44A
+#define MADERA_DAC_AEC_CONTROL_1 0x450
+#define MADERA_DAC_AEC_CONTROL_2 0x451
+#define MADERA_NOISE_GATE_CONTROL 0x458
+#define MADERA_PDM_SPK1_CTRL_1 0x490
+#define MADERA_PDM_SPK1_CTRL_2 0x491
+#define MADERA_PDM_SPK2_CTRL_1 0x492
+#define MADERA_PDM_SPK2_CTRL_2 0x493
+#define MADERA_HP1_SHORT_CIRCUIT_CTRL 0x4A0
+#define MADERA_HP2_SHORT_CIRCUIT_CTRL 0x4A1
+#define MADERA_HP3_SHORT_CIRCUIT_CTRL 0x4A2
+#define MADERA_HP_TEST_CTRL_1 0x4A4
+#define MADERA_HP_TEST_CTRL_5 0x4A8
+#define MADERA_HP_TEST_CTRL_6 0x4A9
+#define MADERA_AIF1_BCLK_CTRL 0x500
+#define MADERA_AIF1_TX_PIN_CTRL 0x501
+#define MADERA_AIF1_RX_PIN_CTRL 0x502
+#define MADERA_AIF1_RATE_CTRL 0x503
+#define MADERA_AIF1_FORMAT 0x504
+#define MADERA_AIF1_RX_BCLK_RATE 0x506
+#define MADERA_AIF1_FRAME_CTRL_1 0x507
+#define MADERA_AIF1_FRAME_CTRL_2 0x508
+#define MADERA_AIF1_FRAME_CTRL_3 0x509
+#define MADERA_AIF1_FRAME_CTRL_4 0x50A
+#define MADERA_AIF1_FRAME_CTRL_5 0x50B
+#define MADERA_AIF1_FRAME_CTRL_6 0x50C
+#define MADERA_AIF1_FRAME_CTRL_7 0x50D
+#define MADERA_AIF1_FRAME_CTRL_8 0x50E
+#define MADERA_AIF1_FRAME_CTRL_9 0x50F
+#define MADERA_AIF1_FRAME_CTRL_10 0x510
+#define MADERA_AIF1_FRAME_CTRL_11 0x511
+#define MADERA_AIF1_FRAME_CTRL_12 0x512
+#define MADERA_AIF1_FRAME_CTRL_13 0x513
+#define MADERA_AIF1_FRAME_CTRL_14 0x514
+#define MADERA_AIF1_FRAME_CTRL_15 0x515
+#define MADERA_AIF1_FRAME_CTRL_16 0x516
+#define MADERA_AIF1_FRAME_CTRL_17 0x517
+#define MADERA_AIF1_FRAME_CTRL_18 0x518
+#define MADERA_AIF1_TX_ENABLES 0x519
+#define MADERA_AIF1_RX_ENABLES 0x51A
+#define MADERA_AIF1_FORCE_WRITE 0x51B
+#define MADERA_AIF2_BCLK_CTRL 0x540
+#define MADERA_AIF2_TX_PIN_CTRL 0x541
+#define MADERA_AIF2_RX_PIN_CTRL 0x542
+#define MADERA_AIF2_RATE_CTRL 0x543
+#define MADERA_AIF2_FORMAT 0x544
+#define MADERA_AIF2_RX_BCLK_RATE 0x546
+#define MADERA_AIF2_FRAME_CTRL_1 0x547
+#define MADERA_AIF2_FRAME_CTRL_2 0x548
+#define MADERA_AIF2_FRAME_CTRL_3 0x549
+#define MADERA_AIF2_FRAME_CTRL_4 0x54A
+#define MADERA_AIF2_FRAME_CTRL_5 0x54B
+#define MADERA_AIF2_FRAME_CTRL_6 0x54C
+#define MADERA_AIF2_FRAME_CTRL_7 0x54D
+#define MADERA_AIF2_FRAME_CTRL_8 0x54E
+#define MADERA_AIF2_FRAME_CTRL_9 0x54F
+#define MADERA_AIF2_FRAME_CTRL_10 0x550
+#define MADERA_AIF2_FRAME_CTRL_11 0x551
+#define MADERA_AIF2_FRAME_CTRL_12 0x552
+#define MADERA_AIF2_FRAME_CTRL_13 0x553
+#define MADERA_AIF2_FRAME_CTRL_14 0x554
+#define MADERA_AIF2_FRAME_CTRL_15 0x555
+#define MADERA_AIF2_FRAME_CTRL_16 0x556
+#define MADERA_AIF2_FRAME_CTRL_17 0x557
+#define MADERA_AIF2_FRAME_CTRL_18 0x558
+#define MADERA_AIF2_TX_ENABLES 0x559
+#define MADERA_AIF2_RX_ENABLES 0x55A
+#define MADERA_AIF2_FORCE_WRITE 0x55B
+#define MADERA_AIF3_BCLK_CTRL 0x580
+#define MADERA_AIF3_TX_PIN_CTRL 0x581
+#define MADERA_AIF3_RX_PIN_CTRL 0x582
+#define MADERA_AIF3_RATE_CTRL 0x583
+#define MADERA_AIF3_FORMAT 0x584
+#define MADERA_AIF3_RX_BCLK_RATE 0x586
+#define MADERA_AIF3_FRAME_CTRL_1 0x587
+#define MADERA_AIF3_FRAME_CTRL_2 0x588
+#define MADERA_AIF3_FRAME_CTRL_3 0x589
+#define MADERA_AIF3_FRAME_CTRL_4 0x58A
+#define MADERA_AIF3_FRAME_CTRL_11 0x591
+#define MADERA_AIF3_FRAME_CTRL_12 0x592
+#define MADERA_AIF3_TX_ENABLES 0x599
+#define MADERA_AIF3_RX_ENABLES 0x59A
+#define MADERA_AIF3_FORCE_WRITE 0x59B
+#define MADERA_AIF4_BCLK_CTRL 0x5A0
+#define MADERA_AIF4_TX_PIN_CTRL 0x5A1
+#define MADERA_AIF4_RX_PIN_CTRL 0x5A2
+#define MADERA_AIF4_RATE_CTRL 0x5A3
+#define MADERA_AIF4_FORMAT 0x5A4
+#define MADERA_AIF4_RX_BCLK_RATE 0x5A6
+#define MADERA_AIF4_FRAME_CTRL_1 0x5A7
+#define MADERA_AIF4_FRAME_CTRL_2 0x5A8
+#define MADERA_AIF4_FRAME_CTRL_3 0x5A9
+#define MADERA_AIF4_FRAME_CTRL_4 0x5AA
+#define MADERA_AIF4_FRAME_CTRL_11 0x5B1
+#define MADERA_AIF4_FRAME_CTRL_12 0x5B2
+#define MADERA_AIF4_TX_ENABLES 0x5B9
+#define MADERA_AIF4_RX_ENABLES 0x5BA
+#define MADERA_AIF4_FORCE_WRITE 0x5BB
+#define MADERA_SPD1_TX_CONTROL 0x5C2
+#define MADERA_SPD1_TX_CHANNEL_STATUS_1 0x5C3
+#define MADERA_SPD1_TX_CHANNEL_STATUS_2 0x5C4
+#define MADERA_SPD1_TX_CHANNEL_STATUS_3 0x5C5
+#define MADERA_SLIMBUS_FRAMER_REF_GEAR 0x5E3
+#define MADERA_SLIMBUS_RATES_1 0x5E5
+#define MADERA_SLIMBUS_RATES_2 0x5E6
+#define MADERA_SLIMBUS_RATES_3 0x5E7
+#define MADERA_SLIMBUS_RATES_4 0x5E8
+#define MADERA_SLIMBUS_RATES_5 0x5E9
+#define MADERA_SLIMBUS_RATES_6 0x5EA
+#define MADERA_SLIMBUS_RATES_7 0x5EB
+#define MADERA_SLIMBUS_RATES_8 0x5EC
+#define MADERA_SLIMBUS_RX_CHANNEL_ENABLE 0x5F5
+#define MADERA_SLIMBUS_TX_CHANNEL_ENABLE 0x5F6
+#define MADERA_SLIMBUS_RX_PORT_STATUS 0x5F7
+#define MADERA_SLIMBUS_TX_PORT_STATUS 0x5F8
+#define MADERA_PWM1MIX_INPUT_1_SOURCE 0x640
+#define MADERA_PWM1MIX_INPUT_1_VOLUME 0x641
+#define MADERA_PWM1MIX_INPUT_2_SOURCE 0x642
+#define MADERA_PWM1MIX_INPUT_2_VOLUME 0x643
+#define MADERA_PWM1MIX_INPUT_3_SOURCE 0x644
+#define MADERA_PWM1MIX_INPUT_3_VOLUME 0x645
+#define MADERA_PWM1MIX_INPUT_4_SOURCE 0x646
+#define MADERA_PWM1MIX_INPUT_4_VOLUME 0x647
+#define MADERA_PWM2MIX_INPUT_1_SOURCE 0x648
+#define MADERA_PWM2MIX_INPUT_1_VOLUME 0x649
+#define MADERA_PWM2MIX_INPUT_2_SOURCE 0x64A
+#define MADERA_PWM2MIX_INPUT_2_VOLUME 0x64B
+#define MADERA_PWM2MIX_INPUT_3_SOURCE 0x64C
+#define MADERA_PWM2MIX_INPUT_3_VOLUME 0x64D
+#define MADERA_PWM2MIX_INPUT_4_SOURCE 0x64E
+#define MADERA_PWM2MIX_INPUT_4_VOLUME 0x64F
+#define MADERA_OUT1LMIX_INPUT_1_SOURCE 0x680
+#define MADERA_OUT1LMIX_INPUT_1_VOLUME 0x681
+#define MADERA_OUT1LMIX_INPUT_2_SOURCE 0x682
+#define MADERA_OUT1LMIX_INPUT_2_VOLUME 0x683
+#define MADERA_OUT1LMIX_INPUT_3_SOURCE 0x684
+#define MADERA_OUT1LMIX_INPUT_3_VOLUME 0x685
+#define MADERA_OUT1LMIX_INPUT_4_SOURCE 0x686
+#define MADERA_OUT1LMIX_INPUT_4_VOLUME 0x687
+#define MADERA_OUT1RMIX_INPUT_1_SOURCE 0x688
+#define MADERA_OUT1RMIX_INPUT_1_VOLUME 0x689
+#define MADERA_OUT1RMIX_INPUT_2_SOURCE 0x68A
+#define MADERA_OUT1RMIX_INPUT_2_VOLUME 0x68B
+#define MADERA_OUT1RMIX_INPUT_3_SOURCE 0x68C
+#define MADERA_OUT1RMIX_INPUT_3_VOLUME 0x68D
+#define MADERA_OUT1RMIX_INPUT_4_SOURCE 0x68E
+#define MADERA_OUT1RMIX_INPUT_4_VOLUME 0x68F
+#define MADERA_OUT2LMIX_INPUT_1_SOURCE 0x690
+#define MADERA_OUT2LMIX_INPUT_1_VOLUME 0x691
+#define MADERA_OUT2LMIX_INPUT_2_SOURCE 0x692
+#define MADERA_OUT2LMIX_INPUT_2_VOLUME 0x693
+#define MADERA_OUT2LMIX_INPUT_3_SOURCE 0x694
+#define MADERA_OUT2LMIX_INPUT_3_VOLUME 0x695
+#define MADERA_OUT2LMIX_INPUT_4_SOURCE 0x696
+#define MADERA_OUT2LMIX_INPUT_4_VOLUME 0x697
+#define MADERA_OUT2RMIX_INPUT_1_SOURCE 0x698
+#define MADERA_OUT2RMIX_INPUT_1_VOLUME 0x699
+#define MADERA_OUT2RMIX_INPUT_2_SOURCE 0x69A
+#define MADERA_OUT2RMIX_INPUT_2_VOLUME 0x69B
+#define MADERA_OUT2RMIX_INPUT_3_SOURCE 0x69C
+#define MADERA_OUT2RMIX_INPUT_3_VOLUME 0x69D
+#define MADERA_OUT2RMIX_INPUT_4_SOURCE 0x69E
+#define MADERA_OUT2RMIX_INPUT_4_VOLUME 0x69F
+#define MADERA_OUT3LMIX_INPUT_1_SOURCE 0x6A0
+#define MADERA_OUT3LMIX_INPUT_1_VOLUME 0x6A1
+#define MADERA_OUT3LMIX_INPUT_2_SOURCE 0x6A2
+#define MADERA_OUT3LMIX_INPUT_2_VOLUME 0x6A3
+#define MADERA_OUT3LMIX_INPUT_3_SOURCE 0x6A4
+#define MADERA_OUT3LMIX_INPUT_3_VOLUME 0x6A5
+#define MADERA_OUT3LMIX_INPUT_4_SOURCE 0x6A6
+#define MADERA_OUT3LMIX_INPUT_4_VOLUME 0x6A7
+#define MADERA_OUT3RMIX_INPUT_1_SOURCE 0x6A8
+#define MADERA_OUT3RMIX_INPUT_1_VOLUME 0x6A9
+#define MADERA_OUT3RMIX_INPUT_2_SOURCE 0x6AA
+#define MADERA_OUT3RMIX_INPUT_2_VOLUME 0x6AB
+#define MADERA_OUT3RMIX_INPUT_3_SOURCE 0x6AC
+#define MADERA_OUT3RMIX_INPUT_3_VOLUME 0x6AD
+#define MADERA_OUT3RMIX_INPUT_4_SOURCE 0x6AE
+#define MADERA_OUT3RMIX_INPUT_4_VOLUME 0x6AF
+#define MADERA_OUT4LMIX_INPUT_1_SOURCE 0x6B0
+#define MADERA_OUT4LMIX_INPUT_1_VOLUME 0x6B1
+#define MADERA_OUT4LMIX_INPUT_2_SOURCE 0x6B2
+#define MADERA_OUT4LMIX_INPUT_2_VOLUME 0x6B3
+#define MADERA_OUT4LMIX_INPUT_3_SOURCE 0x6B4
+#define MADERA_OUT4LMIX_INPUT_3_VOLUME 0x6B5
+#define MADERA_OUT4LMIX_INPUT_4_SOURCE 0x6B6
+#define MADERA_OUT4LMIX_INPUT_4_VOLUME 0x6B7
+#define MADERA_OUT4RMIX_INPUT_1_SOURCE 0x6B8
+#define MADERA_OUT4RMIX_INPUT_1_VOLUME 0x6B9
+#define MADERA_OUT4RMIX_INPUT_2_SOURCE 0x6BA
+#define MADERA_OUT4RMIX_INPUT_2_VOLUME 0x6BB
+#define MADERA_OUT4RMIX_INPUT_3_SOURCE 0x6BC
+#define MADERA_OUT4RMIX_INPUT_3_VOLUME 0x6BD
+#define MADERA_OUT4RMIX_INPUT_4_SOURCE 0x6BE
+#define MADERA_OUT4RMIX_INPUT_4_VOLUME 0x6BF
+#define MADERA_OUT5LMIX_INPUT_1_SOURCE 0x6C0
+#define MADERA_OUT5LMIX_INPUT_1_VOLUME 0x6C1
+#define MADERA_OUT5LMIX_INPUT_2_SOURCE 0x6C2
+#define MADERA_OUT5LMIX_INPUT_2_VOLUME 0x6C3
+#define MADERA_OUT5LMIX_INPUT_3_SOURCE 0x6C4
+#define MADERA_OUT5LMIX_INPUT_3_VOLUME 0x6C5
+#define MADERA_OUT5LMIX_INPUT_4_SOURCE 0x6C6
+#define MADERA_OUT5LMIX_INPUT_4_VOLUME 0x6C7
+#define MADERA_OUT5RMIX_INPUT_1_SOURCE 0x6C8
+#define MADERA_OUT5RMIX_INPUT_1_VOLUME 0x6C9
+#define MADERA_OUT5RMIX_INPUT_2_SOURCE 0x6CA
+#define MADERA_OUT5RMIX_INPUT_2_VOLUME 0x6CB
+#define MADERA_OUT5RMIX_INPUT_3_SOURCE 0x6CC
+#define MADERA_OUT5RMIX_INPUT_3_VOLUME 0x6CD
+#define MADERA_OUT5RMIX_INPUT_4_SOURCE 0x6CE
+#define MADERA_OUT5RMIX_INPUT_4_VOLUME 0x6CF
+#define MADERA_OUT6LMIX_INPUT_1_SOURCE 0x6D0
+#define MADERA_OUT6LMIX_INPUT_1_VOLUME 0x6D1
+#define MADERA_OUT6LMIX_INPUT_2_SOURCE 0x6D2
+#define MADERA_OUT6LMIX_INPUT_2_VOLUME 0x6D3
+#define MADERA_OUT6LMIX_INPUT_3_SOURCE 0x6D4
+#define MADERA_OUT6LMIX_INPUT_3_VOLUME 0x6D5
+#define MADERA_OUT6LMIX_INPUT_4_SOURCE 0x6D6
+#define MADERA_OUT6LMIX_INPUT_4_VOLUME 0x6D7
+#define MADERA_OUT6RMIX_INPUT_1_SOURCE 0x6D8
+#define MADERA_OUT6RMIX_INPUT_1_VOLUME 0x6D9
+#define MADERA_OUT6RMIX_INPUT_2_SOURCE 0x6DA
+#define MADERA_OUT6RMIX_INPUT_2_VOLUME 0x6DB
+#define MADERA_OUT6RMIX_INPUT_3_SOURCE 0x6DC
+#define MADERA_OUT6RMIX_INPUT_3_VOLUME 0x6DD
+#define MADERA_OUT6RMIX_INPUT_4_SOURCE 0x6DE
+#define MADERA_OUT6RMIX_INPUT_4_VOLUME 0x6DF
+#define MADERA_AIF1TX1MIX_INPUT_1_SOURCE 0x700
+#define MADERA_AIF1TX1MIX_INPUT_1_VOLUME 0x701
+#define MADERA_AIF1TX1MIX_INPUT_2_SOURCE 0x702
+#define MADERA_AIF1TX1MIX_INPUT_2_VOLUME 0x703
+#define MADERA_AIF1TX1MIX_INPUT_3_SOURCE 0x704
+#define MADERA_AIF1TX1MIX_INPUT_3_VOLUME 0x705
+#define MADERA_AIF1TX1MIX_INPUT_4_SOURCE 0x706
+#define MADERA_AIF1TX1MIX_INPUT_4_VOLUME 0x707
+#define MADERA_AIF1TX2MIX_INPUT_1_SOURCE 0x708
+#define MADERA_AIF1TX2MIX_INPUT_1_VOLUME 0x709
+#define MADERA_AIF1TX2MIX_INPUT_2_SOURCE 0x70A
+#define MADERA_AIF1TX2MIX_INPUT_2_VOLUME 0x70B
+#define MADERA_AIF1TX2MIX_INPUT_3_SOURCE 0x70C
+#define MADERA_AIF1TX2MIX_INPUT_3_VOLUME 0x70D
+#define MADERA_AIF1TX2MIX_INPUT_4_SOURCE 0x70E
+#define MADERA_AIF1TX2MIX_INPUT_4_VOLUME 0x70F
+#define MADERA_AIF1TX3MIX_INPUT_1_SOURCE 0x710
+#define MADERA_AIF1TX3MIX_INPUT_1_VOLUME 0x711
+#define MADERA_AIF1TX3MIX_INPUT_2_SOURCE 0x712
+#define MADERA_AIF1TX3MIX_INPUT_2_VOLUME 0x713
+#define MADERA_AIF1TX3MIX_INPUT_3_SOURCE 0x714
+#define MADERA_AIF1TX3MIX_INPUT_3_VOLUME 0x715
+#define MADERA_AIF1TX3MIX_INPUT_4_SOURCE 0x716
+#define MADERA_AIF1TX3MIX_INPUT_4_VOLUME 0x717
+#define MADERA_AIF1TX4MIX_INPUT_1_SOURCE 0x718
+#define MADERA_AIF1TX4MIX_INPUT_1_VOLUME 0x719
+#define MADERA_AIF1TX4MIX_INPUT_2_SOURCE 0x71A
+#define MADERA_AIF1TX4MIX_INPUT_2_VOLUME 0x71B
+#define MADERA_AIF1TX4MIX_INPUT_3_SOURCE 0x71C
+#define MADERA_AIF1TX4MIX_INPUT_3_VOLUME 0x71D
+#define MADERA_AIF1TX4MIX_INPUT_4_SOURCE 0x71E
+#define MADERA_AIF1TX4MIX_INPUT_4_VOLUME 0x71F
+#define MADERA_AIF1TX5MIX_INPUT_1_SOURCE 0x720
+#define MADERA_AIF1TX5MIX_INPUT_1_VOLUME 0x721
+#define MADERA_AIF1TX5MIX_INPUT_2_SOURCE 0x722
+#define MADERA_AIF1TX5MIX_INPUT_2_VOLUME 0x723
+#define MADERA_AIF1TX5MIX_INPUT_3_SOURCE 0x724
+#define MADERA_AIF1TX5MIX_INPUT_3_VOLUME 0x725
+#define MADERA_AIF1TX5MIX_INPUT_4_SOURCE 0x726
+#define MADERA_AIF1TX5MIX_INPUT_4_VOLUME 0x727
+#define MADERA_AIF1TX6MIX_INPUT_1_SOURCE 0x728
+#define MADERA_AIF1TX6MIX_INPUT_1_VOLUME 0x729
+#define MADERA_AIF1TX6MIX_INPUT_2_SOURCE 0x72A
+#define MADERA_AIF1TX6MIX_INPUT_2_VOLUME 0x72B
+#define MADERA_AIF1TX6MIX_INPUT_3_SOURCE 0x72C
+#define MADERA_AIF1TX6MIX_INPUT_3_VOLUME 0x72D
+#define MADERA_AIF1TX6MIX_INPUT_4_SOURCE 0x72E
+#define MADERA_AIF1TX6MIX_INPUT_4_VOLUME 0x72F
+#define MADERA_AIF1TX7MIX_INPUT_1_SOURCE 0x730
+#define MADERA_AIF1TX7MIX_INPUT_1_VOLUME 0x731
+#define MADERA_AIF1TX7MIX_INPUT_2_SOURCE 0x732
+#define MADERA_AIF1TX7MIX_INPUT_2_VOLUME 0x733
+#define MADERA_AIF1TX7MIX_INPUT_3_SOURCE 0x734
+#define MADERA_AIF1TX7MIX_INPUT_3_VOLUME 0x735
+#define MADERA_AIF1TX7MIX_INPUT_4_SOURCE 0x736
+#define MADERA_AIF1TX7MIX_INPUT_4_VOLUME 0x737
+#define MADERA_AIF1TX8MIX_INPUT_1_SOURCE 0x738
+#define MADERA_AIF1TX8MIX_INPUT_1_VOLUME 0x739
+#define MADERA_AIF1TX8MIX_INPUT_2_SOURCE 0x73A
+#define MADERA_AIF1TX8MIX_INPUT_2_VOLUME 0x73B
+#define MADERA_AIF1TX8MIX_INPUT_3_SOURCE 0x73C
+#define MADERA_AIF1TX8MIX_INPUT_3_VOLUME 0x73D
+#define MADERA_AIF1TX8MIX_INPUT_4_SOURCE 0x73E
+#define MADERA_AIF1TX8MIX_INPUT_4_VOLUME 0x73F
+#define MADERA_AIF2TX1MIX_INPUT_1_SOURCE 0x740
+#define MADERA_AIF2TX1MIX_INPUT_1_VOLUME 0x741
+#define MADERA_AIF2TX1MIX_INPUT_2_SOURCE 0x742
+#define MADERA_AIF2TX1MIX_INPUT_2_VOLUME 0x743
+#define MADERA_AIF2TX1MIX_INPUT_3_SOURCE 0x744
+#define MADERA_AIF2TX1MIX_INPUT_3_VOLUME 0x745
+#define MADERA_AIF2TX1MIX_INPUT_4_SOURCE 0x746
+#define MADERA_AIF2TX1MIX_INPUT_4_VOLUME 0x747
+#define MADERA_AIF2TX2MIX_INPUT_1_SOURCE 0x748
+#define MADERA_AIF2TX2MIX_INPUT_1_VOLUME 0x749
+#define MADERA_AIF2TX2MIX_INPUT_2_SOURCE 0x74A
+#define MADERA_AIF2TX2MIX_INPUT_2_VOLUME 0x74B
+#define MADERA_AIF2TX2MIX_INPUT_3_SOURCE 0x74C
+#define MADERA_AIF2TX2MIX_INPUT_3_VOLUME 0x74D
+#define MADERA_AIF2TX2MIX_INPUT_4_SOURCE 0x74E
+#define MADERA_AIF2TX2MIX_INPUT_4_VOLUME 0x74F
+#define MADERA_AIF2TX3MIX_INPUT_1_SOURCE 0x750
+#define MADERA_AIF2TX3MIX_INPUT_1_VOLUME 0x751
+#define MADERA_AIF2TX3MIX_INPUT_2_SOURCE 0x752
+#define MADERA_AIF2TX3MIX_INPUT_2_VOLUME 0x753
+#define MADERA_AIF2TX3MIX_INPUT_3_SOURCE 0x754
+#define MADERA_AIF2TX3MIX_INPUT_3_VOLUME 0x755
+#define MADERA_AIF2TX3MIX_INPUT_4_SOURCE 0x756
+#define MADERA_AIF2TX3MIX_INPUT_4_VOLUME 0x757
+#define MADERA_AIF2TX4MIX_INPUT_1_SOURCE 0x758
+#define MADERA_AIF2TX4MIX_INPUT_1_VOLUME 0x759
+#define MADERA_AIF2TX4MIX_INPUT_2_SOURCE 0x75A
+#define MADERA_AIF2TX4MIX_INPUT_2_VOLUME 0x75B
+#define MADERA_AIF2TX4MIX_INPUT_3_SOURCE 0x75C
+#define MADERA_AIF2TX4MIX_INPUT_3_VOLUME 0x75D
+#define MADERA_AIF2TX4MIX_INPUT_4_SOURCE 0x75E
+#define MADERA_AIF2TX4MIX_INPUT_4_VOLUME 0x75F
+#define MADERA_AIF2TX5MIX_INPUT_1_SOURCE 0x760
+#define MADERA_AIF2TX5MIX_INPUT_1_VOLUME 0x761
+#define MADERA_AIF2TX5MIX_INPUT_2_SOURCE 0x762
+#define MADERA_AIF2TX5MIX_INPUT_2_VOLUME 0x763
+#define MADERA_AIF2TX5MIX_INPUT_3_SOURCE 0x764
+#define MADERA_AIF2TX5MIX_INPUT_3_VOLUME 0x765
+#define MADERA_AIF2TX5MIX_INPUT_4_SOURCE 0x766
+#define MADERA_AIF2TX5MIX_INPUT_4_VOLUME 0x767
+#define MADERA_AIF2TX6MIX_INPUT_1_SOURCE 0x768
+#define MADERA_AIF2TX6MIX_INPUT_1_VOLUME 0x769
+#define MADERA_AIF2TX6MIX_INPUT_2_SOURCE 0x76A
+#define MADERA_AIF2TX6MIX_INPUT_2_VOLUME 0x76B
+#define MADERA_AIF2TX6MIX_INPUT_3_SOURCE 0x76C
+#define MADERA_AIF2TX6MIX_INPUT_3_VOLUME 0x76D
+#define MADERA_AIF2TX6MIX_INPUT_4_SOURCE 0x76E
+#define MADERA_AIF2TX6MIX_INPUT_4_VOLUME 0x76F
+#define MADERA_AIF2TX7MIX_INPUT_1_SOURCE 0x770
+#define MADERA_AIF2TX7MIX_INPUT_1_VOLUME 0x771
+#define MADERA_AIF2TX7MIX_INPUT_2_SOURCE 0x772
+#define MADERA_AIF2TX7MIX_INPUT_2_VOLUME 0x773
+#define MADERA_AIF2TX7MIX_INPUT_3_SOURCE 0x774
+#define MADERA_AIF2TX7MIX_INPUT_3_VOLUME 0x775
+#define MADERA_AIF2TX7MIX_INPUT_4_SOURCE 0x776
+#define MADERA_AIF2TX7MIX_INPUT_4_VOLUME 0x777
+#define MADERA_AIF2TX8MIX_INPUT_1_SOURCE 0x778
+#define MADERA_AIF2TX8MIX_INPUT_1_VOLUME 0x779
+#define MADERA_AIF2TX8MIX_INPUT_2_SOURCE 0x77A
+#define MADERA_AIF2TX8MIX_INPUT_2_VOLUME 0x77B
+#define MADERA_AIF2TX8MIX_INPUT_3_SOURCE 0x77C
+#define MADERA_AIF2TX8MIX_INPUT_3_VOLUME 0x77D
+#define MADERA_AIF2TX8MIX_INPUT_4_SOURCE 0x77E
+#define MADERA_AIF2TX8MIX_INPUT_4_VOLUME 0x77F
+#define MADERA_AIF3TX1MIX_INPUT_1_SOURCE 0x780
+#define MADERA_AIF3TX1MIX_INPUT_1_VOLUME 0x781
+#define MADERA_AIF3TX1MIX_INPUT_2_SOURCE 0x782
+#define MADERA_AIF3TX1MIX_INPUT_2_VOLUME 0x783
+#define MADERA_AIF3TX1MIX_INPUT_3_SOURCE 0x784
+#define MADERA_AIF3TX1MIX_INPUT_3_VOLUME 0x785
+#define MADERA_AIF3TX1MIX_INPUT_4_SOURCE 0x786
+#define MADERA_AIF3TX1MIX_INPUT_4_VOLUME 0x787
+#define MADERA_AIF3TX2MIX_INPUT_1_SOURCE 0x788
+#define MADERA_AIF3TX2MIX_INPUT_1_VOLUME 0x789
+#define MADERA_AIF3TX2MIX_INPUT_2_SOURCE 0x78A
+#define MADERA_AIF3TX2MIX_INPUT_2_VOLUME 0x78B
+#define MADERA_AIF3TX2MIX_INPUT_3_SOURCE 0x78C
+#define MADERA_AIF3TX2MIX_INPUT_3_VOLUME 0x78D
+#define MADERA_AIF3TX2MIX_INPUT_4_SOURCE 0x78E
+#define MADERA_AIF3TX2MIX_INPUT_4_VOLUME 0x78F
+#define MADERA_AIF4TX1MIX_INPUT_1_SOURCE 0x7A0
+#define MADERA_AIF4TX1MIX_INPUT_1_VOLUME 0x7A1
+#define MADERA_AIF4TX1MIX_INPUT_2_SOURCE 0x7A2
+#define MADERA_AIF4TX1MIX_INPUT_2_VOLUME 0x7A3
+#define MADERA_AIF4TX1MIX_INPUT_3_SOURCE 0x7A4
+#define MADERA_AIF4TX1MIX_INPUT_3_VOLUME 0x7A5
+#define MADERA_AIF4TX1MIX_INPUT_4_SOURCE 0x7A6
+#define MADERA_AIF4TX1MIX_INPUT_4_VOLUME 0x7A7
+#define MADERA_AIF4TX2MIX_INPUT_1_SOURCE 0x7A8
+#define MADERA_AIF4TX2MIX_INPUT_1_VOLUME 0x7A9
+#define MADERA_AIF4TX2MIX_INPUT_2_SOURCE 0x7AA
+#define MADERA_AIF4TX2MIX_INPUT_2_VOLUME 0x7AB
+#define MADERA_AIF4TX2MIX_INPUT_3_SOURCE 0x7AC
+#define MADERA_AIF4TX2MIX_INPUT_3_VOLUME 0x7AD
+#define MADERA_AIF4TX2MIX_INPUT_4_SOURCE 0x7AE
+#define MADERA_AIF4TX2MIX_INPUT_4_VOLUME 0x7AF
+#define MADERA_SLIMTX1MIX_INPUT_1_SOURCE 0x7C0
+#define MADERA_SLIMTX1MIX_INPUT_1_VOLUME 0x7C1
+#define MADERA_SLIMTX1MIX_INPUT_2_SOURCE 0x7C2
+#define MADERA_SLIMTX1MIX_INPUT_2_VOLUME 0x7C3
+#define MADERA_SLIMTX1MIX_INPUT_3_SOURCE 0x7C4
+#define MADERA_SLIMTX1MIX_INPUT_3_VOLUME 0x7C5
+#define MADERA_SLIMTX1MIX_INPUT_4_SOURCE 0x7C6
+#define MADERA_SLIMTX1MIX_INPUT_4_VOLUME 0x7C7
+#define MADERA_SLIMTX2MIX_INPUT_1_SOURCE 0x7C8
+#define MADERA_SLIMTX2MIX_INPUT_1_VOLUME 0x7C9
+#define MADERA_SLIMTX2MIX_INPUT_2_SOURCE 0x7CA
+#define MADERA_SLIMTX2MIX_INPUT_2_VOLUME 0x7CB
+#define MADERA_SLIMTX2MIX_INPUT_3_SOURCE 0x7CC
+#define MADERA_SLIMTX2MIX_INPUT_3_VOLUME 0x7CD
+#define MADERA_SLIMTX2MIX_INPUT_4_SOURCE 0x7CE
+#define MADERA_SLIMTX2MIX_INPUT_4_VOLUME 0x7CF
+#define MADERA_SLIMTX3MIX_INPUT_1_SOURCE 0x7D0
+#define MADERA_SLIMTX3MIX_INPUT_1_VOLUME 0x7D1
+#define MADERA_SLIMTX3MIX_INPUT_2_SOURCE 0x7D2
+#define MADERA_SLIMTX3MIX_INPUT_2_VOLUME 0x7D3
+#define MADERA_SLIMTX3MIX_INPUT_3_SOURCE 0x7D4
+#define MADERA_SLIMTX3MIX_INPUT_3_VOLUME 0x7D5
+#define MADERA_SLIMTX3MIX_INPUT_4_SOURCE 0x7D6
+#define MADERA_SLIMTX3MIX_INPUT_4_VOLUME 0x7D7
+#define MADERA_SLIMTX4MIX_INPUT_1_SOURCE 0x7D8
+#define MADERA_SLIMTX4MIX_INPUT_1_VOLUME 0x7D9
+#define MADERA_SLIMTX4MIX_INPUT_2_SOURCE 0x7DA
+#define MADERA_SLIMTX4MIX_INPUT_2_VOLUME 0x7DB
+#define MADERA_SLIMTX4MIX_INPUT_3_SOURCE 0x7DC
+#define MADERA_SLIMTX4MIX_INPUT_3_VOLUME 0x7DD
+#define MADERA_SLIMTX4MIX_INPUT_4_SOURCE 0x7DE
+#define MADERA_SLIMTX4MIX_INPUT_4_VOLUME 0x7DF
+#define MADERA_SLIMTX5MIX_INPUT_1_SOURCE 0x7E0
+#define MADERA_SLIMTX5MIX_INPUT_1_VOLUME 0x7E1
+#define MADERA_SLIMTX5MIX_INPUT_2_SOURCE 0x7E2
+#define MADERA_SLIMTX5MIX_INPUT_2_VOLUME 0x7E3
+#define MADERA_SLIMTX5MIX_INPUT_3_SOURCE 0x7E4
+#define MADERA_SLIMTX5MIX_INPUT_3_VOLUME 0x7E5
+#define MADERA_SLIMTX5MIX_INPUT_4_SOURCE 0x7E6
+#define MADERA_SLIMTX5MIX_INPUT_4_VOLUME 0x7E7
+#define MADERA_SLIMTX6MIX_INPUT_1_SOURCE 0x7E8
+#define MADERA_SLIMTX6MIX_INPUT_1_VOLUME 0x7E9
+#define MADERA_SLIMTX6MIX_INPUT_2_SOURCE 0x7EA
+#define MADERA_SLIMTX6MIX_INPUT_2_VOLUME 0x7EB
+#define MADERA_SLIMTX6MIX_INPUT_3_SOURCE 0x7EC
+#define MADERA_SLIMTX6MIX_INPUT_3_VOLUME 0x7ED
+#define MADERA_SLIMTX6MIX_INPUT_4_SOURCE 0x7EE
+#define MADERA_SLIMTX6MIX_INPUT_4_VOLUME 0x7EF
+#define MADERA_SLIMTX7MIX_INPUT_1_SOURCE 0x7F0
+#define MADERA_SLIMTX7MIX_INPUT_1_VOLUME 0x7F1
+#define MADERA_SLIMTX7MIX_INPUT_2_SOURCE 0x7F2
+#define MADERA_SLIMTX7MIX_INPUT_2_VOLUME 0x7F3
+#define MADERA_SLIMTX7MIX_INPUT_3_SOURCE 0x7F4
+#define MADERA_SLIMTX7MIX_INPUT_3_VOLUME 0x7F5
+#define MADERA_SLIMTX7MIX_INPUT_4_SOURCE 0x7F6
+#define MADERA_SLIMTX7MIX_INPUT_4_VOLUME 0x7F7
+#define MADERA_SLIMTX8MIX_INPUT_1_SOURCE 0x7F8
+#define MADERA_SLIMTX8MIX_INPUT_1_VOLUME 0x7F9
+#define MADERA_SLIMTX8MIX_INPUT_2_SOURCE 0x7FA
+#define MADERA_SLIMTX8MIX_INPUT_2_VOLUME 0x7FB
+#define MADERA_SLIMTX8MIX_INPUT_3_SOURCE 0x7FC
+#define MADERA_SLIMTX8MIX_INPUT_3_VOLUME 0x7FD
+#define MADERA_SLIMTX8MIX_INPUT_4_SOURCE 0x7FE
+#define MADERA_SLIMTX8MIX_INPUT_4_VOLUME 0x7FF
+#define MADERA_SPDIF1TX1MIX_INPUT_1_SOURCE 0x800
+#define MADERA_SPDIF1TX1MIX_INPUT_1_VOLUME 0x801
+#define MADERA_SPDIF1TX2MIX_INPUT_1_SOURCE 0x808
+#define MADERA_SPDIF1TX2MIX_INPUT_1_VOLUME 0x809
+#define MADERA_EQ1MIX_INPUT_1_SOURCE 0x880
+#define MADERA_EQ1MIX_INPUT_1_VOLUME 0x881
+#define MADERA_EQ1MIX_INPUT_2_SOURCE 0x882
+#define MADERA_EQ1MIX_INPUT_2_VOLUME 0x883
+#define MADERA_EQ1MIX_INPUT_3_SOURCE 0x884
+#define MADERA_EQ1MIX_INPUT_3_VOLUME 0x885
+#define MADERA_EQ1MIX_INPUT_4_SOURCE 0x886
+#define MADERA_EQ1MIX_INPUT_4_VOLUME 0x887
+#define MADERA_EQ2MIX_INPUT_1_SOURCE 0x888
+#define MADERA_EQ2MIX_INPUT_1_VOLUME 0x889
+#define MADERA_EQ2MIX_INPUT_2_SOURCE 0x88A
+#define MADERA_EQ2MIX_INPUT_2_VOLUME 0x88B
+#define MADERA_EQ2MIX_INPUT_3_SOURCE 0x88C
+#define MADERA_EQ2MIX_INPUT_3_VOLUME 0x88D
+#define MADERA_EQ2MIX_INPUT_4_SOURCE 0x88E
+#define MADERA_EQ2MIX_INPUT_4_VOLUME 0x88F
+#define MADERA_EQ3MIX_INPUT_1_SOURCE 0x890
+#define MADERA_EQ3MIX_INPUT_1_VOLUME 0x891
+#define MADERA_EQ3MIX_INPUT_2_SOURCE 0x892
+#define MADERA_EQ3MIX_INPUT_2_VOLUME 0x893
+#define MADERA_EQ3MIX_INPUT_3_SOURCE 0x894
+#define MADERA_EQ3MIX_INPUT_3_VOLUME 0x895
+#define MADERA_EQ3MIX_INPUT_4_SOURCE 0x896
+#define MADERA_EQ3MIX_INPUT_4_VOLUME 0x897
+#define MADERA_EQ4MIX_INPUT_1_SOURCE 0x898
+#define MADERA_EQ4MIX_INPUT_1_VOLUME 0x899
+#define MADERA_EQ4MIX_INPUT_2_SOURCE 0x89A
+#define MADERA_EQ4MIX_INPUT_2_VOLUME 0x89B
+#define MADERA_EQ4MIX_INPUT_3_SOURCE 0x89C
+#define MADERA_EQ4MIX_INPUT_3_VOLUME 0x89D
+#define MADERA_EQ4MIX_INPUT_4_SOURCE 0x89E
+#define MADERA_EQ4MIX_INPUT_4_VOLUME 0x89F
+#define MADERA_DRC1LMIX_INPUT_1_SOURCE 0x8C0
+#define MADERA_DRC1LMIX_INPUT_1_VOLUME 0x8C1
+#define MADERA_DRC1LMIX_INPUT_2_SOURCE 0x8C2
+#define MADERA_DRC1LMIX_INPUT_2_VOLUME 0x8C3
+#define MADERA_DRC1LMIX_INPUT_3_SOURCE 0x8C4
+#define MADERA_DRC1LMIX_INPUT_3_VOLUME 0x8C5
+#define MADERA_DRC1LMIX_INPUT_4_SOURCE 0x8C6
+#define MADERA_DRC1LMIX_INPUT_4_VOLUME 0x8C7
+#define MADERA_DRC1RMIX_INPUT_1_SOURCE 0x8C8
+#define MADERA_DRC1RMIX_INPUT_1_VOLUME 0x8C9
+#define MADERA_DRC1RMIX_INPUT_2_SOURCE 0x8CA
+#define MADERA_DRC1RMIX_INPUT_2_VOLUME 0x8CB
+#define MADERA_DRC1RMIX_INPUT_3_SOURCE 0x8CC
+#define MADERA_DRC1RMIX_INPUT_3_VOLUME 0x8CD
+#define MADERA_DRC1RMIX_INPUT_4_SOURCE 0x8CE
+#define MADERA_DRC1RMIX_INPUT_4_VOLUME 0x8CF
+#define MADERA_DRC2LMIX_INPUT_1_SOURCE 0x8D0
+#define MADERA_DRC2LMIX_INPUT_1_VOLUME 0x8D1
+#define MADERA_DRC2LMIX_INPUT_2_SOURCE 0x8D2
+#define MADERA_DRC2LMIX_INPUT_2_VOLUME 0x8D3
+#define MADERA_DRC2LMIX_INPUT_3_SOURCE 0x8D4
+#define MADERA_DRC2LMIX_INPUT_3_VOLUME 0x8D5
+#define MADERA_DRC2LMIX_INPUT_4_SOURCE 0x8D6
+#define MADERA_DRC2LMIX_INPUT_4_VOLUME 0x8D7
+#define MADERA_DRC2RMIX_INPUT_1_SOURCE 0x8D8
+#define MADERA_DRC2RMIX_INPUT_1_VOLUME 0x8D9
+#define MADERA_DRC2RMIX_INPUT_2_SOURCE 0x8DA
+#define MADERA_DRC2RMIX_INPUT_2_VOLUME 0x8DB
+#define MADERA_DRC2RMIX_INPUT_3_SOURCE 0x8DC
+#define MADERA_DRC2RMIX_INPUT_3_VOLUME 0x8DD
+#define MADERA_DRC2RMIX_INPUT_4_SOURCE 0x8DE
+#define MADERA_DRC2RMIX_INPUT_4_VOLUME 0x8DF
+#define MADERA_HPLP1MIX_INPUT_1_SOURCE 0x900
+#define MADERA_HPLP1MIX_INPUT_1_VOLUME 0x901
+#define MADERA_HPLP1MIX_INPUT_2_SOURCE 0x902
+#define MADERA_HPLP1MIX_INPUT_2_VOLUME 0x903
+#define MADERA_HPLP1MIX_INPUT_3_SOURCE 0x904
+#define MADERA_HPLP1MIX_INPUT_3_VOLUME 0x905
+#define MADERA_HPLP1MIX_INPUT_4_SOURCE 0x906
+#define MADERA_HPLP1MIX_INPUT_4_VOLUME 0x907
+#define MADERA_HPLP2MIX_INPUT_1_SOURCE 0x908
+#define MADERA_HPLP2MIX_INPUT_1_VOLUME 0x909
+#define MADERA_HPLP2MIX_INPUT_2_SOURCE 0x90A
+#define MADERA_HPLP2MIX_INPUT_2_VOLUME 0x90B
+#define MADERA_HPLP2MIX_INPUT_3_SOURCE 0x90C
+#define MADERA_HPLP2MIX_INPUT_3_VOLUME 0x90D
+#define MADERA_HPLP2MIX_INPUT_4_SOURCE 0x90E
+#define MADERA_HPLP2MIX_INPUT_4_VOLUME 0x90F
+#define MADERA_HPLP3MIX_INPUT_1_SOURCE 0x910
+#define MADERA_HPLP3MIX_INPUT_1_VOLUME 0x911
+#define MADERA_HPLP3MIX_INPUT_2_SOURCE 0x912
+#define MADERA_HPLP3MIX_INPUT_2_VOLUME 0x913
+#define MADERA_HPLP3MIX_INPUT_3_SOURCE 0x914
+#define MADERA_HPLP3MIX_INPUT_3_VOLUME 0x915
+#define MADERA_HPLP3MIX_INPUT_4_SOURCE 0x916
+#define MADERA_HPLP3MIX_INPUT_4_VOLUME 0x917
+#define MADERA_HPLP4MIX_INPUT_1_SOURCE 0x918
+#define MADERA_HPLP4MIX_INPUT_1_VOLUME 0x919
+#define MADERA_HPLP4MIX_INPUT_2_SOURCE 0x91A
+#define MADERA_HPLP4MIX_INPUT_2_VOLUME 0x91B
+#define MADERA_HPLP4MIX_INPUT_3_SOURCE 0x91C
+#define MADERA_HPLP4MIX_INPUT_3_VOLUME 0x91D
+#define MADERA_HPLP4MIX_INPUT_4_SOURCE 0x91E
+#define MADERA_HPLP4MIX_INPUT_4_VOLUME 0x91F
+#define MADERA_DSP1LMIX_INPUT_1_SOURCE 0x940
+#define MADERA_DSP1LMIX_INPUT_1_VOLUME 0x941
+#define MADERA_DSP1LMIX_INPUT_2_SOURCE 0x942
+#define MADERA_DSP1LMIX_INPUT_2_VOLUME 0x943
+#define MADERA_DSP1LMIX_INPUT_3_SOURCE 0x944
+#define MADERA_DSP1LMIX_INPUT_3_VOLUME 0x945
+#define MADERA_DSP1LMIX_INPUT_4_SOURCE 0x946
+#define MADERA_DSP1LMIX_INPUT_4_VOLUME 0x947
+#define MADERA_DSP1RMIX_INPUT_1_SOURCE 0x948
+#define MADERA_DSP1RMIX_INPUT_1_VOLUME 0x949
+#define MADERA_DSP1RMIX_INPUT_2_SOURCE 0x94A
+#define MADERA_DSP1RMIX_INPUT_2_VOLUME 0x94B
+#define MADERA_DSP1RMIX_INPUT_3_SOURCE 0x94C
+#define MADERA_DSP1RMIX_INPUT_3_VOLUME 0x94D
+#define MADERA_DSP1RMIX_INPUT_4_SOURCE 0x94E
+#define MADERA_DSP1RMIX_INPUT_4_VOLUME 0x94F
+#define MADERA_DSP1AUX1MIX_INPUT_1_SOURCE 0x950
+#define MADERA_DSP1AUX2MIX_INPUT_1_SOURCE 0x958
+#define MADERA_DSP1AUX3MIX_INPUT_1_SOURCE 0x960
+#define MADERA_DSP1AUX4MIX_INPUT_1_SOURCE 0x968
+#define MADERA_DSP1AUX5MIX_INPUT_1_SOURCE 0x970
+#define MADERA_DSP1AUX6MIX_INPUT_1_SOURCE 0x978
+#define MADERA_DSP2LMIX_INPUT_1_SOURCE 0x980
+#define MADERA_DSP2LMIX_INPUT_1_VOLUME 0x981
+#define MADERA_DSP2LMIX_INPUT_2_SOURCE 0x982
+#define MADERA_DSP2LMIX_INPUT_2_VOLUME 0x983
+#define MADERA_DSP2LMIX_INPUT_3_SOURCE 0x984
+#define MADERA_DSP2LMIX_INPUT_3_VOLUME 0x985
+#define MADERA_DSP2LMIX_INPUT_4_SOURCE 0x986
+#define MADERA_DSP2LMIX_INPUT_4_VOLUME 0x987
+#define MADERA_DSP2RMIX_INPUT_1_SOURCE 0x988
+#define MADERA_DSP2RMIX_INPUT_1_VOLUME 0x989
+#define MADERA_DSP2RMIX_INPUT_2_SOURCE 0x98A
+#define MADERA_DSP2RMIX_INPUT_2_VOLUME 0x98B
+#define MADERA_DSP2RMIX_INPUT_3_SOURCE 0x98C
+#define MADERA_DSP2RMIX_INPUT_3_VOLUME 0x98D
+#define MADERA_DSP2RMIX_INPUT_4_SOURCE 0x98E
+#define MADERA_DSP2RMIX_INPUT_4_VOLUME 0x98F
+#define MADERA_DSP2AUX1MIX_INPUT_1_SOURCE 0x990
+#define MADERA_DSP2AUX2MIX_INPUT_1_SOURCE 0x998
+#define MADERA_DSP2AUX3MIX_INPUT_1_SOURCE 0x9A0
+#define MADERA_DSP2AUX4MIX_INPUT_1_SOURCE 0x9A8
+#define MADERA_DSP2AUX5MIX_INPUT_1_SOURCE 0x9B0
+#define MADERA_DSP2AUX6MIX_INPUT_1_SOURCE 0x9B8
+#define MADERA_DSP3LMIX_INPUT_1_SOURCE 0x9C0
+#define MADERA_DSP3LMIX_INPUT_1_VOLUME 0x9C1
+#define MADERA_DSP3LMIX_INPUT_2_SOURCE 0x9C2
+#define MADERA_DSP3LMIX_INPUT_2_VOLUME 0x9C3
+#define MADERA_DSP3LMIX_INPUT_3_SOURCE 0x9C4
+#define MADERA_DSP3LMIX_INPUT_3_VOLUME 0x9C5
+#define MADERA_DSP3LMIX_INPUT_4_SOURCE 0x9C6
+#define MADERA_DSP3LMIX_INPUT_4_VOLUME 0x9C7
+#define MADERA_DSP3RMIX_INPUT_1_SOURCE 0x9C8
+#define MADERA_DSP3RMIX_INPUT_1_VOLUME 0x9C9
+#define MADERA_DSP3RMIX_INPUT_2_SOURCE 0x9CA
+#define MADERA_DSP3RMIX_INPUT_2_VOLUME 0x9CB
+#define MADERA_DSP3RMIX_INPUT_3_SOURCE 0x9CC
+#define MADERA_DSP3RMIX_INPUT_3_VOLUME 0x9CD
+#define MADERA_DSP3RMIX_INPUT_4_SOURCE 0x9CE
+#define MADERA_DSP3RMIX_INPUT_4_VOLUME 0x9CF
+#define MADERA_DSP3AUX1MIX_INPUT_1_SOURCE 0x9D0
+#define MADERA_DSP3AUX2MIX_INPUT_1_SOURCE 0x9D8
+#define MADERA_DSP3AUX3MIX_INPUT_1_SOURCE 0x9E0
+#define MADERA_DSP3AUX4MIX_INPUT_1_SOURCE 0x9E8
+#define MADERA_DSP3AUX5MIX_INPUT_1_SOURCE 0x9F0
+#define MADERA_DSP3AUX6MIX_INPUT_1_SOURCE 0x9F8
+#define MADERA_DSP4LMIX_INPUT_1_SOURCE 0xA00
+#define MADERA_DSP4LMIX_INPUT_1_VOLUME 0xA01
+#define MADERA_DSP4LMIX_INPUT_2_SOURCE 0xA02
+#define MADERA_DSP4LMIX_INPUT_2_VOLUME 0xA03
+#define MADERA_DSP4LMIX_INPUT_3_SOURCE 0xA04
+#define MADERA_DSP4LMIX_INPUT_3_VOLUME 0xA05
+#define MADERA_DSP4LMIX_INPUT_4_SOURCE 0xA06
+#define MADERA_DSP4LMIX_INPUT_4_VOLUME 0xA07
+#define MADERA_DSP4RMIX_INPUT_1_SOURCE 0xA08
+#define MADERA_DSP4RMIX_INPUT_1_VOLUME 0xA09
+#define MADERA_DSP4RMIX_INPUT_2_SOURCE 0xA0A
+#define MADERA_DSP4RMIX_INPUT_2_VOLUME 0xA0B
+#define MADERA_DSP4RMIX_INPUT_3_SOURCE 0xA0C
+#define MADERA_DSP4RMIX_INPUT_3_VOLUME 0xA0D
+#define MADERA_DSP4RMIX_INPUT_4_SOURCE 0xA0E
+#define MADERA_DSP4RMIX_INPUT_4_VOLUME 0xA0F
+#define MADERA_DSP4AUX1MIX_INPUT_1_SOURCE 0xA10
+#define MADERA_DSP4AUX2MIX_INPUT_1_SOURCE 0xA18
+#define MADERA_DSP4AUX3MIX_INPUT_1_SOURCE 0xA20
+#define MADERA_DSP4AUX4MIX_INPUT_1_SOURCE 0xA28
+#define MADERA_DSP4AUX5MIX_INPUT_1_SOURCE 0xA30
+#define MADERA_DSP4AUX6MIX_INPUT_1_SOURCE 0xA38
+#define MADERA_DSP5LMIX_INPUT_1_SOURCE 0xA40
+#define MADERA_DSP5LMIX_INPUT_1_VOLUME 0xA41
+#define MADERA_DSP5LMIX_INPUT_2_SOURCE 0xA42
+#define MADERA_DSP5LMIX_INPUT_2_VOLUME 0xA43
+#define MADERA_DSP5LMIX_INPUT_3_SOURCE 0xA44
+#define MADERA_DSP5LMIX_INPUT_3_VOLUME 0xA45
+#define MADERA_DSP5LMIX_INPUT_4_SOURCE 0xA46
+#define MADERA_DSP5LMIX_INPUT_4_VOLUME 0xA47
+#define MADERA_DSP5RMIX_INPUT_1_SOURCE 0xA48
+#define MADERA_DSP5RMIX_INPUT_1_VOLUME 0xA49
+#define MADERA_DSP5RMIX_INPUT_2_SOURCE 0xA4A
+#define MADERA_DSP5RMIX_INPUT_2_VOLUME 0xA4B
+#define MADERA_DSP5RMIX_INPUT_3_SOURCE 0xA4C
+#define MADERA_DSP5RMIX_INPUT_3_VOLUME 0xA4D
+#define MADERA_DSP5RMIX_INPUT_4_SOURCE 0xA4E
+#define MADERA_DSP5RMIX_INPUT_4_VOLUME 0xA4F
+#define MADERA_DSP5AUX1MIX_INPUT_1_SOURCE 0xA50
+#define MADERA_DSP5AUX2MIX_INPUT_1_SOURCE 0xA58
+#define MADERA_DSP5AUX3MIX_INPUT_1_SOURCE 0xA60
+#define MADERA_DSP5AUX4MIX_INPUT_1_SOURCE 0xA68
+#define MADERA_DSP5AUX5MIX_INPUT_1_SOURCE 0xA70
+#define MADERA_DSP5AUX6MIX_INPUT_1_SOURCE 0xA78
+#define MADERA_ASRC1_1LMIX_INPUT_1_SOURCE 0xA80
+#define MADERA_ASRC1_1RMIX_INPUT_1_SOURCE 0xA88
+#define MADERA_ASRC1_2LMIX_INPUT_1_SOURCE 0xA90
+#define MADERA_ASRC1_2RMIX_INPUT_1_SOURCE 0xA98
+#define MADERA_ASRC2_1LMIX_INPUT_1_SOURCE 0xAA0
+#define MADERA_ASRC2_1RMIX_INPUT_1_SOURCE 0xAA8
+#define MADERA_ASRC2_2LMIX_INPUT_1_SOURCE 0xAB0
+#define MADERA_ASRC2_2RMIX_INPUT_1_SOURCE 0xAB8
+#define MADERA_ISRC1DEC1MIX_INPUT_1_SOURCE 0xB00
+#define MADERA_ISRC1DEC2MIX_INPUT_1_SOURCE 0xB08
+#define MADERA_ISRC1DEC3MIX_INPUT_1_SOURCE 0xB10
+#define MADERA_ISRC1DEC4MIX_INPUT_1_SOURCE 0xB18
+#define MADERA_ISRC1INT1MIX_INPUT_1_SOURCE 0xB20
+#define MADERA_ISRC1INT2MIX_INPUT_1_SOURCE 0xB28
+#define MADERA_ISRC1INT3MIX_INPUT_1_SOURCE 0xB30
+#define MADERA_ISRC1INT4MIX_INPUT_1_SOURCE 0xB38
+#define MADERA_ISRC2DEC1MIX_INPUT_1_SOURCE 0xB40
+#define MADERA_ISRC2DEC2MIX_INPUT_1_SOURCE 0xB48
+#define MADERA_ISRC2DEC3MIX_INPUT_1_SOURCE 0xB50
+#define MADERA_ISRC2DEC4MIX_INPUT_1_SOURCE 0xB58
+#define MADERA_ISRC2INT1MIX_INPUT_1_SOURCE 0xB60
+#define MADERA_ISRC2INT2MIX_INPUT_1_SOURCE 0xB68
+#define MADERA_ISRC2INT3MIX_INPUT_1_SOURCE 0xB70
+#define MADERA_ISRC2INT4MIX_INPUT_1_SOURCE 0xB78
+#define MADERA_ISRC3DEC1MIX_INPUT_1_SOURCE 0xB80
+#define MADERA_ISRC3DEC2MIX_INPUT_1_SOURCE 0xB88
+#define MADERA_ISRC3DEC3MIX_INPUT_1_SOURCE 0xB90
+#define MADERA_ISRC3DEC4MIX_INPUT_1_SOURCE 0xB98
+#define MADERA_ISRC3INT1MIX_INPUT_1_SOURCE 0xBA0
+#define MADERA_ISRC3INT2MIX_INPUT_1_SOURCE 0xBA8
+#define MADERA_ISRC3INT3MIX_INPUT_1_SOURCE 0xBB0
+#define MADERA_ISRC3INT4MIX_INPUT_1_SOURCE 0xBB8
+#define MADERA_ISRC4DEC1MIX_INPUT_1_SOURCE 0xBC0
+#define MADERA_ISRC4DEC2MIX_INPUT_1_SOURCE 0xBC8
+#define MADERA_ISRC4INT1MIX_INPUT_1_SOURCE 0xBE0
+#define MADERA_ISRC4INT2MIX_INPUT_1_SOURCE 0xBE8
+#define MADERA_DSP6LMIX_INPUT_1_SOURCE 0xC00
+#define MADERA_DSP6LMIX_INPUT_1_VOLUME 0xC01
+#define MADERA_DSP6LMIX_INPUT_2_SOURCE 0xC02
+#define MADERA_DSP6LMIX_INPUT_2_VOLUME 0xC03
+#define MADERA_DSP6LMIX_INPUT_3_SOURCE 0xC04
+#define MADERA_DSP6LMIX_INPUT_3_VOLUME 0xC05
+#define MADERA_DSP6LMIX_INPUT_4_SOURCE 0xC06
+#define MADERA_DSP6LMIX_INPUT_4_VOLUME 0xC07
+#define MADERA_DSP6RMIX_INPUT_1_SOURCE 0xC08
+#define MADERA_DSP6RMIX_INPUT_1_VOLUME 0xC09
+#define MADERA_DSP6RMIX_INPUT_2_SOURCE 0xC0A
+#define MADERA_DSP6RMIX_INPUT_2_VOLUME 0xC0B
+#define MADERA_DSP6RMIX_INPUT_3_SOURCE 0xC0C
+#define MADERA_DSP6RMIX_INPUT_3_VOLUME 0xC0D
+#define MADERA_DSP6RMIX_INPUT_4_SOURCE 0xC0E
+#define MADERA_DSP6RMIX_INPUT_4_VOLUME 0xC0F
+#define MADERA_DSP6AUX1MIX_INPUT_1_SOURCE 0xC10
+#define MADERA_DSP6AUX2MIX_INPUT_1_SOURCE 0xC18
+#define MADERA_DSP6AUX3MIX_INPUT_1_SOURCE 0xC20
+#define MADERA_DSP6AUX4MIX_INPUT_1_SOURCE 0xC28
+#define MADERA_DSP6AUX5MIX_INPUT_1_SOURCE 0xC30
+#define MADERA_DSP6AUX6MIX_INPUT_1_SOURCE 0xC38
+#define MADERA_DSP7LMIX_INPUT_1_SOURCE 0xC40
+#define MADERA_DSP7LMIX_INPUT_1_VOLUME 0xC41
+#define MADERA_DSP7LMIX_INPUT_2_SOURCE 0xC42
+#define MADERA_DSP7LMIX_INPUT_2_VOLUME 0xC43
+#define MADERA_DSP7LMIX_INPUT_3_SOURCE 0xC44
+#define MADERA_DSP7LMIX_INPUT_3_VOLUME 0xC45
+#define MADERA_DSP7LMIX_INPUT_4_SOURCE 0xC46
+#define MADERA_DSP7LMIX_INPUT_4_VOLUME 0xC47
+#define MADERA_DSP7RMIX_INPUT_1_SOURCE 0xC48
+#define MADERA_DSP7RMIX_INPUT_1_VOLUME 0xC49
+#define MADERA_DSP7RMIX_INPUT_2_SOURCE 0xC4A
+#define MADERA_DSP7RMIX_INPUT_2_VOLUME 0xC4B
+#define MADERA_DSP7RMIX_INPUT_3_SOURCE 0xC4C
+#define MADERA_DSP7RMIX_INPUT_3_VOLUME 0xC4D
+#define MADERA_DSP7RMIX_INPUT_4_SOURCE 0xC4E
+#define MADERA_DSP7RMIX_INPUT_4_VOLUME 0xC4F
+#define MADERA_DSP7AUX1MIX_INPUT_1_SOURCE 0xC50
+#define MADERA_DSP7AUX2MIX_INPUT_1_SOURCE 0xC58
+#define MADERA_DSP7AUX3MIX_INPUT_1_SOURCE 0xC60
+#define MADERA_DSP7AUX4MIX_INPUT_1_SOURCE 0xC68
+#define MADERA_DSP7AUX5MIX_INPUT_1_SOURCE 0xC70
+#define MADERA_DSP7AUX6MIX_INPUT_1_SOURCE 0xC78
+#define MADERA_DFC1MIX_INPUT_1_SOURCE 0xDC0
+#define MADERA_DFC2MIX_INPUT_1_SOURCE 0xDC8
+#define MADERA_DFC3MIX_INPUT_1_SOURCE 0xDD0
+#define MADERA_DFC4MIX_INPUT_1_SOURCE 0xDD8
+#define MADERA_DFC5MIX_INPUT_1_SOURCE 0xDE0
+#define MADERA_DFC6MIX_INPUT_1_SOURCE 0xDE8
+#define MADERA_DFC7MIX_INPUT_1_SOURCE 0xDF0
+#define MADERA_DFC8MIX_INPUT_1_SOURCE 0xDF8
+#define MADERA_FX_CTRL1 0xE00
+#define MADERA_FX_CTRL2 0xE01
+#define MADERA_EQ1_1 0xE10
+#define MADERA_EQ1_2 0xE11
+#define MADERA_EQ1_21 0xE24
+#define MADERA_EQ2_1 0xE26
+#define MADERA_EQ2_2 0xE27
+#define MADERA_EQ2_21 0xE3A
+#define MADERA_EQ3_1 0xE3C
+#define MADERA_EQ3_2 0xE3D
+#define MADERA_EQ3_21 0xE50
+#define MADERA_EQ4_1 0xE52
+#define MADERA_EQ4_2 0xE53
+#define MADERA_EQ4_21 0xE66
+#define MADERA_DRC1_CTRL1 0xE80
+#define MADERA_DRC1_CTRL2 0xE81
+#define MADERA_DRC1_CTRL3 0xE82
+#define MADERA_DRC1_CTRL4 0xE83
+#define MADERA_DRC1_CTRL5 0xE84
+#define MADERA_DRC2_CTRL1 0xE88
+#define MADERA_DRC2_CTRL2 0xE89
+#define MADERA_DRC2_CTRL3 0xE8A
+#define MADERA_DRC2_CTRL4 0xE8B
+#define MADERA_DRC2_CTRL5 0xE8C
+#define MADERA_HPLPF1_1 0xEC0
+#define MADERA_HPLPF1_2 0xEC1
+#define MADERA_HPLPF2_1 0xEC4
+#define MADERA_HPLPF2_2 0xEC5
+#define MADERA_HPLPF3_1 0xEC8
+#define MADERA_HPLPF3_2 0xEC9
+#define MADERA_HPLPF4_1 0xECC
+#define MADERA_HPLPF4_2 0xECD
+#define MADERA_ASRC2_ENABLE 0xED0
+#define MADERA_ASRC2_STATUS 0xED1
+#define MADERA_ASRC2_RATE1 0xED2
+#define MADERA_ASRC2_RATE2 0xED3
+#define MADERA_ASRC1_ENABLE 0xEE0
+#define MADERA_ASRC1_STATUS 0xEE1
+#define MADERA_ASRC1_RATE1 0xEE2
+#define MADERA_ASRC1_RATE2 0xEE3
+#define MADERA_ISRC_1_CTRL_1 0xEF0
+#define MADERA_ISRC_1_CTRL_2 0xEF1
+#define MADERA_ISRC_1_CTRL_3 0xEF2
+#define MADERA_ISRC_2_CTRL_1 0xEF3
+#define MADERA_ISRC_2_CTRL_2 0xEF4
+#define MADERA_ISRC_2_CTRL_3 0xEF5
+#define MADERA_ISRC_3_CTRL_1 0xEF6
+#define MADERA_ISRC_3_CTRL_2 0xEF7
+#define MADERA_ISRC_3_CTRL_3 0xEF8
+#define MADERA_ISRC_4_CTRL_1 0xEF9
+#define MADERA_ISRC_4_CTRL_2 0xEFA
+#define MADERA_ISRC_4_CTRL_3 0xEFB
+#define MADERA_CLOCK_CONTROL 0xF00
+#define MADERA_ANC_SRC 0xF01
+#define MADERA_DSP_STATUS 0xF02
+#define MADERA_ANC_COEFF_START 0xF08
+#define MADERA_ANC_COEFF_END 0xF12
+#define MADERA_FCL_FILTER_CONTROL 0xF15
+#define MADERA_FCL_ADC_REFORMATTER_CONTROL 0xF17
+#define MADERA_FCL_COEFF_START 0xF18
+#define MADERA_FCL_COEFF_END 0xF69
+#define MADERA_FCR_FILTER_CONTROL 0xF71
+#define MADERA_FCR_ADC_REFORMATTER_CONTROL 0xF73
+#define MADERA_FCR_COEFF_START 0xF74
+#define MADERA_FCR_COEFF_END 0xFC5
+#define MADERA_DAC_COMP_1 0x1300
+#define MADERA_DAC_COMP_2 0x1302
+#define MADERA_FRF_COEFFICIENT_1L_1 0x1380
+#define MADERA_FRF_COEFFICIENT_1L_2 0x1381
+#define MADERA_FRF_COEFFICIENT_1L_3 0x1382
+#define MADERA_FRF_COEFFICIENT_1L_4 0x1383
+#define MADERA_FRF_COEFFICIENT_1R_1 0x1390
+#define MADERA_FRF_COEFFICIENT_1R_2 0x1391
+#define MADERA_FRF_COEFFICIENT_1R_3 0x1392
+#define MADERA_FRF_COEFFICIENT_1R_4 0x1393
+#define MADERA_FRF_COEFFICIENT_2L_1 0x13A0
+#define MADERA_FRF_COEFFICIENT_2L_2 0x13A1
+#define MADERA_FRF_COEFFICIENT_2L_3 0x13A2
+#define MADERA_FRF_COEFFICIENT_2L_4 0x13A3
+#define MADERA_FRF_COEFFICIENT_2R_1 0x13B0
+#define MADERA_FRF_COEFFICIENT_2R_2 0x13B1
+#define MADERA_FRF_COEFFICIENT_2R_3 0x13B2
+#define MADERA_FRF_COEFFICIENT_2R_4 0x13B3
+#define MADERA_FRF_COEFFICIENT_3L_1 0x13C0
+#define MADERA_FRF_COEFFICIENT_3L_2 0x13C1
+#define MADERA_FRF_COEFFICIENT_3L_3 0x13C2
+#define MADERA_FRF_COEFFICIENT_3L_4 0x13C3
+#define MADERA_FRF_COEFFICIENT_3R_1 0x13D0
+#define MADERA_FRF_COEFFICIENT_3R_2 0x13D1
+#define MADERA_FRF_COEFFICIENT_3R_3 0x13D2
+#define MADERA_FRF_COEFFICIENT_3R_4 0x13D3
+#define MADERA_FRF_COEFFICIENT_4L_1 0x13E0
+#define MADERA_FRF_COEFFICIENT_4L_2 0x13E1
+#define MADERA_FRF_COEFFICIENT_4L_3 0x13E2
+#define MADERA_FRF_COEFFICIENT_4L_4 0x13E3
+#define MADERA_FRF_COEFFICIENT_4R_1 0x13F0
+#define MADERA_FRF_COEFFICIENT_4R_2 0x13F1
+#define MADERA_FRF_COEFFICIENT_4R_3 0x13F2
+#define MADERA_FRF_COEFFICIENT_4R_4 0x13F3
+#define CS47L35_FRF_COEFFICIENT_4L_1 0x13A0
+#define CS47L35_FRF_COEFFICIENT_4L_2 0x13A1
+#define CS47L35_FRF_COEFFICIENT_4L_3 0x13A2
+#define CS47L35_FRF_COEFFICIENT_4L_4 0x13A3
+#define CS47L35_FRF_COEFFICIENT_5L_1 0x13B0
+#define CS47L35_FRF_COEFFICIENT_5L_2 0x13B1
+#define CS47L35_FRF_COEFFICIENT_5L_3 0x13B2
+#define CS47L35_FRF_COEFFICIENT_5L_4 0x13B3
+#define CS47L35_FRF_COEFFICIENT_5R_1 0x13C0
+#define CS47L35_FRF_COEFFICIENT_5R_2 0x13C1
+#define CS47L35_FRF_COEFFICIENT_5R_3 0x13C2
+#define CS47L35_FRF_COEFFICIENT_5R_4 0x13C3
+#define MADERA_FRF_COEFFICIENT_5L_1 0x1400
+#define MADERA_FRF_COEFFICIENT_5L_2 0x1401
+#define MADERA_FRF_COEFFICIENT_5L_3 0x1402
+#define MADERA_FRF_COEFFICIENT_5L_4 0x1403
+#define MADERA_FRF_COEFFICIENT_5R_1 0x1410
+#define MADERA_FRF_COEFFICIENT_5R_2 0x1411
+#define MADERA_FRF_COEFFICIENT_5R_3 0x1412
+#define MADERA_FRF_COEFFICIENT_5R_4 0x1413
+#define MADERA_FRF_COEFFICIENT_6L_1 0x1420
+#define MADERA_FRF_COEFFICIENT_6L_2 0x1421
+#define MADERA_FRF_COEFFICIENT_6L_3 0x1422
+#define MADERA_FRF_COEFFICIENT_6L_4 0x1423
+#define MADERA_FRF_COEFFICIENT_6R_1 0x1430
+#define MADERA_FRF_COEFFICIENT_6R_2 0x1431
+#define MADERA_FRF_COEFFICIENT_6R_3 0x1432
+#define MADERA_FRF_COEFFICIENT_6R_4 0x1433
+#define MADERA_DFC1_CTRL 0x1480
+#define MADERA_DFC1_RX 0x1482
+#define MADERA_DFC1_TX 0x1484
+#define MADERA_DFC2_CTRL 0x1486
+#define MADERA_DFC2_RX 0x1488
+#define MADERA_DFC2_TX 0x148A
+#define MADERA_DFC3_CTRL 0x148C
+#define MADERA_DFC3_RX 0x148E
+#define MADERA_DFC3_TX 0x1490
+#define MADERA_DFC4_CTRL 0x1492
+#define MADERA_DFC4_RX 0x1494
+#define MADERA_DFC4_TX 0x1496
+#define MADERA_DFC5_CTRL 0x1498
+#define MADERA_DFC5_RX 0x149A
+#define MADERA_DFC5_TX 0x149C
+#define MADERA_DFC6_CTRL 0x149E
+#define MADERA_DFC6_RX 0x14A0
+#define MADERA_DFC6_TX 0x14A2
+#define MADERA_DFC7_CTRL 0x14A4
+#define MADERA_DFC7_RX 0x14A6
+#define MADERA_DFC7_TX 0x14A8
+#define MADERA_DFC8_CTRL 0x14AA
+#define MADERA_DFC8_RX 0x14AC
+#define MADERA_DFC8_TX 0x14AE
+#define MADERA_DFC_STATUS 0x14B6
+#define MADERA_ADSP2_IRQ0 0x1600
+#define MADERA_ADSP2_IRQ1 0x1601
+#define MADERA_ADSP2_IRQ2 0x1602
+#define MADERA_ADSP2_IRQ3 0x1603
+#define MADERA_ADSP2_IRQ4 0x1604
+#define MADERA_ADSP2_IRQ5 0x1605
+#define MADERA_ADSP2_IRQ6 0x1606
+#define MADERA_ADSP2_IRQ7 0x1607
+#define MADERA_GPIO1_CTRL_1 0x1700
+#define MADERA_GPIO1_CTRL_2 0x1701
+#define MADERA_GPIO2_CTRL_1 0x1702
+#define MADERA_GPIO2_CTRL_2 0x1703
+#define MADERA_GPIO16_CTRL_1 0x171E
+#define MADERA_GPIO16_CTRL_2 0x171F
+#define MADERA_GPIO38_CTRL_1 0x174A
+#define MADERA_GPIO38_CTRL_2 0x174B
+#define MADERA_GPIO40_CTRL_1 0x174E
+#define MADERA_GPIO40_CTRL_2 0x174F
+#define MADERA_IRQ1_STATUS_1 0x1800
+#define MADERA_IRQ1_STATUS_2 0x1801
+#define MADERA_IRQ1_STATUS_6 0x1805
+#define MADERA_IRQ1_STATUS_7 0x1806
+#define MADERA_IRQ1_STATUS_9 0x1808
+#define MADERA_IRQ1_STATUS_11 0x180A
+#define MADERA_IRQ1_STATUS_12 0x180B
+#define MADERA_IRQ1_STATUS_15 0x180E
+#define MADERA_IRQ1_STATUS_33 0x1820
+#define MADERA_IRQ1_MASK_1 0x1840
+#define MADERA_IRQ1_MASK_2 0x1841
+#define MADERA_IRQ1_MASK_6 0x1845
+#define MADERA_IRQ1_MASK_33 0x1860
+#define MADERA_IRQ1_RAW_STATUS_1 0x1880
+#define MADERA_IRQ1_RAW_STATUS_2 0x1881
+#define MADERA_IRQ1_RAW_STATUS_7 0x1886
+#define MADERA_IRQ1_RAW_STATUS_15 0x188E
+#define MADERA_IRQ1_RAW_STATUS_33 0x18A0
+#define MADERA_INTERRUPT_DEBOUNCE_7 0x1A06
+#define MADERA_INTERRUPT_DEBOUNCE_15 0x1A0E
+#define MADERA_IRQ1_CTRL 0x1A80
+#define MADERA_IRQ2_CTRL 0x1A82
+#define MADERA_INTERRUPT_RAW_STATUS_1 0x1AA0
+#define MADERA_WSEQ_SEQUENCE_1 0x3000
+#define MADERA_WSEQ_SEQUENCE_252 0x31F6
+#define CS47L35_OTP_HPDET_CAL_1 0x31F8
+#define CS47L35_OTP_HPDET_CAL_2 0x31FA
+#define MADERA_WSEQ_SEQUENCE_508 0x33F6
+#define CS47L85_OTP_HPDET_CAL_1 0x33F8
+#define CS47L85_OTP_HPDET_CAL_2 0x33FA
+#define MADERA_OTP_HPDET_CAL_1 0x20004
+#define MADERA_OTP_HPDET_CAL_2 0x20006
+#define MADERA_DSP1_CONFIG_1 0x0FFE00
+#define MADERA_DSP1_CONFIG_2 0x0FFE02
+#define MADERA_DSP1_SCRATCH_1 0x0FFE40
+#define MADERA_DSP1_SCRATCH_2 0x0FFE42
+#define MADERA_DSP1_PMEM_ERR_ADDR___XMEM_ERR_ADDR 0x0FFE7C
+#define MADERA_DSP2_CONFIG_1 0x17FE00
+#define MADERA_DSP2_CONFIG_2 0x17FE02
+#define MADERA_DSP2_SCRATCH_1 0x17FE40
+#define MADERA_DSP2_SCRATCH_2 0x17FE42
+#define MADERA_DSP2_PMEM_ERR_ADDR___XMEM_ERR_ADDR 0x17FE7C
+#define MADERA_DSP3_CONFIG_1 0x1FFE00
+#define MADERA_DSP3_CONFIG_2 0x1FFE02
+#define MADERA_DSP3_SCRATCH_1 0x1FFE40
+#define MADERA_DSP3_SCRATCH_2 0x1FFE42
+#define MADERA_DSP3_PMEM_ERR_ADDR___XMEM_ERR_ADDR 0x1FFE7C
+#define MADERA_DSP4_CONFIG_1 0x27FE00
+#define MADERA_DSP4_CONFIG_2 0x27FE02
+#define MADERA_DSP4_SCRATCH_1 0x27FE40
+#define MADERA_DSP4_SCRATCH_2 0x27FE42
+#define MADERA_DSP4_PMEM_ERR_ADDR___XMEM_ERR_ADDR 0x27FE7C
+#define MADERA_DSP5_CONFIG_1 0x2FFE00
+#define MADERA_DSP5_CONFIG_2 0x2FFE02
+#define MADERA_DSP5_SCRATCH_1 0x2FFE40
+#define MADERA_DSP5_SCRATCH_2 0x2FFE42
+#define MADERA_DSP5_PMEM_ERR_ADDR___XMEM_ERR_ADDR 0x2FFE7C
+#define MADERA_DSP6_CONFIG_1 0x37FE00
+#define MADERA_DSP6_CONFIG_2 0x37FE02
+#define MADERA_DSP6_SCRATCH_1 0x37FE40
+#define MADERA_DSP6_SCRATCH_2 0x37FE42
+#define MADERA_DSP6_PMEM_ERR_ADDR___XMEM_ERR_ADDR 0x37FE7C
+#define MADERA_DSP7_CONFIG_1 0x3FFE00
+#define MADERA_DSP7_CONFIG_2 0x3FFE02
+#define MADERA_DSP7_SCRATCH_1 0x3FFE40
+#define MADERA_DSP7_SCRATCH_2 0x3FFE42
+#define MADERA_DSP7_PMEM_ERR_ADDR___XMEM_ERR_ADDR 0x3FFE7C
+
+/* (0x0000) Software_Reset */
+#define MADERA_SW_RST_DEV_ID1_MASK 0xFFFF
+#define MADERA_SW_RST_DEV_ID1_SHIFT 0
+#define MADERA_SW_RST_DEV_ID1_WIDTH 16
+
+/* (0x0001) Hardware_Revision */
+#define MADERA_HW_REVISION_MASK 0x00FF
+#define MADERA_HW_REVISION_SHIFT 0
+#define MADERA_HW_REVISION_WIDTH 8
+
+/* (0x0020) Tone_Generator_1 */
+#define MADERA_TONE2_ENA 0x0002
+#define MADERA_TONE2_ENA_MASK 0x0002
+#define MADERA_TONE2_ENA_SHIFT 1
+#define MADERA_TONE2_ENA_WIDTH 1
+#define MADERA_TONE1_ENA 0x0001
+#define MADERA_TONE1_ENA_MASK 0x0001
+#define MADERA_TONE1_ENA_SHIFT 0
+#define MADERA_TONE1_ENA_WIDTH 1
+
+/* (0x0021) Tone_Generator_2 */
+#define MADERA_TONE1_LVL_0_MASK 0xFFFF
+#define MADERA_TONE1_LVL_0_SHIFT 0
+#define MADERA_TONE1_LVL_0_WIDTH 16
+
+/* (0x0022) Tone_Generator_3 */
+#define MADERA_TONE1_LVL_MASK 0x00FF
+#define MADERA_TONE1_LVL_SHIFT 0
+#define MADERA_TONE1_LVL_WIDTH 8
+
+/* (0x0023) Tone_Generator_4 */
+#define MADERA_TONE2_LVL_0_MASK 0xFFFF
+#define MADERA_TONE2_LVL_0_SHIFT 0
+#define MADERA_TONE2_LVL_0_WIDTH 16
+
+/* (0x0024) Tone_Generator_5 */
+#define MADERA_TONE2_LVL_MASK 0x00FF
+#define MADERA_TONE2_LVL_SHIFT 0
+#define MADERA_TONE2_LVL_WIDTH 8
+
+/* (0x0030) PWM_Drive_1 */
+#define MADERA_PWM2_ENA 0x0002
+#define MADERA_PWM2_ENA_MASK 0x0002
+#define MADERA_PWM2_ENA_SHIFT 1
+#define MADERA_PWM2_ENA_WIDTH 1
+#define MADERA_PWM1_ENA 0x0001
+#define MADERA_PWM1_ENA_MASK 0x0001
+#define MADERA_PWM1_ENA_SHIFT 0
+#define MADERA_PWM1_ENA_WIDTH 1
+
+/* (0x00A0) Comfort_Noise_Generator */
+#define MADERA_NOISE_GEN_ENA 0x0020
+#define MADERA_NOISE_GEN_ENA_MASK 0x0020
+#define MADERA_NOISE_GEN_ENA_SHIFT 5
+#define MADERA_NOISE_GEN_ENA_WIDTH 1
+#define MADERA_NOISE_GEN_GAIN_MASK 0x001F
+#define MADERA_NOISE_GEN_GAIN_SHIFT 0
+#define MADERA_NOISE_GEN_GAIN_WIDTH 5
+
+/* (0x0100) Clock_32k_1 */
+#define MADERA_CLK_32K_ENA 0x0040
+#define MADERA_CLK_32K_ENA_MASK 0x0040
+#define MADERA_CLK_32K_ENA_SHIFT 6
+#define MADERA_CLK_32K_ENA_WIDTH 1
+#define MADERA_CLK_32K_SRC_MASK 0x0003
+#define MADERA_CLK_32K_SRC_SHIFT 0
+#define MADERA_CLK_32K_SRC_WIDTH 2
+
+/* (0x0101) System_Clock_1 */
+#define MADERA_SYSCLK_FRAC 0x8000
+#define MADERA_SYSCLK_FRAC_MASK 0x8000
+#define MADERA_SYSCLK_FRAC_SHIFT 15
+#define MADERA_SYSCLK_FRAC_WIDTH 1
+#define MADERA_SYSCLK_FREQ_MASK 0x0700
+#define MADERA_SYSCLK_FREQ_SHIFT 8
+#define MADERA_SYSCLK_FREQ_WIDTH 3
+#define MADERA_SYSCLK_ENA 0x0040
+#define MADERA_SYSCLK_ENA_MASK 0x0040
+#define MADERA_SYSCLK_ENA_SHIFT 6
+#define MADERA_SYSCLK_ENA_WIDTH 1
+#define MADERA_SYSCLK_SRC_MASK 0x000F
+#define MADERA_SYSCLK_SRC_SHIFT 0
+#define MADERA_SYSCLK_SRC_WIDTH 4
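
The field macros in this header follow a uniform value/_MASK/_SHIFT/_WIDTH pattern, so a driver updates a field by masking with the *_MASK constant and shifting the new code by *_SHIFT. A minimal illustrative sketch for the System_Clock_1 fields, assuming the MADERA_SYSTEM_CLOCK_1 address define declared earlier in this header and a regmap set up as the Madera core drivers do:

    #include <linux/regmap.h>

    /* Program the SYSCLK source and frequency fields of System_Clock_1
     * (0x0101), then set the enable bit.  src/freq are raw field codes. */
    static int madera_set_sysclk_sketch(struct regmap *regmap,
                                        unsigned int src, unsigned int freq)
    {
            unsigned int mask = MADERA_SYSCLK_SRC_MASK | MADERA_SYSCLK_FREQ_MASK;
            unsigned int val = (src << MADERA_SYSCLK_SRC_SHIFT) |
                               (freq << MADERA_SYSCLK_FREQ_SHIFT);
            int ret;

            /* Read-modify-write only the SRC and FREQ fields... */
            ret = regmap_update_bits(regmap, MADERA_SYSTEM_CLOCK_1, mask, val);
            if (ret)
                    return ret;

            /* ...then enable the clock. */
            return regmap_update_bits(regmap, MADERA_SYSTEM_CLOCK_1,
                                      MADERA_SYSCLK_ENA_MASK, MADERA_SYSCLK_ENA);
    }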
+
+/* (0x0102) Sample_rate_1 */
+#define MADERA_SAMPLE_RATE_1_MASK 0x001F
+#define MADERA_SAMPLE_RATE_1_SHIFT 0
+#define MADERA_SAMPLE_RATE_1_WIDTH 5
+
+/* (0x0103) Sample_rate_2 */
+#define MADERA_SAMPLE_RATE_2_MASK 0x001F
+#define MADERA_SAMPLE_RATE_2_SHIFT 0
+#define MADERA_SAMPLE_RATE_2_WIDTH 5
+
+/* (0x0104) Sample_rate_3 */
+#define MADERA_SAMPLE_RATE_3_MASK 0x001F
+#define MADERA_SAMPLE_RATE_3_SHIFT 0
+#define MADERA_SAMPLE_RATE_3_WIDTH 5
+
+/* (0x0112) Async_clock_1 */
+#define MADERA_ASYNC_CLK_FREQ_MASK 0x0700
+#define MADERA_ASYNC_CLK_FREQ_SHIFT 8
+#define MADERA_ASYNC_CLK_FREQ_WIDTH 3
+#define MADERA_ASYNC_CLK_ENA 0x0040
+#define MADERA_ASYNC_CLK_ENA_MASK 0x0040
+#define MADERA_ASYNC_CLK_ENA_SHIFT 6
+#define MADERA_ASYNC_CLK_ENA_WIDTH 1
+#define MADERA_ASYNC_CLK_SRC_MASK 0x000F
+#define MADERA_ASYNC_CLK_SRC_SHIFT 0
+#define MADERA_ASYNC_CLK_SRC_WIDTH 4
+
+/* (0x0113) Async_sample_rate_1 */
+#define MADERA_ASYNC_SAMPLE_RATE_1_MASK 0x001F
+#define MADERA_ASYNC_SAMPLE_RATE_1_SHIFT 0
+#define MADERA_ASYNC_SAMPLE_RATE_1_WIDTH 5
+
+/* (0x0114) Async_sample_rate_2 */
+#define MADERA_ASYNC_SAMPLE_RATE_2_MASK 0x001F
+#define MADERA_ASYNC_SAMPLE_RATE_2_SHIFT 0
+#define MADERA_ASYNC_SAMPLE_RATE_2_WIDTH 5
+
+/* (0x0120) DSP_Clock_1 */
+#define MADERA_DSP_CLK_FREQ_LEGACY 0x0700
+#define MADERA_DSP_CLK_FREQ_LEGACY_MASK 0x0700
+#define MADERA_DSP_CLK_FREQ_LEGACY_SHIFT 8
+#define MADERA_DSP_CLK_FREQ_LEGACY_WIDTH 3
+#define MADERA_DSP_CLK_ENA 0x0040
+#define MADERA_DSP_CLK_ENA_MASK 0x0040
+#define MADERA_DSP_CLK_ENA_SHIFT 6
+#define MADERA_DSP_CLK_ENA_WIDTH 1
+#define MADERA_DSP_CLK_SRC 0x000F
+#define MADERA_DSP_CLK_SRC_MASK 0x000F
+#define MADERA_DSP_CLK_SRC_SHIFT 0
+#define MADERA_DSP_CLK_SRC_WIDTH 4
+
+/* (0x0122) DSP_Clock_2 */
+#define MADERA_DSP_CLK_FREQ_MASK 0x03FF
+#define MADERA_DSP_CLK_FREQ_SHIFT 0
+#define MADERA_DSP_CLK_FREQ_WIDTH 10
+
+/* (0x0149) Output_system_clock */
+#define MADERA_OPCLK_ENA 0x8000
+#define MADERA_OPCLK_ENA_MASK 0x8000
+#define MADERA_OPCLK_ENA_SHIFT 15
+#define MADERA_OPCLK_ENA_WIDTH 1
+#define MADERA_OPCLK_DIV_MASK 0x00F8
+#define MADERA_OPCLK_DIV_SHIFT 3
+#define MADERA_OPCLK_DIV_WIDTH 5
+#define MADERA_OPCLK_SEL_MASK 0x0007
+#define MADERA_OPCLK_SEL_SHIFT 0
+#define MADERA_OPCLK_SEL_WIDTH 3
+
+/* (0x014A) Output_async_clock */
+#define MADERA_OPCLK_ASYNC_ENA 0x8000
+#define MADERA_OPCLK_ASYNC_ENA_MASK 0x8000
+#define MADERA_OPCLK_ASYNC_ENA_SHIFT 15
+#define MADERA_OPCLK_ASYNC_ENA_WIDTH 1
+#define MADERA_OPCLK_ASYNC_DIV_MASK 0x00F8
+#define MADERA_OPCLK_ASYNC_DIV_SHIFT 3
+#define MADERA_OPCLK_ASYNC_DIV_WIDTH 5
+#define MADERA_OPCLK_ASYNC_SEL_MASK 0x0007
+#define MADERA_OPCLK_ASYNC_SEL_SHIFT 0
+#define MADERA_OPCLK_ASYNC_SEL_WIDTH 3
+
+/* (0x0171) FLL1_Control_1 */
+#define MADERA_FLL1_FREERUN 0x0002
+#define MADERA_FLL1_FREERUN_MASK 0x0002
+#define MADERA_FLL1_FREERUN_SHIFT 1
+#define MADERA_FLL1_FREERUN_WIDTH 1
+#define MADERA_FLL1_ENA 0x0001
+#define MADERA_FLL1_ENA_MASK 0x0001
+#define MADERA_FLL1_ENA_SHIFT 0
+#define MADERA_FLL1_ENA_WIDTH 1
+
+/* (0x0172) FLL1_Control_2 */
+#define MADERA_FLL1_CTRL_UPD 0x8000
+#define MADERA_FLL1_CTRL_UPD_MASK 0x8000
+#define MADERA_FLL1_CTRL_UPD_SHIFT 15
+#define MADERA_FLL1_CTRL_UPD_WIDTH 1
+#define MADERA_FLL1_N_MASK 0x03FF
+#define MADERA_FLL1_N_SHIFT 0
+#define MADERA_FLL1_N_WIDTH 10
+
+/* (0x0173) FLL1_Control_3 */
+#define MADERA_FLL1_THETA_MASK 0xFFFF
+#define MADERA_FLL1_THETA_SHIFT 0
+#define MADERA_FLL1_THETA_WIDTH 16
+
+/* (0x0174) FLL1_Control_4 */
+#define MADERA_FLL1_LAMBDA_MASK 0xFFFF
+#define MADERA_FLL1_LAMBDA_SHIFT 0
+#define MADERA_FLL1_LAMBDA_WIDTH 16
+
+/* (0x0175) FLL1_Control_5 */
+#define MADERA_FLL1_FRATIO_MASK 0x0F00
+#define MADERA_FLL1_FRATIO_SHIFT 8
+#define MADERA_FLL1_FRATIO_WIDTH 4
+
+/* (0x0176) FLL1_Control_6 */
+#define MADERA_FLL1_REFCLK_DIV_MASK 0x00C0
+#define MADERA_FLL1_REFCLK_DIV_SHIFT 6
+#define MADERA_FLL1_REFCLK_DIV_WIDTH 2
+#define MADERA_FLL1_REFCLK_SRC_MASK 0x000F
+#define MADERA_FLL1_REFCLK_SRC_SHIFT 0
+#define MADERA_FLL1_REFCLK_SRC_WIDTH 4
+
+/* (0x0177) FLL1_Loop_Filter_Test_1 */
+#define MADERA_FLL1_FRC_INTEG_UPD 0x8000
+#define MADERA_FLL1_FRC_INTEG_UPD_MASK 0x8000
+#define MADERA_FLL1_FRC_INTEG_UPD_SHIFT 15
+#define MADERA_FLL1_FRC_INTEG_UPD_WIDTH 1
+#define MADERA_FLL1_FRC_INTEG_VAL_MASK 0x0FFF
+#define MADERA_FLL1_FRC_INTEG_VAL_SHIFT 0
+#define MADERA_FLL1_FRC_INTEG_VAL_WIDTH 12
+
+/* (0x0179) FLL1_Control_7 */
+#define MADERA_FLL1_GAIN_MASK 0x003C
+#define MADERA_FLL1_GAIN_SHIFT 2
+#define MADERA_FLL1_GAIN_WIDTH 4
+
+/* (0x017A) FLL1_EFS_2 */
+#define MADERA_FLL1_PHASE_GAIN_MASK 0xF000
+#define MADERA_FLL1_PHASE_GAIN_SHIFT 12
+#define MADERA_FLL1_PHASE_GAIN_WIDTH 4
+#define MADERA_FLL1_PHASE_ENA_MASK 0x0800
+#define MADERA_FLL1_PHASE_ENA_SHIFT 11
+#define MADERA_FLL1_PHASE_ENA_WIDTH 1
+
+/* (0x0181) FLL1_Synchroniser_1 */
+#define MADERA_FLL1_SYNC_ENA 0x0001
+#define MADERA_FLL1_SYNC_ENA_MASK 0x0001
+#define MADERA_FLL1_SYNC_ENA_SHIFT 0
+#define MADERA_FLL1_SYNC_ENA_WIDTH 1
+
+/* (0x0182) FLL1_Synchroniser_2 */
+#define MADERA_FLL1_SYNC_N_MASK 0x03FF
+#define MADERA_FLL1_SYNC_N_SHIFT 0
+#define MADERA_FLL1_SYNC_N_WIDTH 10
+
+/* (0x0183) FLL1_Synchroniser_3 */
+#define MADERA_FLL1_SYNC_THETA_MASK 0xFFFF
+#define MADERA_FLL1_SYNC_THETA_SHIFT 0
+#define MADERA_FLL1_SYNC_THETA_WIDTH 16
+
+/* (0x0184) FLL1_Synchroniser_4 */
+#define MADERA_FLL1_SYNC_LAMBDA_MASK 0xFFFF
+#define MADERA_FLL1_SYNC_LAMBDA_SHIFT 0
+#define MADERA_FLL1_SYNC_LAMBDA_WIDTH 16
+
+/* (0x0185) FLL1_Synchroniser_5 */
+#define MADERA_FLL1_SYNC_FRATIO_MASK 0x0700
+#define MADERA_FLL1_SYNC_FRATIO_SHIFT 8
+#define MADERA_FLL1_SYNC_FRATIO_WIDTH 3
+
+/* (0x0186) FLL1_Synchroniser_6 */
+#define MADERA_FLL1_SYNCCLK_DIV_MASK 0x00C0
+#define MADERA_FLL1_SYNCCLK_DIV_SHIFT 6
+#define MADERA_FLL1_SYNCCLK_DIV_WIDTH 2
+#define MADERA_FLL1_SYNCCLK_SRC_MASK 0x000F
+#define MADERA_FLL1_SYNCCLK_SRC_SHIFT 0
+#define MADERA_FLL1_SYNCCLK_SRC_WIDTH 4
+
+/* (0x0187) FLL1_Synchroniser_7 */
+#define MADERA_FLL1_SYNC_GAIN_MASK 0x003C
+#define MADERA_FLL1_SYNC_GAIN_SHIFT 2
+#define MADERA_FLL1_SYNC_GAIN_WIDTH 4
+#define MADERA_FLL1_SYNC_DFSAT 0x0001
+#define MADERA_FLL1_SYNC_DFSAT_MASK 0x0001
+#define MADERA_FLL1_SYNC_DFSAT_SHIFT 0
+#define MADERA_FLL1_SYNC_DFSAT_WIDTH 1
+
+/* (0x01D1) FLL_AO_Control_1 */
+#define MADERA_FLL_AO_HOLD 0x0004
+#define MADERA_FLL_AO_HOLD_MASK 0x0004
+#define MADERA_FLL_AO_HOLD_SHIFT 2
+#define MADERA_FLL_AO_HOLD_WIDTH 1
+#define MADERA_FLL_AO_FREERUN 0x0002
+#define MADERA_FLL_AO_FREERUN_MASK 0x0002
+#define MADERA_FLL_AO_FREERUN_SHIFT 1
+#define MADERA_FLL_AO_FREERUN_WIDTH 1
+#define MADERA_FLL_AO_ENA 0x0001
+#define MADERA_FLL_AO_ENA_MASK 0x0001
+#define MADERA_FLL_AO_ENA_SHIFT 0
+#define MADERA_FLL_AO_ENA_WIDTH 1
+
+/* (0x01D2) FLL_AO_Control_2 */
+#define MADERA_FLL_AO_CTRL_UPD 0x8000
+#define MADERA_FLL_AO_CTRL_UPD_MASK 0x8000
+#define MADERA_FLL_AO_CTRL_UPD_SHIFT 15
+#define MADERA_FLL_AO_CTRL_UPD_WIDTH 1
+
+/* (0x01D6) FLL_AO_Control_6 */
+#define MADERA_FLL_AO_REFCLK_SRC_MASK 0x000F
+#define MADERA_FLL_AO_REFCLK_SRC_SHIFT 0
+#define MADERA_FLL_AO_REFCLK_SRC_WIDTH 4
+
+/* (0x0200) Mic_Charge_Pump_1 */
+#define MADERA_CPMIC_BYPASS 0x0002
+#define MADERA_CPMIC_BYPASS_MASK 0x0002
+#define MADERA_CPMIC_BYPASS_SHIFT 1
+#define MADERA_CPMIC_BYPASS_WIDTH 1
+#define MADERA_CPMIC_ENA 0x0001
+#define MADERA_CPMIC_ENA_MASK 0x0001
+#define MADERA_CPMIC_ENA_SHIFT 0
+#define MADERA_CPMIC_ENA_WIDTH 1
+
+/* (0x0210) LDO1_Control_1 */
+#define MADERA_LDO1_VSEL_MASK 0x07E0
+#define MADERA_LDO1_VSEL_SHIFT 5
+#define MADERA_LDO1_VSEL_WIDTH 6
+#define MADERA_LDO1_FAST 0x0010
+#define MADERA_LDO1_FAST_MASK 0x0010
+#define MADERA_LDO1_FAST_SHIFT 4
+#define MADERA_LDO1_FAST_WIDTH 1
+#define MADERA_LDO1_DISCH 0x0004
+#define MADERA_LDO1_DISCH_MASK 0x0004
+#define MADERA_LDO1_DISCH_SHIFT 2
+#define MADERA_LDO1_DISCH_WIDTH 1
+#define MADERA_LDO1_BYPASS 0x0002
+#define MADERA_LDO1_BYPASS_MASK 0x0002
+#define MADERA_LDO1_BYPASS_SHIFT 1
+#define MADERA_LDO1_BYPASS_WIDTH 1
+#define MADERA_LDO1_ENA 0x0001
+#define MADERA_LDO1_ENA_MASK 0x0001
+#define MADERA_LDO1_ENA_SHIFT 0
+#define MADERA_LDO1_ENA_WIDTH 1
+
+/* (0x0213) LDO2_Control_1 */
+#define MADERA_LDO2_VSEL_MASK 0x07E0
+#define MADERA_LDO2_VSEL_SHIFT 5
+#define MADERA_LDO2_VSEL_WIDTH 6
+#define MADERA_LDO2_FAST 0x0010
+#define MADERA_LDO2_FAST_MASK 0x0010
+#define MADERA_LDO2_FAST_SHIFT 4
+#define MADERA_LDO2_FAST_WIDTH 1
+#define MADERA_LDO2_DISCH 0x0004
+#define MADERA_LDO2_DISCH_MASK 0x0004
+#define MADERA_LDO2_DISCH_SHIFT 2
+#define MADERA_LDO2_DISCH_WIDTH 1
+#define MADERA_LDO2_BYPASS 0x0002
+#define MADERA_LDO2_BYPASS_MASK 0x0002
+#define MADERA_LDO2_BYPASS_SHIFT 1
+#define MADERA_LDO2_BYPASS_WIDTH 1
+#define MADERA_LDO2_ENA 0x0001
+#define MADERA_LDO2_ENA_MASK 0x0001
+#define MADERA_LDO2_ENA_SHIFT 0
+#define MADERA_LDO2_ENA_WIDTH 1
+
+/* (0x0218) Mic_Bias_Ctrl_1 */
+#define MADERA_MICB1_ENA 0x0001
+#define MADERA_MICB1_ENA_MASK 0x0001
+#define MADERA_MICB1_ENA_SHIFT 0
+#define MADERA_MICB1_ENA_WIDTH 1
+
+/* (0x021C) Mic_Bias_Ctrl_5 */
+#define MADERA_MICB1D_ENA 0x1000
+#define MADERA_MICB1D_ENA_MASK 0x1000
+#define MADERA_MICB1D_ENA_SHIFT 12
+#define MADERA_MICB1D_ENA_WIDTH 1
+#define MADERA_MICB1C_ENA 0x0100
+#define MADERA_MICB1C_ENA_MASK 0x0100
+#define MADERA_MICB1C_ENA_SHIFT 8
+#define MADERA_MICB1C_ENA_WIDTH 1
+#define MADERA_MICB1B_ENA 0x0010
+#define MADERA_MICB1B_ENA_MASK 0x0010
+#define MADERA_MICB1B_ENA_SHIFT 4
+#define MADERA_MICB1B_ENA_WIDTH 1
+#define MADERA_MICB1A_ENA 0x0001
+#define MADERA_MICB1A_ENA_MASK 0x0001
+#define MADERA_MICB1A_ENA_SHIFT 0
+#define MADERA_MICB1A_ENA_WIDTH 1
+
+/* (0x021E) Mic_Bias_Ctrl_6 */
+#define MADERA_MICB2D_ENA 0x1000
+#define MADERA_MICB2D_ENA_MASK 0x1000
+#define MADERA_MICB2D_ENA_SHIFT 12
+#define MADERA_MICB2D_ENA_WIDTH 1
+#define MADERA_MICB2C_ENA 0x0100
+#define MADERA_MICB2C_ENA_MASK 0x0100
+#define MADERA_MICB2C_ENA_SHIFT 8
+#define MADERA_MICB2C_ENA_WIDTH 1
+#define MADERA_MICB2B_ENA 0x0010
+#define MADERA_MICB2B_ENA_MASK 0x0010
+#define MADERA_MICB2B_ENA_SHIFT 4
+#define MADERA_MICB2B_ENA_WIDTH 1
+#define MADERA_MICB2A_ENA 0x0001
+#define MADERA_MICB2A_ENA_MASK 0x0001
+#define MADERA_MICB2A_ENA_SHIFT 0
+#define MADERA_MICB2A_ENA_WIDTH 1
+
+/* (0x0225) - HP Ctrl 1L */
+#define MADERA_RMV_SHRT_HP1L 0x4000
+#define MADERA_RMV_SHRT_HP1L_MASK 0x4000
+#define MADERA_RMV_SHRT_HP1L_SHIFT 14
+#define MADERA_RMV_SHRT_HP1L_WIDTH 1
+#define MADERA_HP1L_FLWR 0x0004
+#define MADERA_HP1L_FLWR_MASK 0x0004
+#define MADERA_HP1L_FLWR_SHIFT 2
+#define MADERA_HP1L_FLWR_WIDTH 1
+#define MADERA_HP1L_SHRTI 0x0002
+#define MADERA_HP1L_SHRTI_MASK 0x0002
+#define MADERA_HP1L_SHRTI_SHIFT 1
+#define MADERA_HP1L_SHRTI_WIDTH 1
+#define MADERA_HP1L_SHRTO 0x0001
+#define MADERA_HP1L_SHRTO_MASK 0x0001
+#define MADERA_HP1L_SHRTO_SHIFT 0
+#define MADERA_HP1L_SHRTO_WIDTH 1
+
+/* (0x0226) - HP Ctrl 1R */
+#define MADERA_RMV_SHRT_HP1R 0x4000
+#define MADERA_RMV_SHRT_HP1R_MASK 0x4000
+#define MADERA_RMV_SHRT_HP1R_SHIFT 14
+#define MADERA_RMV_SHRT_HP1R_WIDTH 1
+#define MADERA_HP1R_FLWR 0x0004
+#define MADERA_HP1R_FLWR_MASK 0x0004
+#define MADERA_HP1R_FLWR_SHIFT 2
+#define MADERA_HP1R_FLWR_WIDTH 1
+#define MADERA_HP1R_SHRTI 0x0002
+#define MADERA_HP1R_SHRTI_MASK 0x0002
+#define MADERA_HP1R_SHRTI_SHIFT 1
+#define MADERA_HP1R_SHRTI_WIDTH 1
+#define MADERA_HP1R_SHRTO 0x0001
+#define MADERA_HP1R_SHRTO_MASK 0x0001
+#define MADERA_HP1R_SHRTO_SHIFT 0
+#define MADERA_HP1R_SHRTO_WIDTH 1
+
+/* (0x0293) Accessory_Detect_Mode_1 */
+#define MADERA_ACCDET_SRC 0x2000
+#define MADERA_ACCDET_SRC_MASK 0x2000
+#define MADERA_ACCDET_SRC_SHIFT 13
+#define MADERA_ACCDET_SRC_WIDTH 1
+#define MADERA_ACCDET_POLARITY_INV_ENA 0x0080
+#define MADERA_ACCDET_POLARITY_INV_ENA_MASK 0x0080
+#define MADERA_ACCDET_POLARITY_INV_ENA_SHIFT 7
+#define MADERA_ACCDET_POLARITY_INV_ENA_WIDTH 1
+#define MADERA_ACCDET_MODE_MASK 0x0007
+#define MADERA_ACCDET_MODE_SHIFT 0
+#define MADERA_ACCDET_MODE_WIDTH 3
+
+/* (0x0299) Headphone_Detect_0 */
+#define MADERA_HPD_GND_SEL 0x0007
+#define MADERA_HPD_GND_SEL_MASK 0x0007
+#define MADERA_HPD_GND_SEL_SHIFT 0
+#define MADERA_HPD_GND_SEL_WIDTH 3
+#define MADERA_HPD_SENSE_SEL 0x00F0
+#define MADERA_HPD_SENSE_SEL_MASK 0x00F0
+#define MADERA_HPD_SENSE_SEL_SHIFT 4
+#define MADERA_HPD_SENSE_SEL_WIDTH 4
+#define MADERA_HPD_FRC_SEL 0x0F00
+#define MADERA_HPD_FRC_SEL_MASK 0x0F00
+#define MADERA_HPD_FRC_SEL_SHIFT 8
+#define MADERA_HPD_FRC_SEL_WIDTH 4
+#define MADERA_HPD_OUT_SEL 0x7000
+#define MADERA_HPD_OUT_SEL_MASK 0x7000
+#define MADERA_HPD_OUT_SEL_SHIFT 12
+#define MADERA_HPD_OUT_SEL_WIDTH 3
+#define MADERA_HPD_OVD_ENA_SEL 0x8000
+#define MADERA_HPD_OVD_ENA_SEL_MASK 0x8000
+#define MADERA_HPD_OVD_ENA_SEL_SHIFT 15
+#define MADERA_HPD_OVD_ENA_SEL_WIDTH 1
+
+/* (0x029B) Headphone_Detect_1 */
+#define MADERA_HP_IMPEDANCE_RANGE_MASK 0x0600
+#define MADERA_HP_IMPEDANCE_RANGE_SHIFT 9
+#define MADERA_HP_IMPEDANCE_RANGE_WIDTH 2
+#define MADERA_HP_STEP_SIZE 0x0100
+#define MADERA_HP_STEP_SIZE_MASK 0x0100
+#define MADERA_HP_STEP_SIZE_SHIFT 8
+#define MADERA_HP_STEP_SIZE_WIDTH 1
+#define MADERA_HP_CLK_DIV_MASK 0x0018
+#define MADERA_HP_CLK_DIV_SHIFT 3
+#define MADERA_HP_CLK_DIV_WIDTH 2
+#define MADERA_HP_RATE_MASK 0x0006
+#define MADERA_HP_RATE_SHIFT 1
+#define MADERA_HP_RATE_WIDTH 2
+#define MADERA_HP_POLL 0x0001
+#define MADERA_HP_POLL_MASK 0x0001
+#define MADERA_HP_POLL_SHIFT 0
+#define MADERA_HP_POLL_WIDTH 1
+
+/* (0x029C) Headphone_Detect_2 */
+#define MADERA_HP_DONE_MASK 0x8000
+#define MADERA_HP_DONE_SHIFT 15
+#define MADERA_HP_DONE_WIDTH 1
+#define MADERA_HP_LVL_MASK 0x7FFF
+#define MADERA_HP_LVL_SHIFT 0
+#define MADERA_HP_LVL_WIDTH 15
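
Status registers decode the same way in reverse: read the register, test single-bit flags through their mask defines, and extract multi-bit fields with _MASK and _SHIFT. A sketch for Headphone_Detect_2, assuming the MADERA_HEADPHONE_DETECT_2 address define from earlier in this header:

    #include <linux/errno.h>
    #include <linux/regmap.h>

    /* Return the raw HP_LVL reading, or -EBUSY while the measurement
     * has not completed (HP_DONE still clear). */
    static int madera_read_hp_level_sketch(struct regmap *regmap)
    {
            unsigned int val;
            int ret;

            ret = regmap_read(regmap, MADERA_HEADPHONE_DETECT_2, &val);
            if (ret)
                    return ret;

            if (!(val & MADERA_HP_DONE_MASK))
                    return -EBUSY;

            return (val & MADERA_HP_LVL_MASK) >> MADERA_HP_LVL_SHIFT;
    }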
+
+/* (0x029D) Headphone_Detect_3 */
+#define MADERA_HP_DACVAL_MASK 0x03FF
+#define MADERA_HP_DACVAL_SHIFT 0
+#define MADERA_HP_DACVAL_WIDTH 10
+
+/* (0x029F) - Headphone Detect 5 */
+#define MADERA_HP_DACVAL_DOWN_MASK 0x03FF
+#define MADERA_HP_DACVAL_DOWN_SHIFT 0
+#define MADERA_HP_DACVAL_DOWN_WIDTH 10
+
+/* (0x02A2) Mic_Detect_1_Control_0 */
+#define MADERA_MICD1_GND_MASK 0x0007
+#define MADERA_MICD1_GND_SHIFT 0
+#define MADERA_MICD1_GND_WIDTH 3
+#define MADERA_MICD1_SENSE_MASK 0x00F0
+#define MADERA_MICD1_SENSE_SHIFT 4
+#define MADERA_MICD1_SENSE_WIDTH 4
+#define MADERA_MICD1_ADC_MODE_MASK 0x8000
+#define MADERA_MICD1_ADC_MODE_SHIFT 15
+#define MADERA_MICD1_ADC_MODE_WIDTH 1
+
+/* (0x02A3) Mic_Detect_1_Control_1 */
+#define MADERA_MICD_BIAS_STARTTIME_MASK 0xF000
+#define MADERA_MICD_BIAS_STARTTIME_SHIFT 12
+#define MADERA_MICD_BIAS_STARTTIME_WIDTH 4
+#define MADERA_MICD_RATE_MASK 0x0F00
+#define MADERA_MICD_RATE_SHIFT 8
+#define MADERA_MICD_RATE_WIDTH 4
+#define MADERA_MICD_BIAS_SRC_MASK 0x00F0
+#define MADERA_MICD_BIAS_SRC_SHIFT 4
+#define MADERA_MICD_BIAS_SRC_WIDTH 4
+#define MADERA_MICD_DBTIME 0x0002
+#define MADERA_MICD_DBTIME_MASK 0x0002
+#define MADERA_MICD_DBTIME_SHIFT 1
+#define MADERA_MICD_DBTIME_WIDTH 1
+#define MADERA_MICD_ENA 0x0001
+#define MADERA_MICD_ENA_MASK 0x0001
+#define MADERA_MICD_ENA_SHIFT 0
+#define MADERA_MICD_ENA_WIDTH 1
+
+/* (0x02A4) Mic_Detect_1_Control_2 */
+#define MADERA_MICD_LVL_SEL_MASK 0x00FF
+#define MADERA_MICD_LVL_SEL_SHIFT 0
+#define MADERA_MICD_LVL_SEL_WIDTH 8
+
+/* (0x02A5) Mic_Detect_1_Control_3 */
+#define MADERA_MICD_LVL_0 0x0004
+#define MADERA_MICD_LVL_1 0x0008
+#define MADERA_MICD_LVL_2 0x0010
+#define MADERA_MICD_LVL_3 0x0020
+#define MADERA_MICD_LVL_4 0x0040
+#define MADERA_MICD_LVL_5 0x0080
+#define MADERA_MICD_LVL_6 0x0100
+#define MADERA_MICD_LVL_7 0x0200
+#define MADERA_MICD_LVL_8 0x0400
+#define MADERA_MICD_LVL_MASK 0x07FC
+#define MADERA_MICD_LVL_SHIFT 2
+#define MADERA_MICD_LVL_WIDTH 9
+#define MADERA_MICD_VALID 0x0002
+#define MADERA_MICD_VALID_MASK 0x0002
+#define MADERA_MICD_VALID_SHIFT 1
+#define MADERA_MICD_VALID_WIDTH 1
+#define MADERA_MICD_STS 0x0001
+#define MADERA_MICD_STS_MASK 0x0001
+#define MADERA_MICD_STS_SHIFT 0
+#define MADERA_MICD_STS_WIDTH 1
+
+/* (0x02AB) Mic_Detect_1_Control_4 */
+#define MADERA_MICDET_ADCVAL_DIFF_MASK 0xFF00
+#define MADERA_MICDET_ADCVAL_DIFF_SHIFT 8
+#define MADERA_MICDET_ADCVAL_DIFF_WIDTH 8
+#define MADERA_MICDET_ADCVAL_MASK 0x007F
+#define MADERA_MICDET_ADCVAL_SHIFT 0
+#define MADERA_MICDET_ADCVAL_WIDTH 7
+
+/* (0x02C6) Micd_Clamp_control */
+#define MADERA_MICD_CLAMP_OVD 0x0010
+#define MADERA_MICD_CLAMP_OVD_MASK 0x0010
+#define MADERA_MICD_CLAMP_OVD_SHIFT 4
+#define MADERA_MICD_CLAMP_OVD_WIDTH 1
+#define MADERA_MICD_CLAMP_MODE_MASK 0x000F
+#define MADERA_MICD_CLAMP_MODE_SHIFT 0
+#define MADERA_MICD_CLAMP_MODE_WIDTH 4
+
+/* (0x02C8) GP_Switch_1 */
+#define MADERA_SW2_MODE_MASK 0x000C
+#define MADERA_SW2_MODE_SHIFT 2
+#define MADERA_SW2_MODE_WIDTH 2
+#define MADERA_SW1_MODE_MASK 0x0003
+#define MADERA_SW1_MODE_SHIFT 0
+#define MADERA_SW1_MODE_WIDTH 2
+
+/* (0x02D3) Jack_detect_analogue */
+#define MADERA_JD2_ENA 0x0002
+#define MADERA_JD2_ENA_MASK 0x0002
+#define MADERA_JD2_ENA_SHIFT 1
+#define MADERA_JD2_ENA_WIDTH 1
+#define MADERA_JD1_ENA 0x0001
+#define MADERA_JD1_ENA_MASK 0x0001
+#define MADERA_JD1_ENA_SHIFT 0
+#define MADERA_JD1_ENA_WIDTH 1
+
+/* (0x0300) Input_Enables */
+#define MADERA_IN6L_ENA 0x0800
+#define MADERA_IN6L_ENA_MASK 0x0800
+#define MADERA_IN6L_ENA_SHIFT 11
+#define MADERA_IN6L_ENA_WIDTH 1
+#define MADERA_IN6R_ENA 0x0400
+#define MADERA_IN6R_ENA_MASK 0x0400
+#define MADERA_IN6R_ENA_SHIFT 10
+#define MADERA_IN6R_ENA_WIDTH 1
+#define MADERA_IN5L_ENA 0x0200
+#define MADERA_IN5L_ENA_MASK 0x0200
+#define MADERA_IN5L_ENA_SHIFT 9
+#define MADERA_IN5L_ENA_WIDTH 1
+#define MADERA_IN5R_ENA 0x0100
+#define MADERA_IN5R_ENA_MASK 0x0100
+#define MADERA_IN5R_ENA_SHIFT 8
+#define MADERA_IN5R_ENA_WIDTH 1
+#define MADERA_IN4L_ENA 0x0080
+#define MADERA_IN4L_ENA_MASK 0x0080
+#define MADERA_IN4L_ENA_SHIFT 7
+#define MADERA_IN4L_ENA_WIDTH 1
+#define MADERA_IN4R_ENA 0x0040
+#define MADERA_IN4R_ENA_MASK 0x0040
+#define MADERA_IN4R_ENA_SHIFT 6
+#define MADERA_IN4R_ENA_WIDTH 1
+#define MADERA_IN3L_ENA 0x0020
+#define MADERA_IN3L_ENA_MASK 0x0020
+#define MADERA_IN3L_ENA_SHIFT 5
+#define MADERA_IN3L_ENA_WIDTH 1
+#define MADERA_IN3R_ENA 0x0010
+#define MADERA_IN3R_ENA_MASK 0x0010
+#define MADERA_IN3R_ENA_SHIFT 4
+#define MADERA_IN3R_ENA_WIDTH 1
+#define MADERA_IN2L_ENA 0x0008
+#define MADERA_IN2L_ENA_MASK 0x0008
+#define MADERA_IN2L_ENA_SHIFT 3
+#define MADERA_IN2L_ENA_WIDTH 1
+#define MADERA_IN2R_ENA 0x0004
+#define MADERA_IN2R_ENA_MASK 0x0004
+#define MADERA_IN2R_ENA_SHIFT 2
+#define MADERA_IN2R_ENA_WIDTH 1
+#define MADERA_IN1L_ENA 0x0002
+#define MADERA_IN1L_ENA_MASK 0x0002
+#define MADERA_IN1L_ENA_SHIFT 1
+#define MADERA_IN1L_ENA_WIDTH 1
+#define MADERA_IN1R_ENA 0x0001
+#define MADERA_IN1R_ENA_MASK 0x0001
+#define MADERA_IN1R_ENA_SHIFT 0
+#define MADERA_IN1R_ENA_WIDTH 1
+
+/* (0x0308) Input_Rate */
+#define MADERA_IN_RATE_MASK 0xF800
+#define MADERA_IN_RATE_SHIFT 11
+#define MADERA_IN_RATE_WIDTH 5
+#define MADERA_IN_MODE_MASK 0x0400
+#define MADERA_IN_MODE_SHIFT 10
+#define MADERA_IN_MODE_WIDTH 1
+
+/* (0x0309) Input_Volume_Ramp */
+#define MADERA_IN_VD_RAMP_MASK 0x0070
+#define MADERA_IN_VD_RAMP_SHIFT 4
+#define MADERA_IN_VD_RAMP_WIDTH 3
+#define MADERA_IN_VI_RAMP_MASK 0x0007
+#define MADERA_IN_VI_RAMP_SHIFT 0
+#define MADERA_IN_VI_RAMP_WIDTH 3
+
+/* (0x030C) HPF_Control */
+#define MADERA_IN_HPF_CUT_MASK 0x0007
+#define MADERA_IN_HPF_CUT_SHIFT 0
+#define MADERA_IN_HPF_CUT_WIDTH 3
+
+/* (0x0310) IN1L_Control */
+#define MADERA_IN1L_HPF_MASK 0x8000
+#define MADERA_IN1L_HPF_SHIFT 15
+#define MADERA_IN1L_HPF_WIDTH 1
+#define MADERA_IN1_DMIC_SUP_MASK 0x1800
+#define MADERA_IN1_DMIC_SUP_SHIFT 11
+#define MADERA_IN1_DMIC_SUP_WIDTH 2
+#define MADERA_IN1_MODE_MASK 0x0400
+#define MADERA_IN1_MODE_SHIFT 10
+#define MADERA_IN1_MODE_WIDTH 1
+#define MADERA_IN1L_PGA_VOL_MASK 0x00FE
+#define MADERA_IN1L_PGA_VOL_SHIFT 1
+#define MADERA_IN1L_PGA_VOL_WIDTH 7
+
+/* (0x0311) ADC_Digital_Volume_1L */
+#define MADERA_IN1L_SRC_MASK 0x4000
+#define MADERA_IN1L_SRC_SHIFT 14
+#define MADERA_IN1L_SRC_WIDTH 1
+#define MADERA_IN1L_SRC_SE_MASK 0x2000
+#define MADERA_IN1L_SRC_SE_SHIFT 13
+#define MADERA_IN1L_SRC_SE_WIDTH 1
+#define MADERA_IN1L_LP_MODE 0x0800
+#define MADERA_IN1L_LP_MODE_MASK 0x0800
+#define MADERA_IN1L_LP_MODE_SHIFT 11
+#define MADERA_IN1L_LP_MODE_WIDTH 1
+#define MADERA_IN_VU 0x0200
+#define MADERA_IN_VU_MASK 0x0200
+#define MADERA_IN_VU_SHIFT 9
+#define MADERA_IN_VU_WIDTH 1
+#define MADERA_IN1L_MUTE 0x0100
+#define MADERA_IN1L_MUTE_MASK 0x0100
+#define MADERA_IN1L_MUTE_SHIFT 8
+#define MADERA_IN1L_MUTE_WIDTH 1
+#define MADERA_IN1L_DIG_VOL_MASK 0x00FF
+#define MADERA_IN1L_DIG_VOL_SHIFT 0
+#define MADERA_IN1L_DIG_VOL_WIDTH 8
+
+/* (0x0312) DMIC1L_Control */
+#define MADERA_IN1_OSR_MASK 0x0700
+#define MADERA_IN1_OSR_SHIFT 8
+#define MADERA_IN1_OSR_WIDTH 3
+
+/* (0x0313) IN1L_Rate_Control */
+#define MADERA_IN1L_RATE_MASK 0xF800
+#define MADERA_IN1L_RATE_SHIFT 11
+#define MADERA_IN1L_RATE_WIDTH 5
+
+/* (0x0314) IN1R_Control */
+#define MADERA_IN1R_HPF_MASK 0x8000
+#define MADERA_IN1R_HPF_SHIFT 15
+#define MADERA_IN1R_HPF_WIDTH 1
+#define MADERA_IN1R_PGA_VOL_MASK 0x00FE
+#define MADERA_IN1R_PGA_VOL_SHIFT 1
+#define MADERA_IN1R_PGA_VOL_WIDTH 7
+#define MADERA_IN1_DMICCLK_SRC_MASK 0x1800
+#define MADERA_IN1_DMICCLK_SRC_SHIFT 11
+#define MADERA_IN1_DMICCLK_SRC_WIDTH 2
+
+/* (0x0315) ADC_Digital_Volume_1R */
+#define MADERA_IN1R_SRC_MASK 0x4000
+#define MADERA_IN1R_SRC_SHIFT 14
+#define MADERA_IN1R_SRC_WIDTH 1
+#define MADERA_IN1R_SRC_SE_MASK 0x2000
+#define MADERA_IN1R_SRC_SE_SHIFT 13
+#define MADERA_IN1R_SRC_SE_WIDTH 1
+#define MADERA_IN1R_LP_MODE 0x0800
+#define MADERA_IN1R_LP_MODE_MASK 0x0800
+#define MADERA_IN1R_LP_MODE_SHIFT 11
+#define MADERA_IN1R_LP_MODE_WIDTH 1
+#define MADERA_IN1R_MUTE 0x0100
+#define MADERA_IN1R_MUTE_MASK 0x0100
+#define MADERA_IN1R_MUTE_SHIFT 8
+#define MADERA_IN1R_MUTE_WIDTH 1
+#define MADERA_IN1R_DIG_VOL_MASK 0x00FF
+#define MADERA_IN1R_DIG_VOL_SHIFT 0
+#define MADERA_IN1R_DIG_VOL_WIDTH 8
+
+/* (0x0317) IN1R_Rate_Control */
+#define MADERA_IN1R_RATE_MASK 0xF800
+#define MADERA_IN1R_RATE_SHIFT 11
+#define MADERA_IN1R_RATE_WIDTH 5
+
+/* (0x0318) IN2L_Control */
+#define MADERA_IN2L_HPF_MASK 0x8000
+#define MADERA_IN2L_HPF_SHIFT 15
+#define MADERA_IN2L_HPF_WIDTH 1
+#define MADERA_IN2_DMIC_SUP_MASK 0x1800
+#define MADERA_IN2_DMIC_SUP_SHIFT 11
+#define MADERA_IN2_DMIC_SUP_WIDTH 2
+#define MADERA_IN2_MODE_MASK 0x0400
+#define MADERA_IN2_MODE_SHIFT 10
+#define MADERA_IN2_MODE_WIDTH 1
+#define MADERA_IN2L_PGA_VOL_MASK 0x00FE
+#define MADERA_IN2L_PGA_VOL_SHIFT 1
+#define MADERA_IN2L_PGA_VOL_WIDTH 7
+
+/* (0x0319) ADC_Digital_Volume_2L */
+#define MADERA_IN2L_SRC_MASK 0x4000
+#define MADERA_IN2L_SRC_SHIFT 14
+#define MADERA_IN2L_SRC_WIDTH 1
+#define MADERA_IN2L_SRC_SE_MASK 0x2000
+#define MADERA_IN2L_SRC_SE_SHIFT 13
+#define MADERA_IN2L_SRC_SE_WIDTH 1
+#define MADERA_IN2L_LP_MODE 0x0800
+#define MADERA_IN2L_LP_MODE_MASK 0x0800
+#define MADERA_IN2L_LP_MODE_SHIFT 11
+#define MADERA_IN2L_LP_MODE_WIDTH 1
+#define MADERA_IN2L_MUTE 0x0100
+#define MADERA_IN2L_MUTE_MASK 0x0100
+#define MADERA_IN2L_MUTE_SHIFT 8
+#define MADERA_IN2L_MUTE_WIDTH 1
+#define MADERA_IN2L_DIG_VOL_MASK 0x00FF
+#define MADERA_IN2L_DIG_VOL_SHIFT 0
+#define MADERA_IN2L_DIG_VOL_WIDTH 8
+
+/* (0x031A) DMIC2L_Control */
+#define MADERA_IN2_OSR_MASK 0x0700
+#define MADERA_IN2_OSR_SHIFT 8
+#define MADERA_IN2_OSR_WIDTH 3
+
+/* (0x031C) IN2R_Control */
+#define MADERA_IN2R_HPF_MASK 0x8000
+#define MADERA_IN2R_HPF_SHIFT 15
+#define MADERA_IN2R_HPF_WIDTH 1
+#define MADERA_IN2R_PGA_VOL_MASK 0x00FE
+#define MADERA_IN2R_PGA_VOL_SHIFT 1
+#define MADERA_IN2R_PGA_VOL_WIDTH 7
+#define MADERA_IN2_DMICCLK_SRC_MASK 0x1800
+#define MADERA_IN2_DMICCLK_SRC_SHIFT 11
+#define MADERA_IN2_DMICCLK_SRC_WIDTH 2
+
+/* (0x031D) ADC_Digital_Volume_2R */
+#define MADERA_IN2R_SRC_MASK 0x4000
+#define MADERA_IN2R_SRC_SHIFT 14
+#define MADERA_IN2R_SRC_WIDTH 1
+#define MADERA_IN2R_SRC_SE_MASK 0x2000
+#define MADERA_IN2R_SRC_SE_SHIFT 13
+#define MADERA_IN2R_SRC_SE_WIDTH 1
+#define MADERA_IN2R_LP_MODE 0x0800
+#define MADERA_IN2R_LP_MODE_MASK 0x0800
+#define MADERA_IN2R_LP_MODE_SHIFT 11
+#define MADERA_IN2R_LP_MODE_WIDTH 1
+#define MADERA_IN2R_MUTE 0x0100
+#define MADERA_IN2R_MUTE_MASK 0x0100
+#define MADERA_IN2R_MUTE_SHIFT 8
+#define MADERA_IN2R_MUTE_WIDTH 1
+#define MADERA_IN2R_DIG_VOL_MASK 0x00FF
+#define MADERA_IN2R_DIG_VOL_SHIFT 0
+#define MADERA_IN2R_DIG_VOL_WIDTH 8
+
+/* (0x0320) IN3L_Control */
+#define MADERA_IN3L_HPF_MASK 0x8000
+#define MADERA_IN3L_HPF_SHIFT 15
+#define MADERA_IN3L_HPF_WIDTH 1
+#define MADERA_IN3_DMIC_SUP_MASK 0x1800
+#define MADERA_IN3_DMIC_SUP_SHIFT 11
+#define MADERA_IN3_DMIC_SUP_WIDTH 2
+#define MADERA_IN3_MODE_MASK 0x0400
+#define MADERA_IN3_MODE_SHIFT 10
+#define MADERA_IN3_MODE_WIDTH 1
+#define MADERA_IN3L_PGA_VOL_MASK 0x00FE
+#define MADERA_IN3L_PGA_VOL_SHIFT 1
+#define MADERA_IN3L_PGA_VOL_WIDTH 7
+
+/* (0x0321) ADC_Digital_Volume_3L */
+#define MADERA_IN3L_MUTE 0x0100
+#define MADERA_IN3L_MUTE_MASK 0x0100
+#define MADERA_IN3L_MUTE_SHIFT 8
+#define MADERA_IN3L_MUTE_WIDTH 1
+#define MADERA_IN3L_DIG_VOL_MASK 0x00FF
+#define MADERA_IN3L_DIG_VOL_SHIFT 0
+#define MADERA_IN3L_DIG_VOL_WIDTH 8
+
+/* (0x0322) DMIC3L_Control */
+#define MADERA_IN3_OSR_MASK 0x0700
+#define MADERA_IN3_OSR_SHIFT 8
+#define MADERA_IN3_OSR_WIDTH 3
+
+/* (0x0324) IN3R_Control */
+#define MADERA_IN3R_HPF_MASK 0x8000
+#define MADERA_IN3R_HPF_SHIFT 15
+#define MADERA_IN3R_HPF_WIDTH 1
+#define MADERA_IN3R_PGA_VOL_MASK 0x00FE
+#define MADERA_IN3R_PGA_VOL_SHIFT 1
+#define MADERA_IN3R_PGA_VOL_WIDTH 7
+#define MADERA_IN3_DMICCLK_SRC_MASK 0x1800
+#define MADERA_IN3_DMICCLK_SRC_SHIFT 11
+#define MADERA_IN3_DMICCLK_SRC_WIDTH 2
+
+/* (0x0325) ADC_Digital_Volume_3R */
+#define MADERA_IN3R_MUTE 0x0100
+#define MADERA_IN3R_MUTE_MASK 0x0100
+#define MADERA_IN3R_MUTE_SHIFT 8
+#define MADERA_IN3R_MUTE_WIDTH 1
+#define MADERA_IN3R_DIG_VOL_MASK 0x00FF
+#define MADERA_IN3R_DIG_VOL_SHIFT 0
+#define MADERA_IN3R_DIG_VOL_WIDTH 8
+
+/* (0x0328) IN4L_Control */
+#define MADERA_IN4L_HPF_MASK 0x8000
+#define MADERA_IN4L_HPF_SHIFT 15
+#define MADERA_IN4L_HPF_WIDTH 1
+#define MADERA_IN4_DMIC_SUP_MASK 0x1800
+#define MADERA_IN4_DMIC_SUP_SHIFT 11
+#define MADERA_IN4_DMIC_SUP_WIDTH 2
+
+/* (0x0329) ADC_Digital_Volume_4L */
+#define MADERA_IN4L_MUTE 0x0100
+#define MADERA_IN4L_MUTE_MASK 0x0100
+#define MADERA_IN4L_MUTE_SHIFT 8
+#define MADERA_IN4L_MUTE_WIDTH 1
+#define MADERA_IN4L_DIG_VOL_MASK 0x00FF
+#define MADERA_IN4L_DIG_VOL_SHIFT 0
+#define MADERA_IN4L_DIG_VOL_WIDTH 8
+
+/* (0x032A) DMIC4L_Control */
+#define MADERA_IN4_OSR_MASK 0x0700
+#define MADERA_IN4_OSR_SHIFT 8
+#define MADERA_IN4_OSR_WIDTH 3
+
+/* (0x032C) IN4R_Control */
+#define MADERA_IN4R_HPF_MASK 0x8000
+#define MADERA_IN4R_HPF_SHIFT 15
+#define MADERA_IN4R_HPF_WIDTH 1
+#define MADERA_IN4_DMICCLK_SRC_MASK 0x1800
+#define MADERA_IN4_DMICCLK_SRC_SHIFT 11
+#define MADERA_IN4_DMICCLK_SRC_WIDTH 2
+
+/* (0x032D) ADC_Digital_Volume_4R */
+#define MADERA_IN4R_MUTE 0x0100
+#define MADERA_IN4R_MUTE_MASK 0x0100
+#define MADERA_IN4R_MUTE_SHIFT 8
+#define MADERA_IN4R_MUTE_WIDTH 1
+#define MADERA_IN4R_DIG_VOL_MASK 0x00FF
+#define MADERA_IN4R_DIG_VOL_SHIFT 0
+#define MADERA_IN4R_DIG_VOL_WIDTH 8
+
+/* (0x0330) IN5L_Control */
+#define MADERA_IN5L_HPF_MASK 0x8000
+#define MADERA_IN5L_HPF_SHIFT 15
+#define MADERA_IN5L_HPF_WIDTH 1
+#define MADERA_IN5_DMIC_SUP_MASK 0x1800
+#define MADERA_IN5_DMIC_SUP_SHIFT 11
+#define MADERA_IN5_DMIC_SUP_WIDTH 2
+
+/* (0x0331) ADC_Digital_Volume_5L */
+#define MADERA_IN5L_MUTE 0x0100
+#define MADERA_IN5L_MUTE_MASK 0x0100
+#define MADERA_IN5L_MUTE_SHIFT 8
+#define MADERA_IN5L_MUTE_WIDTH 1
+#define MADERA_IN5L_DIG_VOL_MASK 0x00FF
+#define MADERA_IN5L_DIG_VOL_SHIFT 0
+#define MADERA_IN5L_DIG_VOL_WIDTH 8
+
+/* (0x0332) DMIC5L_Control */
+#define MADERA_IN5_OSR_MASK 0x0700
+#define MADERA_IN5_OSR_SHIFT 8
+#define MADERA_IN5_OSR_WIDTH 3
+
+/* (0x0334) IN5R_Control */
+#define MADERA_IN5R_HPF_MASK 0x8000
+#define MADERA_IN5R_HPF_SHIFT 15
+#define MADERA_IN5R_HPF_WIDTH 1
+#define MADERA_IN5_DMICCLK_SRC_MASK 0x1800
+#define MADERA_IN5_DMICCLK_SRC_SHIFT 11
+#define MADERA_IN5_DMICCLK_SRC_WIDTH 2
+
+/* (0x0335) ADC_Digital_Volume_5R */
+#define MADERA_IN5R_MUTE 0x0100
+#define MADERA_IN5R_MUTE_MASK 0x0100
+#define MADERA_IN5R_MUTE_SHIFT 8
+#define MADERA_IN5R_MUTE_WIDTH 1
+#define MADERA_IN5R_DIG_VOL_MASK 0x00FF
+#define MADERA_IN5R_DIG_VOL_SHIFT 0
+#define MADERA_IN5R_DIG_VOL_WIDTH 8
+
+/* (0x0338) IN6L_Control */
+#define MADERA_IN6L_HPF_MASK 0x8000
+#define MADERA_IN6L_HPF_SHIFT 15
+#define MADERA_IN6L_HPF_WIDTH 1
+#define MADERA_IN6_DMIC_SUP_MASK 0x1800
+#define MADERA_IN6_DMIC_SUP_SHIFT 11
+#define MADERA_IN6_DMIC_SUP_WIDTH 2
+
+/* (0x0339) ADC_Digital_Volume_6L */
+#define MADERA_IN6L_MUTE 0x0100
+#define MADERA_IN6L_MUTE_MASK 0x0100
+#define MADERA_IN6L_MUTE_SHIFT 8
+#define MADERA_IN6L_MUTE_WIDTH 1
+#define MADERA_IN6L_DIG_VOL_MASK 0x00FF
+#define MADERA_IN6L_DIG_VOL_SHIFT 0
+#define MADERA_IN6L_DIG_VOL_WIDTH 8
+
+/* (0x033A) DMIC6L_Control */
+#define MADERA_IN6_OSR_MASK 0x0700
+#define MADERA_IN6_OSR_SHIFT 8
+#define MADERA_IN6_OSR_WIDTH 3
+
+/* (0x033C) IN6R_Control */
+#define MADERA_IN6R_HPF_MASK 0x8000
+#define MADERA_IN6R_HPF_SHIFT 15
+#define MADERA_IN6R_HPF_WIDTH 1
+
+/* (0x033D) ADC_Digital_Volume_6R */
+#define MADERA_IN6R_MUTE 0x0100
+#define MADERA_IN6R_MUTE_MASK 0x0100
+#define MADERA_IN6R_MUTE_SHIFT 8
+#define MADERA_IN6R_MUTE_WIDTH 1
+#define MADERA_IN6R_DIG_VOL_MASK 0x00FF
+#define MADERA_IN6R_DIG_VOL_SHIFT 0
+#define MADERA_IN6R_DIG_VOL_WIDTH 8
+
+/* (0x033E) DMIC6R_Control */
+#define MADERA_IN6_DMICCLK_SRC_MASK 0x1800
+#define MADERA_IN6_DMICCLK_SRC_SHIFT 11
+#define MADERA_IN6_DMICCLK_SRC_WIDTH 2
+
+/* (0x0400) Output_Enables_1 */
+#define MADERA_EP_SEL 0x8000
+#define MADERA_EP_SEL_MASK 0x8000
+#define MADERA_EP_SEL_SHIFT 15
+#define MADERA_EP_SEL_WIDTH 1
+#define MADERA_OUT6L_ENA 0x0800
+#define MADERA_OUT6L_ENA_MASK 0x0800
+#define MADERA_OUT6L_ENA_SHIFT 11
+#define MADERA_OUT6L_ENA_WIDTH 1
+#define MADERA_OUT6R_ENA 0x0400
+#define MADERA_OUT6R_ENA_MASK 0x0400
+#define MADERA_OUT6R_ENA_SHIFT 10
+#define MADERA_OUT6R_ENA_WIDTH 1
+#define MADERA_OUT5L_ENA 0x0200
+#define MADERA_OUT5L_ENA_MASK 0x0200
+#define MADERA_OUT5L_ENA_SHIFT 9
+#define MADERA_OUT5L_ENA_WIDTH 1
+#define MADERA_OUT5R_ENA 0x0100
+#define MADERA_OUT5R_ENA_MASK 0x0100
+#define MADERA_OUT5R_ENA_SHIFT 8
+#define MADERA_OUT5R_ENA_WIDTH 1
+#define MADERA_OUT4L_ENA 0x0080
+#define MADERA_OUT4L_ENA_MASK 0x0080
+#define MADERA_OUT4L_ENA_SHIFT 7
+#define MADERA_OUT4L_ENA_WIDTH 1
+#define MADERA_OUT4R_ENA 0x0040
+#define MADERA_OUT4R_ENA_MASK 0x0040
+#define MADERA_OUT4R_ENA_SHIFT 6
+#define MADERA_OUT4R_ENA_WIDTH 1
+#define MADERA_OUT3L_ENA 0x0020
+#define MADERA_OUT3L_ENA_MASK 0x0020
+#define MADERA_OUT3L_ENA_SHIFT 5
+#define MADERA_OUT3L_ENA_WIDTH 1
+#define MADERA_OUT3R_ENA 0x0010
+#define MADERA_OUT3R_ENA_MASK 0x0010
+#define MADERA_OUT3R_ENA_SHIFT 4
+#define MADERA_OUT3R_ENA_WIDTH 1
+#define MADERA_OUT2L_ENA 0x0008
+#define MADERA_OUT2L_ENA_MASK 0x0008
+#define MADERA_OUT2L_ENA_SHIFT 3
+#define MADERA_OUT2L_ENA_WIDTH 1
+#define MADERA_OUT2R_ENA 0x0004
+#define MADERA_OUT2R_ENA_MASK 0x0004
+#define MADERA_OUT2R_ENA_SHIFT 2
+#define MADERA_OUT2R_ENA_WIDTH 1
+#define MADERA_OUT1L_ENA 0x0002
+#define MADERA_OUT1L_ENA_MASK 0x0002
+#define MADERA_OUT1L_ENA_SHIFT 1
+#define MADERA_OUT1L_ENA_WIDTH 1
+#define MADERA_OUT1R_ENA 0x0001
+#define MADERA_OUT1R_ENA_MASK 0x0001
+#define MADERA_OUT1R_ENA_SHIFT 0
+#define MADERA_OUT1R_ENA_WIDTH 1
+
+/* (0x0409) Output_Volume_Ramp */
+#define MADERA_OUT_VD_RAMP_MASK 0x0070
+#define MADERA_OUT_VD_RAMP_SHIFT 4
+#define MADERA_OUT_VD_RAMP_WIDTH 3
+#define MADERA_OUT_VI_RAMP_MASK 0x0007
+#define MADERA_OUT_VI_RAMP_SHIFT 0
+#define MADERA_OUT_VI_RAMP_WIDTH 3
+
+/* (0x0410) Output_Path_Config_1L */
+#define MADERA_OUT1_MONO 0x1000
+#define MADERA_OUT1_MONO_MASK 0x1000
+#define MADERA_OUT1_MONO_SHIFT 12
+#define MADERA_OUT1_MONO_WIDTH 1
+#define MADERA_OUT1L_ANC_SRC_MASK 0x0C00
+#define MADERA_OUT1L_ANC_SRC_SHIFT 10
+#define MADERA_OUT1L_ANC_SRC_WIDTH 2
+
+/* (0x0411) DAC_Digital_Volume_1L */
+#define MADERA_OUT1L_VU 0x0200
+#define MADERA_OUT1L_VU_MASK 0x0200
+#define MADERA_OUT1L_VU_SHIFT 9
+#define MADERA_OUT1L_VU_WIDTH 1
+#define MADERA_OUT1L_MUTE 0x0100
+#define MADERA_OUT1L_MUTE_MASK 0x0100
+#define MADERA_OUT1L_MUTE_SHIFT 8
+#define MADERA_OUT1L_MUTE_WIDTH 1
+#define MADERA_OUT1L_VOL_MASK 0x00FF
+#define MADERA_OUT1L_VOL_SHIFT 0
+#define MADERA_OUT1L_VOL_WIDTH 8
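
The DAC digital volume registers pair an 8-bit level field with a volume-update (VU) latch bit; in the Arizona/Madera ASoC drivers the VU bit is normally written together with the new level so the change is latched. A sketch of that pattern, assuming the MADERA_DAC_DIGITAL_VOLUME_1L address define from earlier in this header:

    #include <linux/regmap.h>

    /* Write a new OUT1L digital volume code (0x00..0xFF) and set the VU
     * bit in the same write so the update is latched. */
    static int madera_set_out1l_vol_sketch(struct regmap *regmap,
                                           unsigned int vol)
    {
            unsigned int mask = MADERA_OUT1L_VU_MASK | MADERA_OUT1L_VOL_MASK;
            unsigned int val = MADERA_OUT1L_VU |
                               ((vol << MADERA_OUT1L_VOL_SHIFT) &
                                MADERA_OUT1L_VOL_MASK);

            return regmap_update_bits(regmap, MADERA_DAC_DIGITAL_VOLUME_1L,
                                      mask, val);
    }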
+
+/* (0x0412) Output_Path_Config_1 */
+#define MADERA_HP1_GND_SEL_MASK 0x0007
+#define MADERA_HP1_GND_SEL_SHIFT 0
+#define MADERA_HP1_GND_SEL_WIDTH 3
+
+/* (0x0414) Output_Path_Config_1R */
+#define MADERA_OUT1R_ANC_SRC_MASK 0x0C00
+#define MADERA_OUT1R_ANC_SRC_SHIFT 10
+#define MADERA_OUT1R_ANC_SRC_WIDTH 2
+
+/* (0x0415) DAC_Digital_Volume_1R */
+#define MADERA_OUT1R_MUTE 0x0100
+#define MADERA_OUT1R_MUTE_MASK 0x0100
+#define MADERA_OUT1R_MUTE_SHIFT 8
+#define MADERA_OUT1R_MUTE_WIDTH 1
+#define MADERA_OUT1R_VOL_MASK 0x00FF
+#define MADERA_OUT1R_VOL_SHIFT 0
+#define MADERA_OUT1R_VOL_WIDTH 8
+
+/* (0x0418) Output_Path_Config_2L */
+#define MADERA_OUT2L_ANC_SRC_MASK 0x0C00
+#define MADERA_OUT2L_ANC_SRC_SHIFT 10
+#define MADERA_OUT2L_ANC_SRC_WIDTH 2
+
+/* (0x0419) DAC_Digital_Volume_2L */
+#define MADERA_OUT2L_MUTE 0x0100
+#define MADERA_OUT2L_MUTE_MASK 0x0100
+#define MADERA_OUT2L_MUTE_SHIFT 8
+#define MADERA_OUT2L_MUTE_WIDTH 1
+#define MADERA_OUT2L_VOL_MASK 0x00FF
+#define MADERA_OUT2L_VOL_SHIFT 0
+#define MADERA_OUT2L_VOL_WIDTH 8
+
+/* (0x041A) Output_Path_Config_2 */
+#define MADERA_HP2_GND_SEL_MASK 0x0007
+#define MADERA_HP2_GND_SEL_SHIFT 0
+#define MADERA_HP2_GND_SEL_WIDTH 3
+
+/* (0x041C) Output_Path_Config_2R */
+#define MADERA_OUT2R_ANC_SRC_MASK 0x0C00
+#define MADERA_OUT2R_ANC_SRC_SHIFT 10
+#define MADERA_OUT2R_ANC_SRC_WIDTH 2
+
+/* (0x041D) DAC_Digital_Volume_2R */
+#define MADERA_OUT2R_MUTE 0x0100
+#define MADERA_OUT2R_MUTE_MASK 0x0100
+#define MADERA_OUT2R_MUTE_SHIFT 8
+#define MADERA_OUT2R_MUTE_WIDTH 1
+#define MADERA_OUT2R_VOL_MASK 0x00FF
+#define MADERA_OUT2R_VOL_SHIFT 0
+#define MADERA_OUT2R_VOL_WIDTH 8
+
+/* (0x0420) Output_Path_Config_3L */
+#define MADERA_OUT3L_ANC_SRC_MASK 0x0C00
+#define MADERA_OUT3L_ANC_SRC_SHIFT 10
+#define MADERA_OUT3L_ANC_SRC_WIDTH 2
+
+/* (0x0421) DAC_Digital_Volume_3L */
+#define MADERA_OUT3L_MUTE 0x0100
+#define MADERA_OUT3L_MUTE_MASK 0x0100
+#define MADERA_OUT3L_MUTE_SHIFT 8
+#define MADERA_OUT3L_MUTE_WIDTH 1
+#define MADERA_OUT3L_VOL_MASK 0x00FF
+#define MADERA_OUT3L_VOL_SHIFT 0
+#define MADERA_OUT3L_VOL_WIDTH 8
+
+/* (0x0424) Output_Path_Config_3R */
+#define MADERA_OUT3R_ANC_SRC_MASK 0x0C00
+#define MADERA_OUT3R_ANC_SRC_SHIFT 10
+#define MADERA_OUT3R_ANC_SRC_WIDTH 2
+
+/* (0x0425) DAC_Digital_Volume_3R */
+#define MADERA_OUT3R_MUTE 0x0100
+#define MADERA_OUT3R_MUTE_MASK 0x0100
+#define MADERA_OUT3R_MUTE_SHIFT 8
+#define MADERA_OUT3R_MUTE_WIDTH 1
+#define MADERA_OUT3R_VOL_MASK 0x00FF
+#define MADERA_OUT3R_VOL_SHIFT 0
+#define MADERA_OUT3R_VOL_WIDTH 8
+
+/* (0x0428) Output_Path_Config_4L */
+#define MADERA_OUT4L_ANC_SRC_MASK 0x0C00
+#define MADERA_OUT4L_ANC_SRC_SHIFT 10
+#define MADERA_OUT4L_ANC_SRC_WIDTH 2
+
+/* (0x0429) DAC_Digital_Volume_4L */
+#define MADERA_OUT4L_MUTE 0x0100
+#define MADERA_OUT4L_MUTE_MASK 0x0100
+#define MADERA_OUT4L_MUTE_SHIFT 8
+#define MADERA_OUT4L_MUTE_WIDTH 1
+#define MADERA_OUT4L_VOL_MASK 0x00FF
+#define MADERA_OUT4L_VOL_SHIFT 0
+#define MADERA_OUT4L_VOL_WIDTH 8
+
+/* (0x042C) Output_Path_Config_4R */
+#define MADERA_OUT4R_ANC_SRC_MASK 0x0C00
+#define MADERA_OUT4R_ANC_SRC_SHIFT 10
+#define MADERA_OUT4R_ANC_SRC_WIDTH 2
+
+/* (0x042D) DAC_Digital_Volume_4R */
+#define MADERA_OUT4R_MUTE 0x0100
+#define MADERA_OUT4R_MUTE_MASK 0x0100
+#define MADERA_OUT4R_MUTE_SHIFT 8
+#define MADERA_OUT4R_MUTE_WIDTH 1
+#define MADERA_OUT4R_VOL_MASK 0x00FF
+#define MADERA_OUT4R_VOL_SHIFT 0
+#define MADERA_OUT4R_VOL_WIDTH 8
+
+/* (0x0430) Output_Path_Config_5L */
+#define MADERA_OUT5_OSR 0x2000
+#define MADERA_OUT5_OSR_MASK 0x2000
+#define MADERA_OUT5_OSR_SHIFT 13
+#define MADERA_OUT5_OSR_WIDTH 1
+#define MADERA_OUT5L_ANC_SRC_MASK 0x0C00
+#define MADERA_OUT5L_ANC_SRC_SHIFT 10
+#define MADERA_OUT5L_ANC_SRC_WIDTH 2
+
+/* (0x0431) DAC_Digital_Volume_5L */
+#define MADERA_OUT5L_MUTE 0x0100
+#define MADERA_OUT5L_MUTE_MASK 0x0100
+#define MADERA_OUT5L_MUTE_SHIFT 8
+#define MADERA_OUT5L_MUTE_WIDTH 1
+#define MADERA_OUT5L_VOL_MASK 0x00FF
+#define MADERA_OUT5L_VOL_SHIFT 0
+#define MADERA_OUT5L_VOL_WIDTH 8
+
+/* (0x0434) Output_Path_Config_5R */
+#define MADERA_OUT5R_ANC_SRC_MASK 0x0C00
+#define MADERA_OUT5R_ANC_SRC_SHIFT 10
+#define MADERA_OUT5R_ANC_SRC_WIDTH 2
+
+/* (0x0435) DAC_Digital_Volume_5R */
+#define MADERA_OUT5R_MUTE 0x0100
+#define MADERA_OUT5R_MUTE_MASK 0x0100
+#define MADERA_OUT5R_MUTE_SHIFT 8
+#define MADERA_OUT5R_MUTE_WIDTH 1
+#define MADERA_OUT5R_VOL_MASK 0x00FF
+#define MADERA_OUT5R_VOL_SHIFT 0
+#define MADERA_OUT5R_VOL_WIDTH 8
+
+/* (0x0438) Output_Path_Config_6L */
+#define MADERA_OUT6_OSR 0x2000
+#define MADERA_OUT6_OSR_MASK 0x2000
+#define MADERA_OUT6_OSR_SHIFT 13
+#define MADERA_OUT6_OSR_WIDTH 1
+#define MADERA_OUT6L_ANC_SRC_MASK 0x0C00
+#define MADERA_OUT6L_ANC_SRC_SHIFT 10
+#define MADERA_OUT6L_ANC_SRC_WIDTH 2
+
+/* (0x0439) DAC_Digital_Volume_6L */
+#define MADERA_OUT6L_MUTE 0x0100
+#define MADERA_OUT6L_MUTE_MASK 0x0100
+#define MADERA_OUT6L_MUTE_SHIFT 8
+#define MADERA_OUT6L_MUTE_WIDTH 1
+#define MADERA_OUT6L_VOL_MASK 0x00FF
+#define MADERA_OUT6L_VOL_SHIFT 0
+#define MADERA_OUT6L_VOL_WIDTH 8
+
+/* (0x043C) Output_Path_Config_6R */
+#define MADERA_OUT6R_ANC_SRC_MASK 0x0C00
+#define MADERA_OUT6R_ANC_SRC_SHIFT 10
+#define MADERA_OUT6R_ANC_SRC_WIDTH 2
+
+/* (0x043D) DAC_Digital_Volume_6R */
+#define MADERA_OUT6R_MUTE 0x0100
+#define MADERA_OUT6R_MUTE_MASK 0x0100
+#define MADERA_OUT6R_MUTE_SHIFT 8
+#define MADERA_OUT6R_MUTE_WIDTH 1
+#define MADERA_OUT6R_VOL_MASK 0x00FF
+#define MADERA_OUT6R_VOL_SHIFT 0
+#define MADERA_OUT6R_VOL_WIDTH 8
+
+/* (0x0450) - DAC AEC Control 1 */
+#define MADERA_AEC1_LOOPBACK_SRC_MASK 0x003C
+#define MADERA_AEC1_LOOPBACK_SRC_SHIFT 2
+#define MADERA_AEC1_LOOPBACK_SRC_WIDTH 4
+#define MADERA_AEC1_ENA_STS 0x0002
+#define MADERA_AEC1_ENA_STS_MASK 0x0002
+#define MADERA_AEC1_ENA_STS_SHIFT 1
+#define MADERA_AEC1_ENA_STS_WIDTH 1
+#define MADERA_AEC1_LOOPBACK_ENA 0x0001
+#define MADERA_AEC1_LOOPBACK_ENA_MASK 0x0001
+#define MADERA_AEC1_LOOPBACK_ENA_SHIFT 0
+#define MADERA_AEC1_LOOPBACK_ENA_WIDTH 1
+
+/* (0x0451) DAC_AEC_Control_2 */
+#define MADERA_AEC2_LOOPBACK_SRC_MASK 0x003C
+#define MADERA_AEC2_LOOPBACK_SRC_SHIFT 2
+#define MADERA_AEC2_LOOPBACK_SRC_WIDTH 4
+#define MADERA_AEC2_ENA_STS 0x0002
+#define MADERA_AEC2_ENA_STS_MASK 0x0002
+#define MADERA_AEC2_ENA_STS_SHIFT 1
+#define MADERA_AEC2_ENA_STS_WIDTH 1
+#define MADERA_AEC2_LOOPBACK_ENA 0x0001
+#define MADERA_AEC2_LOOPBACK_ENA_MASK 0x0001
+#define MADERA_AEC2_LOOPBACK_ENA_SHIFT 0
+#define MADERA_AEC2_LOOPBACK_ENA_WIDTH 1
+
+/* (0x0458) Noise_Gate_Control */
+#define MADERA_NGATE_HOLD_MASK 0x0030
+#define MADERA_NGATE_HOLD_SHIFT 4
+#define MADERA_NGATE_HOLD_WIDTH 2
+#define MADERA_NGATE_THR_MASK 0x000E
+#define MADERA_NGATE_THR_SHIFT 1
+#define MADERA_NGATE_THR_WIDTH 3
+#define MADERA_NGATE_ENA 0x0001
+#define MADERA_NGATE_ENA_MASK 0x0001
+#define MADERA_NGATE_ENA_SHIFT 0
+#define MADERA_NGATE_ENA_WIDTH 1
+
+/* (0x0490) PDM_SPK1_CTRL_1 */
+#define MADERA_SPK1R_MUTE 0x2000
+#define MADERA_SPK1R_MUTE_MASK 0x2000
+#define MADERA_SPK1R_MUTE_SHIFT 13
+#define MADERA_SPK1R_MUTE_WIDTH 1
+#define MADERA_SPK1L_MUTE 0x1000
+#define MADERA_SPK1L_MUTE_MASK 0x1000
+#define MADERA_SPK1L_MUTE_SHIFT 12
+#define MADERA_SPK1L_MUTE_WIDTH 1
+#define MADERA_SPK1_MUTE_ENDIAN 0x0100
+#define MADERA_SPK1_MUTE_ENDIAN_MASK 0x0100
+#define MADERA_SPK1_MUTE_ENDIAN_SHIFT 8
+#define MADERA_SPK1_MUTE_ENDIAN_WIDTH 1
+#define MADERA_SPK1_MUTE_SEQ1_MASK 0x00FF
+#define MADERA_SPK1_MUTE_SEQ1_SHIFT 0
+#define MADERA_SPK1_MUTE_SEQ1_WIDTH 8
+
+/* (0x0491) PDM_SPK1_CTRL_2 */
+#define MADERA_SPK1_FMT 0x0001
+#define MADERA_SPK1_FMT_MASK 0x0001
+#define MADERA_SPK1_FMT_SHIFT 0
+#define MADERA_SPK1_FMT_WIDTH 1
+
+/* (0x0492) PDM_SPK2_CTRL_1 */
+#define MADERA_SPK2R_MUTE 0x2000
+#define MADERA_SPK2R_MUTE_MASK 0x2000
+#define MADERA_SPK2R_MUTE_SHIFT 13
+#define MADERA_SPK2R_MUTE_WIDTH 1
+#define MADERA_SPK2L_MUTE 0x1000
+#define MADERA_SPK2L_MUTE_MASK 0x1000
+#define MADERA_SPK2L_MUTE_SHIFT 12
+#define MADERA_SPK2L_MUTE_WIDTH 1
+
+/* (0x04A0) - HP1 Short Circuit Ctrl */
+#define MADERA_HP1_SC_ENA 0x1000
+#define MADERA_HP1_SC_ENA_MASK 0x1000
+#define MADERA_HP1_SC_ENA_SHIFT 12
+#define MADERA_HP1_SC_ENA_WIDTH 1
+
+/* (0x04A1) - HP2 Short Circuit Ctrl */
+#define MADERA_HP2_SC_ENA 0x1000
+#define MADERA_HP2_SC_ENA_MASK 0x1000
+#define MADERA_HP2_SC_ENA_SHIFT 12
+#define MADERA_HP2_SC_ENA_WIDTH 1
+
+/* (0x04A2) - HP3 Short Circuit Ctrl */
+#define MADERA_HP3_SC_ENA 0x1000
+#define MADERA_HP3_SC_ENA_MASK 0x1000
+#define MADERA_HP3_SC_ENA_SHIFT 12
+#define MADERA_HP3_SC_ENA_WIDTH 1
+
+/* (0x04A8) - HP_Test_Ctrl_5 */
+#define MADERA_HP1L_ONEFLT 0x0100
+#define MADERA_HP1L_ONEFLT_MASK 0x0100
+#define MADERA_HP1L_ONEFLT_SHIFT 8
+#define MADERA_HP1L_ONEFLT_WIDTH 1
+
+/* (0x04A9) - HP_Test_Ctrl_6 */
+#define MADERA_HP1R_ONEFLT 0x0100
+#define MADERA_HP1R_ONEFLT_MASK 0x0100
+#define MADERA_HP1R_ONEFLT_SHIFT 8
+#define MADERA_HP1R_ONEFLT_WIDTH 1
+
+/* (0x0500) AIF1_BCLK_Ctrl */
+#define MADERA_AIF1_BCLK_INV 0x0080
+#define MADERA_AIF1_BCLK_INV_MASK 0x0080
+#define MADERA_AIF1_BCLK_INV_SHIFT 7
+#define MADERA_AIF1_BCLK_INV_WIDTH 1
+#define MADERA_AIF1_BCLK_MSTR 0x0020
+#define MADERA_AIF1_BCLK_MSTR_MASK 0x0020
+#define MADERA_AIF1_BCLK_MSTR_SHIFT 5
+#define MADERA_AIF1_BCLK_MSTR_WIDTH 1
+#define MADERA_AIF1_BCLK_FREQ_MASK 0x001F
+#define MADERA_AIF1_BCLK_FREQ_SHIFT 0
+#define MADERA_AIF1_BCLK_FREQ_WIDTH 5
+
+/* (0x0501) AIF1_Tx_Pin_Ctrl */
+#define MADERA_AIF1TX_LRCLK_SRC 0x0008
+#define MADERA_AIF1TX_LRCLK_SRC_MASK 0x0008
+#define MADERA_AIF1TX_LRCLK_SRC_SHIFT 3
+#define MADERA_AIF1TX_LRCLK_SRC_WIDTH 1
+#define MADERA_AIF1TX_LRCLK_INV 0x0004
+#define MADERA_AIF1TX_LRCLK_INV_MASK 0x0004
+#define MADERA_AIF1TX_LRCLK_INV_SHIFT 2
+#define MADERA_AIF1TX_LRCLK_INV_WIDTH 1
+#define MADERA_AIF1TX_LRCLK_MSTR 0x0001
+#define MADERA_AIF1TX_LRCLK_MSTR_MASK 0x0001
+#define MADERA_AIF1TX_LRCLK_MSTR_SHIFT 0
+#define MADERA_AIF1TX_LRCLK_MSTR_WIDTH 1
+
+/* (0x0502) AIF1_Rx_Pin_Ctrl */
+#define MADERA_AIF1RX_LRCLK_INV 0x0004
+#define MADERA_AIF1RX_LRCLK_INV_MASK 0x0004
+#define MADERA_AIF1RX_LRCLK_INV_SHIFT 2
+#define MADERA_AIF1RX_LRCLK_INV_WIDTH 1
+#define MADERA_AIF1RX_LRCLK_FRC 0x0002
+#define MADERA_AIF1RX_LRCLK_FRC_MASK 0x0002
+#define MADERA_AIF1RX_LRCLK_FRC_SHIFT 1
+#define MADERA_AIF1RX_LRCLK_FRC_WIDTH 1
+#define MADERA_AIF1RX_LRCLK_MSTR 0x0001
+#define MADERA_AIF1RX_LRCLK_MSTR_MASK 0x0001
+#define MADERA_AIF1RX_LRCLK_MSTR_SHIFT 0
+#define MADERA_AIF1RX_LRCLK_MSTR_WIDTH 1
+
+/* (0x0503) AIF1_Rate_Ctrl */
+#define MADERA_AIF1_RATE_MASK 0xF800
+#define MADERA_AIF1_RATE_SHIFT 11
+#define MADERA_AIF1_RATE_WIDTH 5
+#define MADERA_AIF1_TRI 0x0040
+#define MADERA_AIF1_TRI_MASK 0x0040
+#define MADERA_AIF1_TRI_SHIFT 6
+#define MADERA_AIF1_TRI_WIDTH 1
+
+/* (0x0504) AIF1_Format */
+#define MADERA_AIF1_FMT_MASK 0x0007
+#define MADERA_AIF1_FMT_SHIFT 0
+#define MADERA_AIF1_FMT_WIDTH 3
+
+/* (0x0506) AIF1_Rx_BCLK_Rate */
+#define MADERA_AIF1RX_BCPF_MASK 0x1FFF
+#define MADERA_AIF1RX_BCPF_SHIFT 0
+#define MADERA_AIF1RX_BCPF_WIDTH 13
+
+/* (0x0507) AIF1_Frame_Ctrl_1 */
+#define MADERA_AIF1TX_WL_MASK 0x3F00
+#define MADERA_AIF1TX_WL_SHIFT 8
+#define MADERA_AIF1TX_WL_WIDTH 6
+#define MADERA_AIF1TX_SLOT_LEN_MASK 0x00FF
+#define MADERA_AIF1TX_SLOT_LEN_SHIFT 0
+#define MADERA_AIF1TX_SLOT_LEN_WIDTH 8
+
+/* (0x0508) AIF1_Frame_Ctrl_2 */
+#define MADERA_AIF1RX_WL_MASK 0x3F00
+#define MADERA_AIF1RX_WL_SHIFT 8
+#define MADERA_AIF1RX_WL_WIDTH 6
+#define MADERA_AIF1RX_SLOT_LEN_MASK 0x00FF
+#define MADERA_AIF1RX_SLOT_LEN_SHIFT 0
+#define MADERA_AIF1RX_SLOT_LEN_WIDTH 8
+
+/* (0x0509) AIF1_Frame_Ctrl_3 */
+#define MADERA_AIF1TX1_SLOT_MASK 0x003F
+#define MADERA_AIF1TX1_SLOT_SHIFT 0
+#define MADERA_AIF1TX1_SLOT_WIDTH 6
+
+/* (0x0519) AIF1_Tx_Enables */
+#define MADERA_AIF1TX8_ENA 0x0080
+#define MADERA_AIF1TX8_ENA_MASK 0x0080
+#define MADERA_AIF1TX8_ENA_SHIFT 7
+#define MADERA_AIF1TX8_ENA_WIDTH 1
+#define MADERA_AIF1TX7_ENA 0x0040
+#define MADERA_AIF1TX7_ENA_MASK 0x0040
+#define MADERA_AIF1TX7_ENA_SHIFT 6
+#define MADERA_AIF1TX7_ENA_WIDTH 1
+#define MADERA_AIF1TX6_ENA 0x0020
+#define MADERA_AIF1TX6_ENA_MASK 0x0020
+#define MADERA_AIF1TX6_ENA_SHIFT 5
+#define MADERA_AIF1TX6_ENA_WIDTH 1
+#define MADERA_AIF1TX5_ENA 0x0010
+#define MADERA_AIF1TX5_ENA_MASK 0x0010
+#define MADERA_AIF1TX5_ENA_SHIFT 4
+#define MADERA_AIF1TX5_ENA_WIDTH 1
+#define MADERA_AIF1TX4_ENA 0x0008
+#define MADERA_AIF1TX4_ENA_MASK 0x0008
+#define MADERA_AIF1TX4_ENA_SHIFT 3
+#define MADERA_AIF1TX4_ENA_WIDTH 1
+#define MADERA_AIF1TX3_ENA 0x0004
+#define MADERA_AIF1TX3_ENA_MASK 0x0004
+#define MADERA_AIF1TX3_ENA_SHIFT 2
+#define MADERA_AIF1TX3_ENA_WIDTH 1
+#define MADERA_AIF1TX2_ENA 0x0002
+#define MADERA_AIF1TX2_ENA_MASK 0x0002
+#define MADERA_AIF1TX2_ENA_SHIFT 1
+#define MADERA_AIF1TX2_ENA_WIDTH 1
+#define MADERA_AIF1TX1_ENA 0x0001
+#define MADERA_AIF1TX1_ENA_MASK 0x0001
+#define MADERA_AIF1TX1_ENA_SHIFT 0
+#define MADERA_AIF1TX1_ENA_WIDTH 1
+
+/* (0x051A) AIF1_Rx_Enables */
+#define MADERA_AIF1RX8_ENA 0x0080
+#define MADERA_AIF1RX8_ENA_MASK 0x0080
+#define MADERA_AIF1RX8_ENA_SHIFT 7
+#define MADERA_AIF1RX8_ENA_WIDTH 1
+#define MADERA_AIF1RX7_ENA 0x0040
+#define MADERA_AIF1RX7_ENA_MASK 0x0040
+#define MADERA_AIF1RX7_ENA_SHIFT 6
+#define MADERA_AIF1RX7_ENA_WIDTH 1
+#define MADERA_AIF1RX6_ENA 0x0020
+#define MADERA_AIF1RX6_ENA_MASK 0x0020
+#define MADERA_AIF1RX6_ENA_SHIFT 5
+#define MADERA_AIF1RX6_ENA_WIDTH 1
+#define MADERA_AIF1RX5_ENA 0x0010
+#define MADERA_AIF1RX5_ENA_MASK 0x0010
+#define MADERA_AIF1RX5_ENA_SHIFT 4
+#define MADERA_AIF1RX5_ENA_WIDTH 1
+#define MADERA_AIF1RX4_ENA 0x0008
+#define MADERA_AIF1RX4_ENA_MASK 0x0008
+#define MADERA_AIF1RX4_ENA_SHIFT 3
+#define MADERA_AIF1RX4_ENA_WIDTH 1
+#define MADERA_AIF1RX3_ENA 0x0004
+#define MADERA_AIF1RX3_ENA_MASK 0x0004
+#define MADERA_AIF1RX3_ENA_SHIFT 2
+#define MADERA_AIF1RX3_ENA_WIDTH 1
+#define MADERA_AIF1RX2_ENA 0x0002
+#define MADERA_AIF1RX2_ENA_MASK 0x0002
+#define MADERA_AIF1RX2_ENA_SHIFT 1
+#define MADERA_AIF1RX2_ENA_WIDTH 1
+#define MADERA_AIF1RX1_ENA 0x0001
+#define MADERA_AIF1RX1_ENA_MASK 0x0001
+#define MADERA_AIF1RX1_ENA_SHIFT 0
+#define MADERA_AIF1RX1_ENA_WIDTH 1
+
+/* (0x0559) AIF2_Tx_Enables */
+#define MADERA_AIF2TX8_ENA 0x0080
+#define MADERA_AIF2TX8_ENA_MASK 0x0080
+#define MADERA_AIF2TX8_ENA_SHIFT 7
+#define MADERA_AIF2TX8_ENA_WIDTH 1
+#define MADERA_AIF2TX7_ENA 0x0040
+#define MADERA_AIF2TX7_ENA_MASK 0x0040
+#define MADERA_AIF2TX7_ENA_SHIFT 6
+#define MADERA_AIF2TX7_ENA_WIDTH 1
+#define MADERA_AIF2TX6_ENA 0x0020
+#define MADERA_AIF2TX6_ENA_MASK 0x0020
+#define MADERA_AIF2TX6_ENA_SHIFT 5
+#define MADERA_AIF2TX6_ENA_WIDTH 1
+#define MADERA_AIF2TX5_ENA 0x0010
+#define MADERA_AIF2TX5_ENA_MASK 0x0010
+#define MADERA_AIF2TX5_ENA_SHIFT 4
+#define MADERA_AIF2TX5_ENA_WIDTH 1
+#define MADERA_AIF2TX4_ENA 0x0008
+#define MADERA_AIF2TX4_ENA_MASK 0x0008
+#define MADERA_AIF2TX4_ENA_SHIFT 3
+#define MADERA_AIF2TX4_ENA_WIDTH 1
+#define MADERA_AIF2TX3_ENA 0x0004
+#define MADERA_AIF2TX3_ENA_MASK 0x0004
+#define MADERA_AIF2TX3_ENA_SHIFT 2
+#define MADERA_AIF2TX3_ENA_WIDTH 1
+#define MADERA_AIF2TX2_ENA 0x0002
+#define MADERA_AIF2TX2_ENA_MASK 0x0002
+#define MADERA_AIF2TX2_ENA_SHIFT 1
+#define MADERA_AIF2TX2_ENA_WIDTH 1
+#define MADERA_AIF2TX1_ENA 0x0001
+#define MADERA_AIF2TX1_ENA_MASK 0x0001
+#define MADERA_AIF2TX1_ENA_SHIFT 0
+#define MADERA_AIF2TX1_ENA_WIDTH 1
+
+/* (0x055A) AIF2_Rx_Enables */
+#define MADERA_AIF2RX8_ENA 0x0080
+#define MADERA_AIF2RX8_ENA_MASK 0x0080
+#define MADERA_AIF2RX8_ENA_SHIFT 7
+#define MADERA_AIF2RX8_ENA_WIDTH 1
+#define MADERA_AIF2RX7_ENA 0x0040
+#define MADERA_AIF2RX7_ENA_MASK 0x0040
+#define MADERA_AIF2RX7_ENA_SHIFT 6
+#define MADERA_AIF2RX7_ENA_WIDTH 1
+#define MADERA_AIF2RX6_ENA 0x0020
+#define MADERA_AIF2RX6_ENA_MASK 0x0020
+#define MADERA_AIF2RX6_ENA_SHIFT 5
+#define MADERA_AIF2RX6_ENA_WIDTH 1
+#define MADERA_AIF2RX5_ENA 0x0010
+#define MADERA_AIF2RX5_ENA_MASK 0x0010
+#define MADERA_AIF2RX5_ENA_SHIFT 4
+#define MADERA_AIF2RX5_ENA_WIDTH 1
+#define MADERA_AIF2RX4_ENA 0x0008
+#define MADERA_AIF2RX4_ENA_MASK 0x0008
+#define MADERA_AIF2RX4_ENA_SHIFT 3
+#define MADERA_AIF2RX4_ENA_WIDTH 1
+#define MADERA_AIF2RX3_ENA 0x0004
+#define MADERA_AIF2RX3_ENA_MASK 0x0004
+#define MADERA_AIF2RX3_ENA_SHIFT 2
+#define MADERA_AIF2RX3_ENA_WIDTH 1
+#define MADERA_AIF2RX2_ENA 0x0002
+#define MADERA_AIF2RX2_ENA_MASK 0x0002
+#define MADERA_AIF2RX2_ENA_SHIFT 1
+#define MADERA_AIF2RX2_ENA_WIDTH 1
+#define MADERA_AIF2RX1_ENA 0x0001
+#define MADERA_AIF2RX1_ENA_MASK 0x0001
+#define MADERA_AIF2RX1_ENA_SHIFT 0
+#define MADERA_AIF2RX1_ENA_WIDTH 1
+
+/* (0x0599) AIF3_Tx_Enables */
+#define MADERA_AIF3TX2_ENA 0x0002
+#define MADERA_AIF3TX2_ENA_MASK 0x0002
+#define MADERA_AIF3TX2_ENA_SHIFT 1
+#define MADERA_AIF3TX2_ENA_WIDTH 1
+#define MADERA_AIF3TX1_ENA 0x0001
+#define MADERA_AIF3TX1_ENA_MASK 0x0001
+#define MADERA_AIF3TX1_ENA_SHIFT 0
+#define MADERA_AIF3TX1_ENA_WIDTH 1
+
+/* (0x059A) AIF3_Rx_Enables */
+#define MADERA_AIF3RX2_ENA 0x0002
+#define MADERA_AIF3RX2_ENA_MASK 0x0002
+#define MADERA_AIF3RX2_ENA_SHIFT 1
+#define MADERA_AIF3RX2_ENA_WIDTH 1
+#define MADERA_AIF3RX1_ENA 0x0001
+#define MADERA_AIF3RX1_ENA_MASK 0x0001
+#define MADERA_AIF3RX1_ENA_SHIFT 0
+#define MADERA_AIF3RX1_ENA_WIDTH 1
+
+/* (0x05B9) AIF4_Tx_Enables */
+#define MADERA_AIF4TX2_ENA 0x0002
+#define MADERA_AIF4TX2_ENA_MASK 0x0002
+#define MADERA_AIF4TX2_ENA_SHIFT 1
+#define MADERA_AIF4TX2_ENA_WIDTH 1
+#define MADERA_AIF4TX1_ENA 0x0001
+#define MADERA_AIF4TX1_ENA_MASK 0x0001
+#define MADERA_AIF4TX1_ENA_SHIFT 0
+#define MADERA_AIF4TX1_ENA_WIDTH 1
+
+/* (0x05BA) AIF4_Rx_Enables */
+#define MADERA_AIF4RX2_ENA 0x0002
+#define MADERA_AIF4RX2_ENA_MASK 0x0002
+#define MADERA_AIF4RX2_ENA_SHIFT 1
+#define MADERA_AIF4RX2_ENA_WIDTH 1
+#define MADERA_AIF4RX1_ENA 0x0001
+#define MADERA_AIF4RX1_ENA_MASK 0x0001
+#define MADERA_AIF4RX1_ENA_SHIFT 0
+#define MADERA_AIF4RX1_ENA_WIDTH 1
+
+/* (0x05C2) SPD1_TX_Control */
+#define MADERA_SPD1_VAL2 0x2000
+#define MADERA_SPD1_VAL2_MASK 0x2000
+#define MADERA_SPD1_VAL2_SHIFT 13
+#define MADERA_SPD1_VAL2_WIDTH 1
+#define MADERA_SPD1_VAL1 0x1000
+#define MADERA_SPD1_VAL1_MASK 0x1000
+#define MADERA_SPD1_VAL1_SHIFT 12
+#define MADERA_SPD1_VAL1_WIDTH 1
+#define MADERA_SPD1_RATE_MASK 0x00F0
+#define MADERA_SPD1_RATE_SHIFT 4
+#define MADERA_SPD1_RATE_WIDTH 4
+#define MADERA_SPD1_ENA 0x0001
+#define MADERA_SPD1_ENA_MASK 0x0001
+#define MADERA_SPD1_ENA_SHIFT 0
+#define MADERA_SPD1_ENA_WIDTH 1
+
+/* (0x05F5) SLIMbus_RX_Channel_Enable */
+#define MADERA_SLIMRX8_ENA 0x0080
+#define MADERA_SLIMRX8_ENA_MASK 0x0080
+#define MADERA_SLIMRX8_ENA_SHIFT 7
+#define MADERA_SLIMRX8_ENA_WIDTH 1
+#define MADERA_SLIMRX7_ENA 0x0040
+#define MADERA_SLIMRX7_ENA_MASK 0x0040
+#define MADERA_SLIMRX7_ENA_SHIFT 6
+#define MADERA_SLIMRX7_ENA_WIDTH 1
+#define MADERA_SLIMRX6_ENA 0x0020
+#define MADERA_SLIMRX6_ENA_MASK 0x0020
+#define MADERA_SLIMRX6_ENA_SHIFT 5
+#define MADERA_SLIMRX6_ENA_WIDTH 1
+#define MADERA_SLIMRX5_ENA 0x0010
+#define MADERA_SLIMRX5_ENA_MASK 0x0010
+#define MADERA_SLIMRX5_ENA_SHIFT 4
+#define MADERA_SLIMRX5_ENA_WIDTH 1
+#define MADERA_SLIMRX4_ENA 0x0008
+#define MADERA_SLIMRX4_ENA_MASK 0x0008
+#define MADERA_SLIMRX4_ENA_SHIFT 3
+#define MADERA_SLIMRX4_ENA_WIDTH 1
+#define MADERA_SLIMRX3_ENA 0x0004
+#define MADERA_SLIMRX3_ENA_MASK 0x0004
+#define MADERA_SLIMRX3_ENA_SHIFT 2
+#define MADERA_SLIMRX3_ENA_WIDTH 1
+#define MADERA_SLIMRX2_ENA 0x0002
+#define MADERA_SLIMRX2_ENA_MASK 0x0002
+#define MADERA_SLIMRX2_ENA_SHIFT 1
+#define MADERA_SLIMRX2_ENA_WIDTH 1
+#define MADERA_SLIMRX1_ENA 0x0001
+#define MADERA_SLIMRX1_ENA_MASK 0x0001
+#define MADERA_SLIMRX1_ENA_SHIFT 0
+#define MADERA_SLIMRX1_ENA_WIDTH 1
+
+/* (0x05F6) SLIMbus_TX_Channel_Enable */
+#define MADERA_SLIMTX8_ENA 0x0080
+#define MADERA_SLIMTX8_ENA_MASK 0x0080
+#define MADERA_SLIMTX8_ENA_SHIFT 7
+#define MADERA_SLIMTX8_ENA_WIDTH 1
+#define MADERA_SLIMTX7_ENA 0x0040
+#define MADERA_SLIMTX7_ENA_MASK 0x0040
+#define MADERA_SLIMTX7_ENA_SHIFT 6
+#define MADERA_SLIMTX7_ENA_WIDTH 1
+#define MADERA_SLIMTX6_ENA 0x0020
+#define MADERA_SLIMTX6_ENA_MASK 0x0020
+#define MADERA_SLIMTX6_ENA_SHIFT 5
+#define MADERA_SLIMTX6_ENA_WIDTH 1
+#define MADERA_SLIMTX5_ENA 0x0010
+#define MADERA_SLIMTX5_ENA_MASK 0x0010
+#define MADERA_SLIMTX5_ENA_SHIFT 4
+#define MADERA_SLIMTX5_ENA_WIDTH 1
+#define MADERA_SLIMTX4_ENA 0x0008
+#define MADERA_SLIMTX4_ENA_MASK 0x0008
+#define MADERA_SLIMTX4_ENA_SHIFT 3
+#define MADERA_SLIMTX4_ENA_WIDTH 1
+#define MADERA_SLIMTX3_ENA 0x0004
+#define MADERA_SLIMTX3_ENA_MASK 0x0004
+#define MADERA_SLIMTX3_ENA_SHIFT 2
+#define MADERA_SLIMTX3_ENA_WIDTH 1
+#define MADERA_SLIMTX2_ENA 0x0002
+#define MADERA_SLIMTX2_ENA_MASK 0x0002
+#define MADERA_SLIMTX2_ENA_SHIFT 1
+#define MADERA_SLIMTX2_ENA_WIDTH 1
+#define MADERA_SLIMTX1_ENA 0x0001
+#define MADERA_SLIMTX1_ENA_MASK 0x0001
+#define MADERA_SLIMTX1_ENA_SHIFT 0
+#define MADERA_SLIMTX1_ENA_WIDTH 1
+
+/* (0x0E10) EQ1_1 */
+#define MADERA_EQ1_B1_GAIN_MASK 0xF800
+#define MADERA_EQ1_B1_GAIN_SHIFT 11
+#define MADERA_EQ1_B1_GAIN_WIDTH 5
+#define MADERA_EQ1_B2_GAIN_MASK 0x07C0
+#define MADERA_EQ1_B2_GAIN_SHIFT 6
+#define MADERA_EQ1_B2_GAIN_WIDTH 5
+#define MADERA_EQ1_B3_GAIN_MASK 0x003E
+#define MADERA_EQ1_B3_GAIN_SHIFT 1
+#define MADERA_EQ1_B3_GAIN_WIDTH 5
+#define MADERA_EQ1_ENA 0x0001
+#define MADERA_EQ1_ENA_MASK 0x0001
+#define MADERA_EQ1_ENA_SHIFT 0
+#define MADERA_EQ1_ENA_WIDTH 1
+
+/* (0x0E11) EQ1_2 */
+#define MADERA_EQ1_B4_GAIN_MASK 0xF800
+#define MADERA_EQ1_B4_GAIN_SHIFT 11
+#define MADERA_EQ1_B4_GAIN_WIDTH 5
+#define MADERA_EQ1_B5_GAIN_MASK 0x07C0
+#define MADERA_EQ1_B5_GAIN_SHIFT 6
+#define MADERA_EQ1_B5_GAIN_WIDTH 5
+#define MADERA_EQ1_B1_MODE 0x0001
+#define MADERA_EQ1_B1_MODE_MASK 0x0001
+#define MADERA_EQ1_B1_MODE_SHIFT 0
+#define MADERA_EQ1_B1_MODE_WIDTH 1
+
+/* (0x0E26) EQ2_1 */
+#define MADERA_EQ2_B1_GAIN_MASK 0xF800
+#define MADERA_EQ2_B1_GAIN_SHIFT 11
+#define MADERA_EQ2_B1_GAIN_WIDTH 5
+#define MADERA_EQ2_B2_GAIN_MASK 0x07C0
+#define MADERA_EQ2_B2_GAIN_SHIFT 6
+#define MADERA_EQ2_B2_GAIN_WIDTH 5
+#define MADERA_EQ2_B3_GAIN_MASK 0x003E
+#define MADERA_EQ2_B3_GAIN_SHIFT 1
+#define MADERA_EQ2_B3_GAIN_WIDTH 5
+#define MADERA_EQ2_ENA 0x0001
+#define MADERA_EQ2_ENA_MASK 0x0001
+#define MADERA_EQ2_ENA_SHIFT 0
+#define MADERA_EQ2_ENA_WIDTH 1
+
+/* (0x0E27) EQ2_2 */
+#define MADERA_EQ2_B4_GAIN_MASK 0xF800
+#define MADERA_EQ2_B4_GAIN_SHIFT 11
+#define MADERA_EQ2_B4_GAIN_WIDTH 5
+#define MADERA_EQ2_B5_GAIN_MASK 0x07C0
+#define MADERA_EQ2_B5_GAIN_SHIFT 6
+#define MADERA_EQ2_B5_GAIN_WIDTH 5
+#define MADERA_EQ2_B1_MODE 0x0001
+#define MADERA_EQ2_B1_MODE_MASK 0x0001
+#define MADERA_EQ2_B1_MODE_SHIFT 0
+#define MADERA_EQ2_B1_MODE_WIDTH 1
+
+/* (0x0E3C) EQ3_1 */
+#define MADERA_EQ3_B1_GAIN_MASK 0xF800
+#define MADERA_EQ3_B1_GAIN_SHIFT 11
+#define MADERA_EQ3_B1_GAIN_WIDTH 5
+#define MADERA_EQ3_B2_GAIN_MASK 0x07C0
+#define MADERA_EQ3_B2_GAIN_SHIFT 6
+#define MADERA_EQ3_B2_GAIN_WIDTH 5
+#define MADERA_EQ3_B3_GAIN_MASK 0x003E
+#define MADERA_EQ3_B3_GAIN_SHIFT 1
+#define MADERA_EQ3_B3_GAIN_WIDTH 5
+#define MADERA_EQ3_ENA 0x0001
+#define MADERA_EQ3_ENA_MASK 0x0001
+#define MADERA_EQ3_ENA_SHIFT 0
+#define MADERA_EQ3_ENA_WIDTH 1
+
+/* (0x0E3D) EQ3_2 */
+#define MADERA_EQ3_B4_GAIN_MASK 0xF800
+#define MADERA_EQ3_B4_GAIN_SHIFT 11
+#define MADERA_EQ3_B4_GAIN_WIDTH 5
+#define MADERA_EQ3_B5_GAIN_MASK 0x07C0
+#define MADERA_EQ3_B5_GAIN_SHIFT 6
+#define MADERA_EQ3_B5_GAIN_WIDTH 5
+#define MADERA_EQ3_B1_MODE 0x0001
+#define MADERA_EQ3_B1_MODE_MASK 0x0001
+#define MADERA_EQ3_B1_MODE_SHIFT 0
+#define MADERA_EQ3_B1_MODE_WIDTH 1
+
+/* (0x0E52) EQ4_1 */
+#define MADERA_EQ4_B1_GAIN_MASK 0xF800
+#define MADERA_EQ4_B1_GAIN_SHIFT 11
+#define MADERA_EQ4_B1_GAIN_WIDTH 5
+#define MADERA_EQ4_B2_GAIN_MASK 0x07C0
+#define MADERA_EQ4_B2_GAIN_SHIFT 6
+#define MADERA_EQ4_B2_GAIN_WIDTH 5
+#define MADERA_EQ4_B3_GAIN_MASK 0x003E
+#define MADERA_EQ4_B3_GAIN_SHIFT 1
+#define MADERA_EQ4_B3_GAIN_WIDTH 5
+#define MADERA_EQ4_ENA 0x0001
+#define MADERA_EQ4_ENA_MASK 0x0001
+#define MADERA_EQ4_ENA_SHIFT 0
+#define MADERA_EQ4_ENA_WIDTH 1
+
+/* (0x0E53) EQ4_2 */
+#define MADERA_EQ4_B4_GAIN_MASK 0xF800
+#define MADERA_EQ4_B4_GAIN_SHIFT 11
+#define MADERA_EQ4_B4_GAIN_WIDTH 5
+#define MADERA_EQ4_B5_GAIN_MASK 0x07C0
+#define MADERA_EQ4_B5_GAIN_SHIFT 6
+#define MADERA_EQ4_B5_GAIN_WIDTH 5
+#define MADERA_EQ4_B1_MODE 0x0001
+#define MADERA_EQ4_B1_MODE_MASK 0x0001
+#define MADERA_EQ4_B1_MODE_SHIFT 0
+#define MADERA_EQ4_B1_MODE_WIDTH 1
+
+/* (0x0E80) DRC1_ctrl1 */
+#define MADERA_DRC1L_ENA 0x0002
+#define MADERA_DRC1L_ENA_MASK 0x0002
+#define MADERA_DRC1L_ENA_SHIFT 1
+#define MADERA_DRC1L_ENA_WIDTH 1
+#define MADERA_DRC1R_ENA 0x0001
+#define MADERA_DRC1R_ENA_MASK 0x0001
+#define MADERA_DRC1R_ENA_SHIFT 0
+#define MADERA_DRC1R_ENA_WIDTH 1
+
+/* (0x0E88) DRC2_ctrl1 */
+#define MADERA_DRC2L_ENA 0x0002
+#define MADERA_DRC2L_ENA_MASK 0x0002
+#define MADERA_DRC2L_ENA_SHIFT 1
+#define MADERA_DRC2L_ENA_WIDTH 1
+#define MADERA_DRC2R_ENA 0x0001
+#define MADERA_DRC2R_ENA_MASK 0x0001
+#define MADERA_DRC2R_ENA_SHIFT 0
+#define MADERA_DRC2R_ENA_WIDTH 1
+
+/* (0x0EC0) HPLPF1_1 */
+#define MADERA_LHPF1_MODE 0x0002
+#define MADERA_LHPF1_MODE_MASK 0x0002
+#define MADERA_LHPF1_MODE_SHIFT 1
+#define MADERA_LHPF1_MODE_WIDTH 1
+#define MADERA_LHPF1_ENA 0x0001
+#define MADERA_LHPF1_ENA_MASK 0x0001
+#define MADERA_LHPF1_ENA_SHIFT 0
+#define MADERA_LHPF1_ENA_WIDTH 1
+
+/* (0x0EC1) HPLPF1_2 */
+#define MADERA_LHPF1_COEFF_MASK 0xFFFF
+#define MADERA_LHPF1_COEFF_SHIFT 0
+#define MADERA_LHPF1_COEFF_WIDTH 16
+
+/* (0x0EC4) HPLPF2_1 */
+#define MADERA_LHPF2_MODE 0x0002
+#define MADERA_LHPF2_MODE_MASK 0x0002
+#define MADERA_LHPF2_MODE_SHIFT 1
+#define MADERA_LHPF2_MODE_WIDTH 1
+#define MADERA_LHPF2_ENA 0x0001
+#define MADERA_LHPF2_ENA_MASK 0x0001
+#define MADERA_LHPF2_ENA_SHIFT 0
+#define MADERA_LHPF2_ENA_WIDTH 1
+
+/* (0x0EC5) HPLPF2_2 */
+#define MADERA_LHPF2_COEFF_MASK 0xFFFF
+#define MADERA_LHPF2_COEFF_SHIFT 0
+#define MADERA_LHPF2_COEFF_WIDTH 16
+
+/* (0x0EC8) HPLPF3_1 */
+#define MADERA_LHPF3_MODE 0x0002
+#define MADERA_LHPF3_MODE_MASK 0x0002
+#define MADERA_LHPF3_MODE_SHIFT 1
+#define MADERA_LHPF3_MODE_WIDTH 1
+#define MADERA_LHPF3_ENA 0x0001
+#define MADERA_LHPF3_ENA_MASK 0x0001
+#define MADERA_LHPF3_ENA_SHIFT 0
+#define MADERA_LHPF3_ENA_WIDTH 1
+
+/* (0x0EC9) HPLPF3_2 */
+#define MADERA_LHPF3_COEFF_MASK 0xFFFF
+#define MADERA_LHPF3_COEFF_SHIFT 0
+#define MADERA_LHPF3_COEFF_WIDTH 16
+
+/* (0x0ECC) HPLPF4_1 */
+#define MADERA_LHPF4_MODE 0x0002
+#define MADERA_LHPF4_MODE_MASK 0x0002
+#define MADERA_LHPF4_MODE_SHIFT 1
+#define MADERA_LHPF4_MODE_WIDTH 1
+#define MADERA_LHPF4_ENA 0x0001
+#define MADERA_LHPF4_ENA_MASK 0x0001
+#define MADERA_LHPF4_ENA_SHIFT 0
+#define MADERA_LHPF4_ENA_WIDTH 1
+
+/* (0x0ECD) HPLPF4_2 */
+#define MADERA_LHPF4_COEFF_MASK 0xFFFF
+#define MADERA_LHPF4_COEFF_SHIFT 0
+#define MADERA_LHPF4_COEFF_WIDTH 16
+
+/* (0x0ED0) ASRC2_ENABLE */
+#define MADERA_ASRC2_IN2L_ENA 0x0008
+#define MADERA_ASRC2_IN2L_ENA_MASK 0x0008
+#define MADERA_ASRC2_IN2L_ENA_SHIFT 3
+#define MADERA_ASRC2_IN2L_ENA_WIDTH 1
+#define MADERA_ASRC2_IN2R_ENA 0x0004
+#define MADERA_ASRC2_IN2R_ENA_MASK 0x0004
+#define MADERA_ASRC2_IN2R_ENA_SHIFT 2
+#define MADERA_ASRC2_IN2R_ENA_WIDTH 1
+#define MADERA_ASRC2_IN1L_ENA 0x0002
+#define MADERA_ASRC2_IN1L_ENA_MASK 0x0002
+#define MADERA_ASRC2_IN1L_ENA_SHIFT 1
+#define MADERA_ASRC2_IN1L_ENA_WIDTH 1
+#define MADERA_ASRC2_IN1R_ENA 0x0001
+#define MADERA_ASRC2_IN1R_ENA_MASK 0x0001
+#define MADERA_ASRC2_IN1R_ENA_SHIFT 0
+#define MADERA_ASRC2_IN1R_ENA_WIDTH 1
+
+/* (0x0ED2) ASRC2_RATE1 */
+#define MADERA_ASRC2_RATE1_MASK 0xF800
+#define MADERA_ASRC2_RATE1_SHIFT 11
+#define MADERA_ASRC2_RATE1_WIDTH 5
+
+/* (0x0ED3) ASRC2_RATE2 */
+#define MADERA_ASRC2_RATE2_MASK 0xF800
+#define MADERA_ASRC2_RATE2_SHIFT 11
+#define MADERA_ASRC2_RATE2_WIDTH 5
+
+/* (0x0EE0) ASRC1_ENABLE */
+#define MADERA_ASRC1_IN2L_ENA 0x0008
+#define MADERA_ASRC1_IN2L_ENA_MASK 0x0008
+#define MADERA_ASRC1_IN2L_ENA_SHIFT 3
+#define MADERA_ASRC1_IN2L_ENA_WIDTH 1
+#define MADERA_ASRC1_IN2R_ENA 0x0004
+#define MADERA_ASRC1_IN2R_ENA_MASK 0x0004
+#define MADERA_ASRC1_IN2R_ENA_SHIFT 2
+#define MADERA_ASRC1_IN2R_ENA_WIDTH 1
+#define MADERA_ASRC1_IN1L_ENA 0x0002
+#define MADERA_ASRC1_IN1L_ENA_MASK 0x0002
+#define MADERA_ASRC1_IN1L_ENA_SHIFT 1
+#define MADERA_ASRC1_IN1L_ENA_WIDTH 1
+#define MADERA_ASRC1_IN1R_ENA 0x0001
+#define MADERA_ASRC1_IN1R_ENA_MASK 0x0001
+#define MADERA_ASRC1_IN1R_ENA_SHIFT 0
+#define MADERA_ASRC1_IN1R_ENA_WIDTH 1
+
+/* (0x0EE2) ASRC1_RATE1 */
+#define MADERA_ASRC1_RATE1_MASK 0xF800
+#define MADERA_ASRC1_RATE1_SHIFT 11
+#define MADERA_ASRC1_RATE1_WIDTH 5
+
+/* (0x0EE3) ASRC1_RATE2 */
+#define MADERA_ASRC1_RATE2_MASK 0xF800
+#define MADERA_ASRC1_RATE2_SHIFT 11
+#define MADERA_ASRC1_RATE2_WIDTH 5
+
+/* (0x0EF0) - ISRC1 CTRL 1 */
+#define MADERA_ISRC1_FSH_MASK 0xF800
+#define MADERA_ISRC1_FSH_SHIFT 11
+#define MADERA_ISRC1_FSH_WIDTH 5
+#define MADERA_ISRC1_CLK_SEL_MASK 0x0700
+#define MADERA_ISRC1_CLK_SEL_SHIFT 8
+#define MADERA_ISRC1_CLK_SEL_WIDTH 3
+
+/* (0x0EF1) ISRC1_CTRL_2 */
+#define MADERA_ISRC1_FSL_MASK 0xF800
+#define MADERA_ISRC1_FSL_SHIFT 11
+#define MADERA_ISRC1_FSL_WIDTH 5
+
+/* (0x0EF2) ISRC1_CTRL_3 */
+#define MADERA_ISRC1_INT1_ENA 0x8000
+#define MADERA_ISRC1_INT1_ENA_MASK 0x8000
+#define MADERA_ISRC1_INT1_ENA_SHIFT 15
+#define MADERA_ISRC1_INT1_ENA_WIDTH 1
+#define MADERA_ISRC1_INT2_ENA 0x4000
+#define MADERA_ISRC1_INT2_ENA_MASK 0x4000
+#define MADERA_ISRC1_INT2_ENA_SHIFT 14
+#define MADERA_ISRC1_INT2_ENA_WIDTH 1
+#define MADERA_ISRC1_INT3_ENA 0x2000
+#define MADERA_ISRC1_INT3_ENA_MASK 0x2000
+#define MADERA_ISRC1_INT3_ENA_SHIFT 13
+#define MADERA_ISRC1_INT3_ENA_WIDTH 1
+#define MADERA_ISRC1_INT4_ENA 0x1000
+#define MADERA_ISRC1_INT4_ENA_MASK 0x1000
+#define MADERA_ISRC1_INT4_ENA_SHIFT 12
+#define MADERA_ISRC1_INT4_ENA_WIDTH 1
+#define MADERA_ISRC1_DEC1_ENA 0x0200
+#define MADERA_ISRC1_DEC1_ENA_MASK 0x0200
+#define MADERA_ISRC1_DEC1_ENA_SHIFT 9
+#define MADERA_ISRC1_DEC1_ENA_WIDTH 1
+#define MADERA_ISRC1_DEC2_ENA 0x0100
+#define MADERA_ISRC1_DEC2_ENA_MASK 0x0100
+#define MADERA_ISRC1_DEC2_ENA_SHIFT 8
+#define MADERA_ISRC1_DEC2_ENA_WIDTH 1
+#define MADERA_ISRC1_DEC3_ENA 0x0080
+#define MADERA_ISRC1_DEC3_ENA_MASK 0x0080
+#define MADERA_ISRC1_DEC3_ENA_SHIFT 7
+#define MADERA_ISRC1_DEC3_ENA_WIDTH 1
+#define MADERA_ISRC1_DEC4_ENA 0x0040
+#define MADERA_ISRC1_DEC4_ENA_MASK 0x0040
+#define MADERA_ISRC1_DEC4_ENA_SHIFT 6
+#define MADERA_ISRC1_DEC4_ENA_WIDTH 1
+#define MADERA_ISRC1_NOTCH_ENA 0x0001
+#define MADERA_ISRC1_NOTCH_ENA_MASK 0x0001
+#define MADERA_ISRC1_NOTCH_ENA_SHIFT 0
+#define MADERA_ISRC1_NOTCH_ENA_WIDTH 1
+
+/* (0x0EF3) ISRC2_CTRL_1 */
+#define MADERA_ISRC2_FSH_MASK 0xF800
+#define MADERA_ISRC2_FSH_SHIFT 11
+#define MADERA_ISRC2_FSH_WIDTH 5
+#define MADERA_ISRC2_CLK_SEL_MASK 0x0700
+#define MADERA_ISRC2_CLK_SEL_SHIFT 8
+#define MADERA_ISRC2_CLK_SEL_WIDTH 3
+
+/* (0x0EF4) ISRC2_CTRL_2 */
+#define MADERA_ISRC2_FSL_MASK 0xF800
+#define MADERA_ISRC2_FSL_SHIFT 11
+#define MADERA_ISRC2_FSL_WIDTH 5
+
+/* (0x0EF5) ISRC2_CTRL_3 */
+#define MADERA_ISRC2_INT1_ENA 0x8000
+#define MADERA_ISRC2_INT1_ENA_MASK 0x8000
+#define MADERA_ISRC2_INT1_ENA_SHIFT 15
+#define MADERA_ISRC2_INT1_ENA_WIDTH 1
+#define MADERA_ISRC2_INT2_ENA 0x4000
+#define MADERA_ISRC2_INT2_ENA_MASK 0x4000
+#define MADERA_ISRC2_INT2_ENA_SHIFT 14
+#define MADERA_ISRC2_INT2_ENA_WIDTH 1
+#define MADERA_ISRC2_INT3_ENA 0x2000
+#define MADERA_ISRC2_INT3_ENA_MASK 0x2000
+#define MADERA_ISRC2_INT3_ENA_SHIFT 13
+#define MADERA_ISRC2_INT3_ENA_WIDTH 1
+#define MADERA_ISRC2_INT4_ENA 0x1000
+#define MADERA_ISRC2_INT4_ENA_MASK 0x1000
+#define MADERA_ISRC2_INT4_ENA_SHIFT 12
+#define MADERA_ISRC2_INT4_ENA_WIDTH 1
+#define MADERA_ISRC2_DEC1_ENA 0x0200
+#define MADERA_ISRC2_DEC1_ENA_MASK 0x0200
+#define MADERA_ISRC2_DEC1_ENA_SHIFT 9
+#define MADERA_ISRC2_DEC1_ENA_WIDTH 1
+#define MADERA_ISRC2_DEC2_ENA 0x0100
+#define MADERA_ISRC2_DEC2_ENA_MASK 0x0100
+#define MADERA_ISRC2_DEC2_ENA_SHIFT 8
+#define MADERA_ISRC2_DEC2_ENA_WIDTH 1
+#define MADERA_ISRC2_DEC3_ENA 0x0080
+#define MADERA_ISRC2_DEC3_ENA_MASK 0x0080
+#define MADERA_ISRC2_DEC3_ENA_SHIFT 7
+#define MADERA_ISRC2_DEC3_ENA_WIDTH 1
+#define MADERA_ISRC2_DEC4_ENA 0x0040
+#define MADERA_ISRC2_DEC4_ENA_MASK 0x0040
+#define MADERA_ISRC2_DEC4_ENA_SHIFT 6
+#define MADERA_ISRC2_DEC4_ENA_WIDTH 1
+#define MADERA_ISRC2_NOTCH_ENA 0x0001
+#define MADERA_ISRC2_NOTCH_ENA_MASK 0x0001
+#define MADERA_ISRC2_NOTCH_ENA_SHIFT 0
+#define MADERA_ISRC2_NOTCH_ENA_WIDTH 1
+
+/* (0x0EF6) ISRC3_CTRL_1 */
+#define MADERA_ISRC3_FSH_MASK 0xF800
+#define MADERA_ISRC3_FSH_SHIFT 11
+#define MADERA_ISRC3_FSH_WIDTH 5
+#define MADERA_ISRC3_CLK_SEL_MASK 0x0700
+#define MADERA_ISRC3_CLK_SEL_SHIFT 8
+#define MADERA_ISRC3_CLK_SEL_WIDTH 3
+
+/* (0x0EF7) ISRC3_CTRL_2 */
+#define MADERA_ISRC3_FSL_MASK 0xF800
+#define MADERA_ISRC3_FSL_SHIFT 11
+#define MADERA_ISRC3_FSL_WIDTH 5
+
+/* (0x0EF8) ISRC3_CTRL_3 */
+#define MADERA_ISRC3_INT1_ENA 0x8000
+#define MADERA_ISRC3_INT1_ENA_MASK 0x8000
+#define MADERA_ISRC3_INT1_ENA_SHIFT 15
+#define MADERA_ISRC3_INT1_ENA_WIDTH 1
+#define MADERA_ISRC3_INT2_ENA 0x4000
+#define MADERA_ISRC3_INT2_ENA_MASK 0x4000
+#define MADERA_ISRC3_INT2_ENA_SHIFT 14
+#define MADERA_ISRC3_INT2_ENA_WIDTH 1
+#define MADERA_ISRC3_INT3_ENA 0x2000
+#define MADERA_ISRC3_INT3_ENA_MASK 0x2000
+#define MADERA_ISRC3_INT3_ENA_SHIFT 13
+#define MADERA_ISRC3_INT3_ENA_WIDTH 1
+#define MADERA_ISRC3_INT4_ENA 0x1000
+#define MADERA_ISRC3_INT4_ENA_MASK 0x1000
+#define MADERA_ISRC3_INT4_ENA_SHIFT 12
+#define MADERA_ISRC3_INT4_ENA_WIDTH 1
+#define MADERA_ISRC3_DEC1_ENA 0x0200
+#define MADERA_ISRC3_DEC1_ENA_MASK 0x0200
+#define MADERA_ISRC3_DEC1_ENA_SHIFT 9
+#define MADERA_ISRC3_DEC1_ENA_WIDTH 1
+#define MADERA_ISRC3_DEC2_ENA 0x0100
+#define MADERA_ISRC3_DEC2_ENA_MASK 0x0100
+#define MADERA_ISRC3_DEC2_ENA_SHIFT 8
+#define MADERA_ISRC3_DEC2_ENA_WIDTH 1
+#define MADERA_ISRC3_DEC3_ENA 0x0080
+#define MADERA_ISRC3_DEC3_ENA_MASK 0x0080
+#define MADERA_ISRC3_DEC3_ENA_SHIFT 7
+#define MADERA_ISRC3_DEC3_ENA_WIDTH 1
+#define MADERA_ISRC3_DEC4_ENA 0x0040
+#define MADERA_ISRC3_DEC4_ENA_MASK 0x0040
+#define MADERA_ISRC3_DEC4_ENA_SHIFT 6
+#define MADERA_ISRC3_DEC4_ENA_WIDTH 1
+#define MADERA_ISRC3_NOTCH_ENA 0x0001
+#define MADERA_ISRC3_NOTCH_ENA_MASK 0x0001
+#define MADERA_ISRC3_NOTCH_ENA_SHIFT 0
+#define MADERA_ISRC3_NOTCH_ENA_WIDTH 1
+
+/* (0x0EF9) ISRC4_CTRL_1 */
+#define MADERA_ISRC4_FSH_MASK 0xF800
+#define MADERA_ISRC4_FSH_SHIFT 11
+#define MADERA_ISRC4_FSH_WIDTH 5
+#define MADERA_ISRC4_CLK_SEL_MASK 0x0700
+#define MADERA_ISRC4_CLK_SEL_SHIFT 8
+#define MADERA_ISRC4_CLK_SEL_WIDTH 3
+
+/* (0x0EFA) ISRC4_CTRL_2 */
+#define MADERA_ISRC4_FSL_MASK 0xF800
+#define MADERA_ISRC4_FSL_SHIFT 11
+#define MADERA_ISRC4_FSL_WIDTH 5
+
+/* (0x0EFB) ISRC4_CTRL_3 */
+#define MADERA_ISRC4_INT1_ENA 0x8000
+#define MADERA_ISRC4_INT1_ENA_MASK 0x8000
+#define MADERA_ISRC4_INT1_ENA_SHIFT 15
+#define MADERA_ISRC4_INT1_ENA_WIDTH 1
+#define MADERA_ISRC4_INT2_ENA 0x4000
+#define MADERA_ISRC4_INT2_ENA_MASK 0x4000
+#define MADERA_ISRC4_INT2_ENA_SHIFT 14
+#define MADERA_ISRC4_INT2_ENA_WIDTH 1
+#define MADERA_ISRC4_INT3_ENA 0x2000
+#define MADERA_ISRC4_INT3_ENA_MASK 0x2000
+#define MADERA_ISRC4_INT3_ENA_SHIFT 13
+#define MADERA_ISRC4_INT3_ENA_WIDTH 1
+#define MADERA_ISRC4_INT4_ENA 0x1000
+#define MADERA_ISRC4_INT4_ENA_MASK 0x1000
+#define MADERA_ISRC4_INT4_ENA_SHIFT 12
+#define MADERA_ISRC4_INT4_ENA_WIDTH 1
+#define MADERA_ISRC4_DEC1_ENA 0x0200
+#define MADERA_ISRC4_DEC1_ENA_MASK 0x0200
+#define MADERA_ISRC4_DEC1_ENA_SHIFT 9
+#define MADERA_ISRC4_DEC1_ENA_WIDTH 1
+#define MADERA_ISRC4_DEC2_ENA 0x0100
+#define MADERA_ISRC4_DEC2_ENA_MASK 0x0100
+#define MADERA_ISRC4_DEC2_ENA_SHIFT 8
+#define MADERA_ISRC4_DEC2_ENA_WIDTH 1
+#define MADERA_ISRC4_DEC3_ENA 0x0080
+#define MADERA_ISRC4_DEC3_ENA_MASK 0x0080
+#define MADERA_ISRC4_DEC3_ENA_SHIFT 7
+#define MADERA_ISRC4_DEC3_ENA_WIDTH 1
+#define MADERA_ISRC4_DEC4_ENA 0x0040
+#define MADERA_ISRC4_DEC4_ENA_MASK 0x0040
+#define MADERA_ISRC4_DEC4_ENA_SHIFT 6
+#define MADERA_ISRC4_DEC4_ENA_WIDTH 1
+#define MADERA_ISRC4_NOTCH_ENA 0x0001
+#define MADERA_ISRC4_NOTCH_ENA_MASK 0x0001
+#define MADERA_ISRC4_NOTCH_ENA_SHIFT 0
+#define MADERA_ISRC4_NOTCH_ENA_WIDTH 1
+
+/* (0x0F00) Clock_Control */
+#define MADERA_EXT_NG_SEL_CLR 0x0080
+#define MADERA_EXT_NG_SEL_CLR_MASK 0x0080
+#define MADERA_EXT_NG_SEL_CLR_SHIFT 7
+#define MADERA_EXT_NG_SEL_CLR_WIDTH 1
+#define MADERA_EXT_NG_SEL_SET 0x0040
+#define MADERA_EXT_NG_SEL_SET_MASK 0x0040
+#define MADERA_EXT_NG_SEL_SET_SHIFT 6
+#define MADERA_EXT_NG_SEL_SET_WIDTH 1
+#define MADERA_CLK_R_ENA_CLR 0x0020
+#define MADERA_CLK_R_ENA_CLR_MASK 0x0020
+#define MADERA_CLK_R_ENA_CLR_SHIFT 5
+#define MADERA_CLK_R_ENA_CLR_WIDTH 1
+#define MADERA_CLK_R_ENA_SET 0x0010
+#define MADERA_CLK_R_ENA_SET_MASK 0x0010
+#define MADERA_CLK_R_ENA_SET_SHIFT 4
+#define MADERA_CLK_R_ENA_SET_WIDTH 1
+#define MADERA_CLK_NG_ENA_CLR 0x0008
+#define MADERA_CLK_NG_ENA_CLR_MASK 0x0008
+#define MADERA_CLK_NG_ENA_CLR_SHIFT 3
+#define MADERA_CLK_NG_ENA_CLR_WIDTH 1
+#define MADERA_CLK_NG_ENA_SET 0x0004
+#define MADERA_CLK_NG_ENA_SET_MASK 0x0004
+#define MADERA_CLK_NG_ENA_SET_SHIFT 2
+#define MADERA_CLK_NG_ENA_SET_WIDTH 1
+#define MADERA_CLK_L_ENA_CLR 0x0002
+#define MADERA_CLK_L_ENA_CLR_MASK 0x0002
+#define MADERA_CLK_L_ENA_CLR_SHIFT 1
+#define MADERA_CLK_L_ENA_CLR_WIDTH 1
+#define MADERA_CLK_L_ENA_SET 0x0001
+#define MADERA_CLK_L_ENA_SET_MASK 0x0001
+#define MADERA_CLK_L_ENA_SET_SHIFT 0
+#define MADERA_CLK_L_ENA_SET_WIDTH 1
+
+/* (0x0F01) ANC_SRC */
+#define MADERA_IN_RXANCR_SEL_MASK 0x0070
+#define MADERA_IN_RXANCR_SEL_SHIFT 4
+#define MADERA_IN_RXANCR_SEL_WIDTH 3
+#define MADERA_IN_RXANCL_SEL_MASK 0x0007
+#define MADERA_IN_RXANCL_SEL_SHIFT 0
+#define MADERA_IN_RXANCL_SEL_WIDTH 3
+
+/* (0x0F17) FCL_ADC_reformatter_control */
+#define MADERA_FCL_MIC_MODE_SEL 0x000C
+#define MADERA_FCL_MIC_MODE_SEL_SHIFT 2
+#define MADERA_FCL_MIC_MODE_SEL_WIDTH 2
+
+/* (0x0F73) FCR_ADC_reformatter_control */
+#define MADERA_FCR_MIC_MODE_SEL 0x000C
+#define MADERA_FCR_MIC_MODE_SEL_SHIFT 2
+#define MADERA_FCR_MIC_MODE_SEL_WIDTH 2
+
+/* (0x1480) DFC1_CTRL_W0 */
+#define MADERA_DFC1_RATE_MASK 0x007C
+#define MADERA_DFC1_RATE_SHIFT 2
+#define MADERA_DFC1_RATE_WIDTH 5
+#define MADERA_DFC1_DITH_ENA 0x0002
+#define MADERA_DFC1_DITH_ENA_MASK 0x0002
+#define MADERA_DFC1_DITH_ENA_SHIFT 1
+#define MADERA_DFC1_DITH_ENA_WIDTH 1
+#define MADERA_DFC1_ENA 0x0001
+#define MADERA_DFC1_ENA_MASK 0x0001
+#define MADERA_DFC1_ENA_SHIFT 0
+#define MADERA_DFC1_ENA_WIDTH 1
+
+/* (0x1482) DFC1_RX_W0 */
+#define MADERA_DFC1_RX_DATA_WIDTH_MASK 0x1F00
+#define MADERA_DFC1_RX_DATA_WIDTH_SHIFT 8
+#define MADERA_DFC1_RX_DATA_WIDTH_WIDTH 5
+
+#define MADERA_DFC1_RX_DATA_TYPE_MASK 0x0007
+#define MADERA_DFC1_RX_DATA_TYPE_SHIFT 0
+#define MADERA_DFC1_RX_DATA_TYPE_WIDTH 3
+
+/* (0x1484) DFC1_TX_W0 */
+#define MADERA_DFC1_TX_DATA_WIDTH_MASK 0x1F00
+#define MADERA_DFC1_TX_DATA_WIDTH_SHIFT 8
+#define MADERA_DFC1_TX_DATA_WIDTH_WIDTH 5
+
+#define MADERA_DFC1_TX_DATA_TYPE_MASK 0x0007
+#define MADERA_DFC1_TX_DATA_TYPE_SHIFT 0
+#define MADERA_DFC1_TX_DATA_TYPE_WIDTH 3
+
+/* (0x1600) ADSP2_IRQ0 */
+#define MADERA_DSP_IRQ2 0x0002
+#define MADERA_DSP_IRQ1 0x0001
+
+/* (0x1601) ADSP2_IRQ1 */
+#define MADERA_DSP_IRQ4 0x0002
+#define MADERA_DSP_IRQ3 0x0001
+
+/* (0x1602) ADSP2_IRQ2 */
+#define MADERA_DSP_IRQ6 0x0002
+#define MADERA_DSP_IRQ5 0x0001
+
+/* (0x1603) ADSP2_IRQ3 */
+#define MADERA_DSP_IRQ8 0x0002
+#define MADERA_DSP_IRQ7 0x0001
+
+/* (0x1604) ADSP2_IRQ4 */
+#define MADERA_DSP_IRQ10 0x0002
+#define MADERA_DSP_IRQ9 0x0001
+
+/* (0x1605) ADSP2_IRQ5 */
+#define MADERA_DSP_IRQ12 0x0002
+#define MADERA_DSP_IRQ11 0x0001
+
+/* (0x1606) ADSP2_IRQ6 */
+#define MADERA_DSP_IRQ14 0x0002
+#define MADERA_DSP_IRQ13 0x0001
+
+/* (0x1607) ADSP2_IRQ7 */
+#define MADERA_DSP_IRQ16 0x0002
+#define MADERA_DSP_IRQ15 0x0001
+
+/* (0x1700) GPIO1_CTRL_1 */
+#define MADERA_GP1_LVL 0x8000
+#define MADERA_GP1_LVL_MASK 0x8000
+#define MADERA_GP1_LVL_SHIFT 15
+#define MADERA_GP1_LVL_WIDTH 1
+#define MADERA_GP1_OP_CFG 0x4000
+#define MADERA_GP1_OP_CFG_MASK 0x4000
+#define MADERA_GP1_OP_CFG_SHIFT 14
+#define MADERA_GP1_OP_CFG_WIDTH 1
+#define MADERA_GP1_DB 0x2000
+#define MADERA_GP1_DB_MASK 0x2000
+#define MADERA_GP1_DB_SHIFT 13
+#define MADERA_GP1_DB_WIDTH 1
+#define MADERA_GP1_POL 0x1000
+#define MADERA_GP1_POL_MASK 0x1000
+#define MADERA_GP1_POL_SHIFT 12
+#define MADERA_GP1_POL_WIDTH 1
+#define MADERA_GP1_IP_CFG 0x0800
+#define MADERA_GP1_IP_CFG_MASK 0x0800
+#define MADERA_GP1_IP_CFG_SHIFT 11
+#define MADERA_GP1_IP_CFG_WIDTH 1
+#define MADERA_GP1_FN_MASK 0x03FF
+#define MADERA_GP1_FN_SHIFT 0
+#define MADERA_GP1_FN_WIDTH 10
+
+/* (0x1701) GPIO1_CTRL_2 */
+#define MADERA_GP1_DIR 0x8000
+#define MADERA_GP1_DIR_MASK 0x8000
+#define MADERA_GP1_DIR_SHIFT 15
+#define MADERA_GP1_DIR_WIDTH 1
+#define MADERA_GP1_PU 0x4000
+#define MADERA_GP1_PU_MASK 0x4000
+#define MADERA_GP1_PU_SHIFT 14
+#define MADERA_GP1_PU_WIDTH 1
+#define MADERA_GP1_PD 0x2000
+#define MADERA_GP1_PD_MASK 0x2000
+#define MADERA_GP1_PD_SHIFT 13
+#define MADERA_GP1_PD_WIDTH 1
+#define MADERA_GP1_DRV_STR_MASK 0x1800
+#define MADERA_GP1_DRV_STR_SHIFT 11
+#define MADERA_GP1_DRV_STR_WIDTH 2
+
+/* (0x1800) IRQ1_Status_1 */
+#define MADERA_CTRLIF_ERR_EINT1 0x1000
+#define MADERA_CTRLIF_ERR_EINT1_MASK 0x1000
+#define MADERA_CTRLIF_ERR_EINT1_SHIFT 12
+#define MADERA_CTRLIF_ERR_EINT1_WIDTH 1
+#define MADERA_SYSCLK_FAIL_EINT1 0x0200
+#define MADERA_SYSCLK_FAIL_EINT1_MASK 0x0200
+#define MADERA_SYSCLK_FAIL_EINT1_SHIFT 9
+#define MADERA_SYSCLK_FAIL_EINT1_WIDTH 1
+#define MADERA_CLOCK_DETECT_EINT1 0x0100
+#define MADERA_CLOCK_DETECT_EINT1_MASK 0x0100
+#define MADERA_CLOCK_DETECT_EINT1_SHIFT 8
+#define MADERA_CLOCK_DETECT_EINT1_WIDTH 1
+#define MADERA_BOOT_DONE_EINT1 0x0080
+#define MADERA_BOOT_DONE_EINT1_MASK 0x0080
+#define MADERA_BOOT_DONE_EINT1_SHIFT 7
+#define MADERA_BOOT_DONE_EINT1_WIDTH 1
+
+/* (0x1801) IRQ1_Status_2 */
+#define MADERA_FLLAO_LOCK_EINT1 0x0800
+#define MADERA_FLLAO_LOCK_EINT1_MASK 0x0800
+#define MADERA_FLLAO_LOCK_EINT1_SHIFT 11
+#define MADERA_FLLAO_LOCK_EINT1_WIDTH 1
+#define MADERA_FLL3_LOCK_EINT1 0x0400
+#define MADERA_FLL3_LOCK_EINT1_MASK 0x0400
+#define MADERA_FLL3_LOCK_EINT1_SHIFT 10
+#define MADERA_FLL3_LOCK_EINT1_WIDTH 1
+#define MADERA_FLL2_LOCK_EINT1 0x0200
+#define MADERA_FLL2_LOCK_EINT1_MASK 0x0200
+#define MADERA_FLL2_LOCK_EINT1_SHIFT 9
+#define MADERA_FLL2_LOCK_EINT1_WIDTH 1
+#define MADERA_FLL1_LOCK_EINT1 0x0100
+#define MADERA_FLL1_LOCK_EINT1_MASK 0x0100
+#define MADERA_FLL1_LOCK_EINT1_SHIFT 8
+#define MADERA_FLL1_LOCK_EINT1_WIDTH 1
+
+/* (0x1805) IRQ1_Status_6 */
+#define MADERA_MICDET2_EINT1 0x0200
+#define MADERA_MICDET2_EINT1_MASK 0x0200
+#define MADERA_MICDET2_EINT1_SHIFT 9
+#define MADERA_MICDET2_EINT1_WIDTH 1
+#define MADERA_MICDET1_EINT1 0x0100
+#define MADERA_MICDET1_EINT1_MASK 0x0100
+#define MADERA_MICDET1_EINT1_SHIFT 8
+#define MADERA_MICDET1_EINT1_WIDTH 1
+#define MADERA_HPDET_EINT1 0x0001
+#define MADERA_HPDET_EINT1_MASK 0x0001
+#define MADERA_HPDET_EINT1_SHIFT 0
+#define MADERA_HPDET_EINT1_WIDTH 1
+
+/* (0x1806) IRQ1_Status_7 */
+#define MADERA_MICD_CLAMP_FALL_EINT1 0x0020
+#define MADERA_MICD_CLAMP_FALL_EINT1_MASK 0x0020
+#define MADERA_MICD_CLAMP_FALL_EINT1_SHIFT 5
+#define MADERA_MICD_CLAMP_FALL_EINT1_WIDTH 1
+#define MADERA_MICD_CLAMP_RISE_EINT1 0x0010
+#define MADERA_MICD_CLAMP_RISE_EINT1_MASK 0x0010
+#define MADERA_MICD_CLAMP_RISE_EINT1_SHIFT 4
+#define MADERA_MICD_CLAMP_RISE_EINT1_WIDTH 1
+#define MADERA_JD2_FALL_EINT1 0x0008
+#define MADERA_JD2_FALL_EINT1_MASK 0x0008
+#define MADERA_JD2_FALL_EINT1_SHIFT 3
+#define MADERA_JD2_FALL_EINT1_WIDTH 1
+#define MADERA_JD2_RISE_EINT1 0x0004
+#define MADERA_JD2_RISE_EINT1_MASK 0x0004
+#define MADERA_JD2_RISE_EINT1_SHIFT 2
+#define MADERA_JD2_RISE_EINT1_WIDTH 1
+#define MADERA_JD1_FALL_EINT1 0x0002
+#define MADERA_JD1_FALL_EINT1_MASK 0x0002
+#define MADERA_JD1_FALL_EINT1_SHIFT 1
+#define MADERA_JD1_FALL_EINT1_WIDTH 1
+#define MADERA_JD1_RISE_EINT1 0x0001
+#define MADERA_JD1_RISE_EINT1_MASK 0x0001
+#define MADERA_JD1_RISE_EINT1_SHIFT 0
+#define MADERA_JD1_RISE_EINT1_WIDTH 1
+
+/* (0x1808) IRQ1_Status_9 */
+#define MADERA_ASRC2_IN2_LOCK_EINT1 0x0800
+#define MADERA_ASRC2_IN2_LOCK_EINT1_MASK 0x0800
+#define MADERA_ASRC2_IN2_LOCK_EINT1_SHIFT 11
+#define MADERA_ASRC2_IN2_LOCK_EINT1_WIDTH 1
+#define MADERA_ASRC2_IN1_LOCK_EINT1 0x0400
+#define MADERA_ASRC2_IN1_LOCK_EINT1_MASK 0x0400
+#define MADERA_ASRC2_IN1_LOCK_EINT1_SHIFT 10
+#define MADERA_ASRC2_IN1_LOCK_EINT1_WIDTH 1
+#define MADERA_ASRC1_IN2_LOCK_EINT1 0x0200
+#define MADERA_ASRC1_IN2_LOCK_EINT1_MASK 0x0200
+#define MADERA_ASRC1_IN2_LOCK_EINT1_SHIFT 9
+#define MADERA_ASRC1_IN2_LOCK_EINT1_WIDTH 1
+#define MADERA_ASRC1_IN1_LOCK_EINT1 0x0100
+#define MADERA_ASRC1_IN1_LOCK_EINT1_MASK 0x0100
+#define MADERA_ASRC1_IN1_LOCK_EINT1_SHIFT 8
+#define MADERA_ASRC1_IN1_LOCK_EINT1_WIDTH 1
+#define MADERA_DRC2_SIG_DET_EINT1 0x0002
+#define MADERA_DRC2_SIG_DET_EINT1_MASK 0x0002
+#define MADERA_DRC2_SIG_DET_EINT1_SHIFT 1
+#define MADERA_DRC2_SIG_DET_EINT1_WIDTH 1
+#define MADERA_DRC1_SIG_DET_EINT1 0x0001
+#define MADERA_DRC1_SIG_DET_EINT1_MASK 0x0001
+#define MADERA_DRC1_SIG_DET_EINT1_SHIFT 0
+#define MADERA_DRC1_SIG_DET_EINT1_WIDTH 1
+
+/* (0x180A) IRQ1_Status_11 */
+#define MADERA_DSP_IRQ16_EINT1 0x8000
+#define MADERA_DSP_IRQ16_EINT1_MASK 0x8000
+#define MADERA_DSP_IRQ16_EINT1_SHIFT 15
+#define MADERA_DSP_IRQ16_EINT1_WIDTH 1
+#define MADERA_DSP_IRQ15_EINT1 0x4000
+#define MADERA_DSP_IRQ15_EINT1_MASK 0x4000
+#define MADERA_DSP_IRQ15_EINT1_SHIFT 14
+#define MADERA_DSP_IRQ15_EINT1_WIDTH 1
+#define MADERA_DSP_IRQ14_EINT1 0x2000
+#define MADERA_DSP_IRQ14_EINT1_MASK 0x2000
+#define MADERA_DSP_IRQ14_EINT1_SHIFT 13
+#define MADERA_DSP_IRQ14_EINT1_WIDTH 1
+#define MADERA_DSP_IRQ13_EINT1 0x1000
+#define MADERA_DSP_IRQ13_EINT1_MASK 0x1000
+#define MADERA_DSP_IRQ13_EINT1_SHIFT 12
+#define MADERA_DSP_IRQ13_EINT1_WIDTH 1
+#define MADERA_DSP_IRQ12_EINT1 0x0800
+#define MADERA_DSP_IRQ12_EINT1_MASK 0x0800
+#define MADERA_DSP_IRQ12_EINT1_SHIFT 11
+#define MADERA_DSP_IRQ12_EINT1_WIDTH 1
+#define MADERA_DSP_IRQ11_EINT1 0x0400
+#define MADERA_DSP_IRQ11_EINT1_MASK 0x0400
+#define MADERA_DSP_IRQ11_EINT1_SHIFT 10
+#define MADERA_DSP_IRQ11_EINT1_WIDTH 1
+#define MADERA_DSP_IRQ10_EINT1 0x0200
+#define MADERA_DSP_IRQ10_EINT1_MASK 0x0200
+#define MADERA_DSP_IRQ10_EINT1_SHIFT 9
+#define MADERA_DSP_IRQ10_EINT1_WIDTH 1
+#define MADERA_DSP_IRQ9_EINT1 0x0100
+#define MADERA_DSP_IRQ9_EINT1_MASK 0x0100
+#define MADERA_DSP_IRQ9_EINT1_SHIFT 8
+#define MADERA_DSP_IRQ9_EINT1_WIDTH 1
+#define MADERA_DSP_IRQ8_EINT1 0x0080
+#define MADERA_DSP_IRQ8_EINT1_MASK 0x0080
+#define MADERA_DSP_IRQ8_EINT1_SHIFT 7
+#define MADERA_DSP_IRQ8_EINT1_WIDTH 1
+#define MADERA_DSP_IRQ7_EINT1 0x0040
+#define MADERA_DSP_IRQ7_EINT1_MASK 0x0040
+#define MADERA_DSP_IRQ7_EINT1_SHIFT 6
+#define MADERA_DSP_IRQ7_EINT1_WIDTH 1
+#define MADERA_DSP_IRQ6_EINT1 0x0020
+#define MADERA_DSP_IRQ6_EINT1_MASK 0x0020
+#define MADERA_DSP_IRQ6_EINT1_SHIFT 5
+#define MADERA_DSP_IRQ6_EINT1_WIDTH 1
+#define MADERA_DSP_IRQ5_EINT1 0x0010
+#define MADERA_DSP_IRQ5_EINT1_MASK 0x0010
+#define MADERA_DSP_IRQ5_EINT1_SHIFT 4
+#define MADERA_DSP_IRQ5_EINT1_WIDTH 1
+#define MADERA_DSP_IRQ4_EINT1 0x0008
+#define MADERA_DSP_IRQ4_EINT1_MASK 0x0008
+#define MADERA_DSP_IRQ4_EINT1_SHIFT 3
+#define MADERA_DSP_IRQ4_EINT1_WIDTH 1
+#define MADERA_DSP_IRQ3_EINT1 0x0004
+#define MADERA_DSP_IRQ3_EINT1_MASK 0x0004
+#define MADERA_DSP_IRQ3_EINT1_SHIFT 2
+#define MADERA_DSP_IRQ3_EINT1_WIDTH 1
+#define MADERA_DSP_IRQ2_EINT1 0x0002
+#define MADERA_DSP_IRQ2_EINT1_MASK 0x0002
+#define MADERA_DSP_IRQ2_EINT1_SHIFT 1
+#define MADERA_DSP_IRQ2_EINT1_WIDTH 1
+#define MADERA_DSP_IRQ1_EINT1 0x0001
+#define MADERA_DSP_IRQ1_EINT1_MASK 0x0001
+#define MADERA_DSP_IRQ1_EINT1_SHIFT 0
+#define MADERA_DSP_IRQ1_EINT1_WIDTH 1
+
+/* (0x180B) IRQ1_Status_12 */
+#define MADERA_SPKOUTR_SC_EINT1 0x0080
+#define MADERA_SPKOUTR_SC_EINT1_MASK 0x0080
+#define MADERA_SPKOUTR_SC_EINT1_SHIFT 7
+#define MADERA_SPKOUTR_SC_EINT1_WIDTH 1
+#define MADERA_SPKOUTL_SC_EINT1 0x0040
+#define MADERA_SPKOUTL_SC_EINT1_MASK 0x0040
+#define MADERA_SPKOUTL_SC_EINT1_SHIFT 6
+#define MADERA_SPKOUTL_SC_EINT1_WIDTH 1
+#define MADERA_HP3R_SC_EINT1 0x0020
+#define MADERA_HP3R_SC_EINT1_MASK 0x0020
+#define MADERA_HP3R_SC_EINT1_SHIFT 5
+#define MADERA_HP3R_SC_EINT1_WIDTH 1
+#define MADERA_HP3L_SC_EINT1 0x0010
+#define MADERA_HP3L_SC_EINT1_MASK 0x0010
+#define MADERA_HP3L_SC_EINT1_SHIFT 4
+#define MADERA_HP3L_SC_EINT1_WIDTH 1
+#define MADERA_HP2R_SC_EINT1 0x0008
+#define MADERA_HP2R_SC_EINT1_MASK 0x0008
+#define MADERA_HP2R_SC_EINT1_SHIFT 3
+#define MADERA_HP2R_SC_EINT1_WIDTH 1
+#define MADERA_HP2L_SC_EINT1 0x0004
+#define MADERA_HP2L_SC_EINT1_MASK 0x0004
+#define MADERA_HP2L_SC_EINT1_SHIFT 2
+#define MADERA_HP2L_SC_EINT1_WIDTH 1
+#define MADERA_HP1R_SC_EINT1 0x0002
+#define MADERA_HP1R_SC_EINT1_MASK 0x0002
+#define MADERA_HP1R_SC_EINT1_SHIFT 1
+#define MADERA_HP1R_SC_EINT1_WIDTH 1
+#define MADERA_HP1L_SC_EINT1 0x0001
+#define MADERA_HP1L_SC_EINT1_MASK 0x0001
+#define MADERA_HP1L_SC_EINT1_SHIFT 0
+#define MADERA_HP1L_SC_EINT1_WIDTH 1
+
+/* (0x180E) IRQ1_Status_15 */
+#define MADERA_SPK_OVERHEAT_WARN_EINT1 0x0004
+#define MADERA_SPK_OVERHEAT_WARN_EINT1_MASK 0x0004
+#define MADERA_SPK_OVERHEAT_WARN_EINT1_SHIFT 2
+#define MADERA_SPK_OVERHEAT_WARN_EINT1_WIDTH 1
+#define MADERA_SPK_OVERHEAT_EINT1 0x0002
+#define MADERA_SPK_OVERHEAT_EINT1_MASK 0x0002
+#define MADERA_SPK_OVERHEAT_EINT1_SHIFT 1
+#define MADERA_SPK_OVERHEAT_EINT1_WIDTH 1
+#define MADERA_SPK_SHUTDOWN_EINT1 0x0001
+#define MADERA_SPK_SHUTDOWN_EINT1_MASK 0x0001
+#define MADERA_SPK_SHUTDOWN_EINT1_SHIFT 0
+#define MADERA_SPK_SHUTDOWN_EINT1_WIDTH 1
+
+/* (0x1820) - IRQ1 Status 33 */
+#define MADERA_DSP7_BUS_ERR_EINT1 0x0040
+#define MADERA_DSP7_BUS_ERR_EINT1_MASK 0x0040
+#define MADERA_DSP7_BUS_ERR_EINT1_SHIFT 6
+#define MADERA_DSP7_BUS_ERR_EINT1_WIDTH 1
+#define MADERA_DSP6_BUS_ERR_EINT1 0x0020
+#define MADERA_DSP6_BUS_ERR_EINT1_MASK 0x0020
+#define MADERA_DSP6_BUS_ERR_EINT1_SHIFT 5
+#define MADERA_DSP6_BUS_ERR_EINT1_WIDTH 1
+#define MADERA_DSP5_BUS_ERR_EINT1 0x0010
+#define MADERA_DSP5_BUS_ERR_EINT1_MASK 0x0010
+#define MADERA_DSP5_BUS_ERR_EINT1_SHIFT 4
+#define MADERA_DSP5_BUS_ERR_EINT1_WIDTH 1
+#define MADERA_DSP4_BUS_ERR_EINT1 0x0008
+#define MADERA_DSP4_BUS_ERR_EINT1_MASK 0x0008
+#define MADERA_DSP4_BUS_ERR_EINT1_SHIFT 3
+#define MADERA_DSP4_BUS_ERR_EINT1_WIDTH 1
+#define MADERA_DSP3_BUS_ERR_EINT1 0x0004
+#define MADERA_DSP3_BUS_ERR_EINT1_MASK 0x0004
+#define MADERA_DSP3_BUS_ERR_EINT1_SHIFT 2
+#define MADERA_DSP3_BUS_ERR_EINT1_WIDTH 1
+#define MADERA_DSP2_BUS_ERR_EINT1 0x0002
+#define MADERA_DSP2_BUS_ERR_EINT1_MASK 0x0002
+#define MADERA_DSP2_BUS_ERR_EINT1_SHIFT 1
+#define MADERA_DSP2_BUS_ERR_EINT1_WIDTH 1
+#define MADERA_DSP1_BUS_ERR_EINT1 0x0001
+#define MADERA_DSP1_BUS_ERR_EINT1_MASK 0x0001
+#define MADERA_DSP1_BUS_ERR_EINT1_SHIFT 0
+#define MADERA_DSP1_BUS_ERR_EINT1_WIDTH 1
+
+/* (0x1845) IRQ1_Mask_6 */
+#define MADERA_IM_MICDET2_EINT1 0x0200
+#define MADERA_IM_MICDET2_EINT1_MASK 0x0200
+#define MADERA_IM_MICDET2_EINT1_SHIFT 9
+#define MADERA_IM_MICDET2_EINT1_WIDTH 1
+#define MADERA_IM_MICDET1_EINT1 0x0100
+#define MADERA_IM_MICDET1_EINT1_MASK 0x0100
+#define MADERA_IM_MICDET1_EINT1_SHIFT 8
+#define MADERA_IM_MICDET1_EINT1_WIDTH 1
+#define MADERA_IM_HPDET_EINT1 0x0001
+#define MADERA_IM_HPDET_EINT1_MASK 0x0001
+#define MADERA_IM_HPDET_EINT1_SHIFT 0
+#define MADERA_IM_HPDET_EINT1_WIDTH 1
+
+/* (0x184E) IRQ1_Mask_15 */
+#define MADERA_IM_SPK_OVERHEAT_WARN_EINT1 0x0004
+#define MADERA_IM_SPK_OVERHEAT_WARN_EINT1_MASK 0x0004
+#define MADERA_IM_SPK_OVERHEAT_WARN_EINT1_SHIFT 2
+#define MADERA_IM_SPK_OVERHEAT_WARN_EINT1_WIDTH 1
+#define MADERA_IM_SPK_OVERHEAT_EINT1 0x0002
+#define MADERA_IM_SPK_OVERHEAT_EINT1_MASK 0x0002
+#define MADERA_IM_SPK_OVERHEAT_EINT1_SHIFT 1
+#define MADERA_IM_SPK_OVERHEAT_EINT1_WIDTH 1
+#define MADERA_IM_SPK_SHUTDOWN_EINT1 0x0001
+#define MADERA_IM_SPK_SHUTDOWN_EINT1_MASK 0x0001
+#define MADERA_IM_SPK_SHUTDOWN_EINT1_SHIFT 0
+#define MADERA_IM_SPK_SHUTDOWN_EINT1_WIDTH 1
+
+/* (0x1880) - IRQ1 Raw Status 1 */
+#define MADERA_CTRLIF_ERR_STS1 0x1000
+#define MADERA_CTRLIF_ERR_STS1_MASK 0x1000
+#define MADERA_CTRLIF_ERR_STS1_SHIFT 12
+#define MADERA_CTRLIF_ERR_STS1_WIDTH 1
+#define MADERA_SYSCLK_FAIL_STS1 0x0200
+#define MADERA_SYSCLK_FAIL_STS1_MASK 0x0200
+#define MADERA_SYSCLK_FAIL_STS1_SHIFT 9
+#define MADERA_SYSCLK_FAIL_STS1_WIDTH 1
+#define MADERA_CLOCK_DETECT_STS1 0x0100
+#define MADERA_CLOCK_DETECT_STS1_MASK 0x0100
+#define MADERA_CLOCK_DETECT_STS1_SHIFT 8
+#define MADERA_CLOCK_DETECT_STS1_WIDTH 1
+#define MADERA_BOOT_DONE_STS1 0x0080
+#define MADERA_BOOT_DONE_STS1_MASK 0x0080
+#define MADERA_BOOT_DONE_STS1_SHIFT 7
+#define MADERA_BOOT_DONE_STS1_WIDTH 1
+
+/* (0x1881) - IRQ1 Raw Status 2 */
+#define MADERA_FLL3_LOCK_STS1 0x0400
+#define MADERA_FLL3_LOCK_STS1_MASK 0x0400
+#define MADERA_FLL3_LOCK_STS1_SHIFT 10
+#define MADERA_FLL3_LOCK_STS1_WIDTH 1
+#define MADERA_FLL2_LOCK_STS1 0x0200
+#define MADERA_FLL2_LOCK_STS1_MASK 0x0200
+#define MADERA_FLL2_LOCK_STS1_SHIFT 9
+#define MADERA_FLL2_LOCK_STS1_WIDTH 1
+#define MADERA_FLL1_LOCK_STS1 0x0100
+#define MADERA_FLL1_LOCK_STS1_MASK 0x0100
+#define MADERA_FLL1_LOCK_STS1_SHIFT 8
+#define MADERA_FLL1_LOCK_STS1_WIDTH 1
+
+/* (0x1886) - IRQ1 Raw Status 7 */
+#define MADERA_MICD_CLAMP_FALL_STS1 0x0020
+#define MADERA_MICD_CLAMP_FALL_STS1_MASK 0x0020
+#define MADERA_MICD_CLAMP_FALL_STS1_SHIFT 5
+#define MADERA_MICD_CLAMP_FALL_STS1_WIDTH 1
+#define MADERA_MICD_CLAMP_RISE_STS1 0x0010
+#define MADERA_MICD_CLAMP_RISE_STS1_MASK 0x0010
+#define MADERA_MICD_CLAMP_RISE_STS1_SHIFT 4
+#define MADERA_MICD_CLAMP_RISE_STS1_WIDTH 1
+#define MADERA_JD2_FALL_STS1 0x0008
+#define MADERA_JD2_FALL_STS1_MASK 0x0008
+#define MADERA_JD2_FALL_STS1_SHIFT 3
+#define MADERA_JD2_FALL_STS1_WIDTH 1
+#define MADERA_JD2_RISE_STS1 0x0004
+#define MADERA_JD2_RISE_STS1_MASK 0x0004
+#define MADERA_JD2_RISE_STS1_SHIFT 2
+#define MADERA_JD2_RISE_STS1_WIDTH 1
+#define MADERA_JD1_FALL_STS1 0x0002
+#define MADERA_JD1_FALL_STS1_MASK 0x0002
+#define MADERA_JD1_FALL_STS1_SHIFT 1
+#define MADERA_JD1_FALL_STS1_WIDTH 1
+#define MADERA_JD1_RISE_STS1 0x0001
+#define MADERA_JD1_RISE_STS1_MASK 0x0001
+#define MADERA_JD1_RISE_STS1_SHIFT 0
+#define MADERA_JD1_RISE_STS1_WIDTH 1
+
+/* (0x188E) - IRQ1 Raw Status 15 */
+#define MADERA_SPK_OVERHEAT_WARN_STS1 0x0004
+#define MADERA_SPK_OVERHEAT_WARN_STS1_MASK 0x0004
+#define MADERA_SPK_OVERHEAT_WARN_STS1_SHIFT 2
+#define MADERA_SPK_OVERHEAT_WARN_STS1_WIDTH 1
+#define MADERA_SPK_OVERHEAT_STS1 0x0002
+#define MADERA_SPK_OVERHEAT_STS1_MASK 0x0002
+#define MADERA_SPK_OVERHEAT_STS1_SHIFT 1
+#define MADERA_SPK_OVERHEAT_STS1_WIDTH 1
+#define MADERA_SPK_SHUTDOWN_STS1 0x0001
+#define MADERA_SPK_SHUTDOWN_STS1_MASK 0x0001
+#define MADERA_SPK_SHUTDOWN_STS1_SHIFT 0
+#define MADERA_SPK_SHUTDOWN_STS1_WIDTH 1
+
+/* (0x1A06) Interrupt_Debounce_7 */
+#define MADERA_MICD_CLAMP_DB 0x0010
+#define MADERA_MICD_CLAMP_DB_MASK 0x0010
+#define MADERA_MICD_CLAMP_DB_SHIFT 4
+#define MADERA_MICD_CLAMP_DB_WIDTH 1
+#define MADERA_JD2_DB 0x0004
+#define MADERA_JD2_DB_MASK 0x0004
+#define MADERA_JD2_DB_SHIFT 2
+#define MADERA_JD2_DB_WIDTH 1
+#define MADERA_JD1_DB 0x0001
+#define MADERA_JD1_DB_MASK 0x0001
+#define MADERA_JD1_DB_SHIFT 0
+#define MADERA_JD1_DB_WIDTH 1
+
+/* (0x1A0E) Interrupt_Debounce_15 */
+#define MADERA_SPK_OVERHEAT_WARN_DB 0x0004
+#define MADERA_SPK_OVERHEAT_WARN_DB_MASK 0x0004
+#define MADERA_SPK_OVERHEAT_WARN_DB_SHIFT 2
+#define MADERA_SPK_OVERHEAT_WARN_DB_WIDTH 1
+#define MADERA_SPK_OVERHEAT_DB 0x0002
+#define MADERA_SPK_OVERHEAT_DB_MASK 0x0002
+#define MADERA_SPK_OVERHEAT_DB_SHIFT 1
+#define MADERA_SPK_OVERHEAT_DB_WIDTH 1
+
+/* (0x1A80) IRQ1_CTRL */
+#define MADERA_IM_IRQ1 0x0800
+#define MADERA_IM_IRQ1_MASK 0x0800
+#define MADERA_IM_IRQ1_SHIFT 11
+#define MADERA_IM_IRQ1_WIDTH 1
+#define MADERA_IRQ_POL 0x0400
+#define MADERA_IRQ_POL_MASK 0x0400
+#define MADERA_IRQ_POL_SHIFT 10
+#define MADERA_IRQ_POL_WIDTH 1
+
+/* (0x20004) OTP_HPDET_Cal_1 */
+#define MADERA_OTP_HPDET_CALIB_OFFSET_11 0xFF000000
+#define MADERA_OTP_HPDET_CALIB_OFFSET_11_MASK 0xFF000000
+#define MADERA_OTP_HPDET_CALIB_OFFSET_11_SHIFT 24
+#define MADERA_OTP_HPDET_CALIB_OFFSET_11_WIDTH 8
+#define MADERA_OTP_HPDET_CALIB_OFFSET_10 0x00FF0000
+#define MADERA_OTP_HPDET_CALIB_OFFSET_10_MASK 0x00FF0000
+#define MADERA_OTP_HPDET_CALIB_OFFSET_10_SHIFT 16
+#define MADERA_OTP_HPDET_CALIB_OFFSET_10_WIDTH 8
+#define MADERA_OTP_HPDET_CALIB_OFFSET_01 0x0000FF00
+#define MADERA_OTP_HPDET_CALIB_OFFSET_01_MASK 0x0000FF00
+#define MADERA_OTP_HPDET_CALIB_OFFSET_01_SHIFT 8
+#define MADERA_OTP_HPDET_CALIB_OFFSET_01_WIDTH 8
+#define MADERA_OTP_HPDET_CALIB_OFFSET_00 0x000000FF
+#define MADERA_OTP_HPDET_CALIB_OFFSET_00_MASK 0x000000FF
+#define MADERA_OTP_HPDET_CALIB_OFFSET_00_SHIFT 0
+#define MADERA_OTP_HPDET_CALIB_OFFSET_00_WIDTH 8
+
+/* (0x20006) OTP_HPDET_Cal_2 */
+#define MADERA_OTP_HPDET_GRADIENT_1X 0x0000FF00
+#define MADERA_OTP_HPDET_GRADIENT_1X_MASK 0x0000FF00
+#define MADERA_OTP_HPDET_GRADIENT_1X_SHIFT 8
+#define MADERA_OTP_HPDET_GRADIENT_1X_WIDTH 8
+#define MADERA_OTP_HPDET_GRADIENT_0X 0x000000FF
+#define MADERA_OTP_HPDET_GRADIENT_0X_MASK 0x000000FF
+#define MADERA_OTP_HPDET_GRADIENT_0X_SHIFT 0
+#define MADERA_OTP_HPDET_GRADIENT_0X_WIDTH 8
+
+#endif
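
The *_MASK/*_SHIFT/*_WIDTH triplets above follow the usual register-field pattern. A minimal sketch of how such fields are typically accessed through regmap is shown below; it is illustrative only and not part of the patch. The register-address define MADERA_SLIMBUS_RX_CHANNEL_ENABLE is assumed to come from earlier in this header, and the regmap handle from the calling driver.

#include <linux/regmap.h>

/* Enable or disable SLIMbus RX channel 1 without touching the other bits. */
static int madera_slimrx1_enable(struct regmap *regmap, bool enable)
{
	return regmap_update_bits(regmap, MADERA_SLIMBUS_RX_CHANNEL_ENABLE,
				  MADERA_SLIMRX1_ENA_MASK,
				  enable ? MADERA_SLIMRX1_ENA : 0);
}

/* Extract the 4-bit SPD1_RATE field from a raw register value. */
static unsigned int madera_spd1_rate(unsigned int reg_val)
{
	return (reg_val & MADERA_SPD1_RATE_MASK) >> MADERA_SPD1_RATE_SHIFT;
}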
diff --git a/include/linux/mfd/rave-sp.h b/include/linux/mfd/rave-sp.h
index fe0ce7bc59cf..11eef77ef976 100644
--- a/include/linux/mfd/rave-sp.h
+++ b/include/linux/mfd/rave-sp.h
@@ -21,6 +21,7 @@ enum rave_sp_command {
RAVE_SP_CMD_STATUS = 0xA0,
RAVE_SP_CMD_SW_WDT = 0xA1,
RAVE_SP_CMD_PET_WDT = 0xA2,
+ RAVE_SP_CMD_RMB_EEPROM = 0xA4,
RAVE_SP_CMD_SET_BACKLIGHT = 0xA6,
RAVE_SP_CMD_RESET = 0xA7,
RAVE_SP_CMD_RESET_REASON = 0xA8,
diff --git a/include/linux/mfd/rohm-bd718x7.h b/include/linux/mfd/rohm-bd718x7.h
new file mode 100644
index 000000000000..a528747f8aed
--- /dev/null
+++ b/include/linux/mfd/rohm-bd718x7.h
@@ -0,0 +1,332 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* Copyright (C) 2018 ROHM Semiconductors */
+
+#ifndef __LINUX_MFD_BD71837_H__
+#define __LINUX_MFD_BD71837_H__
+
+#include <linux/regmap.h>
+
+enum {
+ BD71837_BUCK1 = 0,
+ BD71837_BUCK2,
+ BD71837_BUCK3,
+ BD71837_BUCK4,
+ BD71837_BUCK5,
+ BD71837_BUCK6,
+ BD71837_BUCK7,
+ BD71837_BUCK8,
+ BD71837_LDO1,
+ BD71837_LDO2,
+ BD71837_LDO3,
+ BD71837_LDO4,
+ BD71837_LDO5,
+ BD71837_LDO6,
+ BD71837_LDO7,
+ BD71837_REGULATOR_CNT,
+};
+
+#define BD71837_BUCK1_VOLTAGE_NUM 0x40
+#define BD71837_BUCK2_VOLTAGE_NUM 0x40
+#define BD71837_BUCK3_VOLTAGE_NUM 0x40
+#define BD71837_BUCK4_VOLTAGE_NUM 0x40
+
+#define BD71837_BUCK5_VOLTAGE_NUM 0x08
+#define BD71837_BUCK6_VOLTAGE_NUM 0x04
+#define BD71837_BUCK7_VOLTAGE_NUM 0x08
+#define BD71837_BUCK8_VOLTAGE_NUM 0x40
+
+#define BD71837_LDO1_VOLTAGE_NUM 0x04
+#define BD71837_LDO2_VOLTAGE_NUM 0x02
+#define BD71837_LDO3_VOLTAGE_NUM 0x10
+#define BD71837_LDO4_VOLTAGE_NUM 0x10
+#define BD71837_LDO5_VOLTAGE_NUM 0x10
+#define BD71837_LDO6_VOLTAGE_NUM 0x10
+#define BD71837_LDO7_VOLTAGE_NUM 0x10
+
+enum {
+ BD71837_REG_REV = 0x00,
+ BD71837_REG_SWRESET = 0x01,
+ BD71837_REG_I2C_DEV = 0x02,
+ BD71837_REG_PWRCTRL0 = 0x03,
+ BD71837_REG_PWRCTRL1 = 0x04,
+ BD71837_REG_BUCK1_CTRL = 0x05,
+ BD71837_REG_BUCK2_CTRL = 0x06,
+ BD71837_REG_BUCK3_CTRL = 0x07,
+ BD71837_REG_BUCK4_CTRL = 0x08,
+ BD71837_REG_BUCK5_CTRL = 0x09,
+ BD71837_REG_BUCK6_CTRL = 0x0A,
+ BD71837_REG_BUCK7_CTRL = 0x0B,
+ BD71837_REG_BUCK8_CTRL = 0x0C,
+ BD71837_REG_BUCK1_VOLT_RUN = 0x0D,
+ BD71837_REG_BUCK1_VOLT_IDLE = 0x0E,
+ BD71837_REG_BUCK1_VOLT_SUSP = 0x0F,
+ BD71837_REG_BUCK2_VOLT_RUN = 0x10,
+ BD71837_REG_BUCK2_VOLT_IDLE = 0x11,
+ BD71837_REG_BUCK3_VOLT_RUN = 0x12,
+ BD71837_REG_BUCK4_VOLT_RUN = 0x13,
+ BD71837_REG_BUCK5_VOLT = 0x14,
+ BD71837_REG_BUCK6_VOLT = 0x15,
+ BD71837_REG_BUCK7_VOLT = 0x16,
+ BD71837_REG_BUCK8_VOLT = 0x17,
+ BD71837_REG_LDO1_VOLT = 0x18,
+ BD71837_REG_LDO2_VOLT = 0x19,
+ BD71837_REG_LDO3_VOLT = 0x1A,
+ BD71837_REG_LDO4_VOLT = 0x1B,
+ BD71837_REG_LDO5_VOLT = 0x1C,
+ BD71837_REG_LDO6_VOLT = 0x1D,
+ BD71837_REG_LDO7_VOLT = 0x1E,
+ BD71837_REG_TRANS_COND0 = 0x1F,
+ BD71837_REG_TRANS_COND1 = 0x20,
+ BD71837_REG_VRFAULTEN = 0x21,
+ BD71837_REG_MVRFLTMASK0 = 0x22,
+ BD71837_REG_MVRFLTMASK1 = 0x23,
+ BD71837_REG_MVRFLTMASK2 = 0x24,
+ BD71837_REG_RCVCFG = 0x25,
+ BD71837_REG_RCVNUM = 0x26,
+ BD71837_REG_PWRONCONFIG0 = 0x27,
+ BD71837_REG_PWRONCONFIG1 = 0x28,
+ BD71837_REG_RESETSRC = 0x29,
+ BD71837_REG_MIRQ = 0x2A,
+ BD71837_REG_IRQ = 0x2B,
+ BD71837_REG_IN_MON = 0x2C,
+ BD71837_REG_POW_STATE = 0x2D,
+ BD71837_REG_OUT32K = 0x2E,
+ BD71837_REG_REGLOCK = 0x2F,
+ BD71837_REG_OTPVER = 0xFF,
+ BD71837_MAX_REGISTER = 0x100,
+};
+
+#define REGLOCK_PWRSEQ 0x1
+#define REGLOCK_VREG 0x10
+
+/* Generic BUCK control masks */
+#define BD71837_BUCK_SEL 0x02
+#define BD71837_BUCK_EN 0x01
+#define BD71837_BUCK_RUN_ON 0x04
+
+/* Generic LDO masks */
+#define BD71837_LDO_SEL 0x80
+#define BD71837_LDO_EN 0x40
+
+/* BD71837 BUCK ramp rate CTRL reg bits */
+#define BUCK_RAMPRATE_MASK 0xC0
+#define BUCK_RAMPRATE_10P00MV 0x0
+#define BUCK_RAMPRATE_5P00MV 0x1
+#define BUCK_RAMPRATE_2P50MV 0x2
+#define BUCK_RAMPRATE_1P25MV 0x3
+
+/* BD71837_REG_BUCK1_VOLT_RUN bits */
+#define BUCK1_RUN_MASK 0x3F
+#define BUCK1_RUN_DEFAULT 0x14
+
+/* BD71837_REG_BUCK1_VOLT_SUSP bits */
+#define BUCK1_SUSP_MASK 0x3F
+#define BUCK1_SUSP_DEFAULT 0x14
+
+/* BD71837_REG_BUCK1_VOLT_IDLE bits */
+#define BUCK1_IDLE_MASK 0x3F
+#define BUCK1_IDLE_DEFAULT 0x14
+
+/* BD71837_REG_BUCK2_VOLT_RUN bits */
+#define BUCK2_RUN_MASK 0x3F
+#define BUCK2_RUN_DEFAULT 0x1E
+
+/* BD71837_REG_BUCK2_VOLT_IDLE bits */
+#define BUCK2_IDLE_MASK 0x3F
+#define BUCK2_IDLE_DEFAULT 0x14
+
+/* BD71837_REG_BUCK3_VOLT_RUN bits */
+#define BUCK3_RUN_MASK 0x3F
+#define BUCK3_RUN_DEFAULT 0x1E
+
+/* BD71837_REG_BUCK4_VOLT_RUN bits */
+#define BUCK4_RUN_MASK 0x3F
+#define BUCK4_RUN_DEFAULT 0x1E
+
+/* BD71837_REG_BUCK5_VOLT bits */
+#define BUCK5_MASK 0x07
+#define BUCK5_DEFAULT 0x02
+
+/* BD71837_REG_BUCK6_VOLT bits */
+#define BUCK6_MASK 0x03
+#define BUCK6_DEFAULT 0x03
+
+/* BD71837_REG_BUCK7_VOLT bits */
+#define BUCK7_MASK 0x07
+#define BUCK7_DEFAULT 0x03
+
+/* BD71837_REG_BUCK8_VOLT bits */
+#define BUCK8_MASK 0x3F
+#define BUCK8_DEFAULT 0x1E
+
+/* BD71837_REG_IRQ bits */
+#define IRQ_SWRST 0x40
+#define IRQ_PWRON_S 0x20
+#define IRQ_PWRON_L 0x10
+#define IRQ_PWRON 0x08
+#define IRQ_WDOG 0x04
+#define IRQ_ON_REQ 0x02
+#define IRQ_STBY_REQ 0x01
+
+/* BD71837_REG_OUT32K bits */
+#define BD71837_OUT32K_EN 0x01
+
+/* BD71837 gated clock rate */
+#define BD71837_CLK_RATE 32768
+
+/* ROHM BD71837 irqs */
+enum {
+ BD71837_INT_STBY_REQ,
+ BD71837_INT_ON_REQ,
+ BD71837_INT_WDOG,
+ BD71837_INT_PWRBTN,
+ BD71837_INT_PWRBTN_L,
+ BD71837_INT_PWRBTN_S,
+ BD71837_INT_SWRST
+};
+
+/* ROHM BD71837 interrupt masks */
+#define BD71837_INT_SWRST_MASK 0x40
+#define BD71837_INT_PWRBTN_S_MASK 0x20
+#define BD71837_INT_PWRBTN_L_MASK 0x10
+#define BD71837_INT_PWRBTN_MASK 0x8
+#define BD71837_INT_WDOG_MASK 0x4
+#define BD71837_INT_ON_REQ_MASK 0x2
+#define BD71837_INT_STBY_REQ_MASK 0x1
+
+/* BD71837_REG_LDO1_VOLT bits */
+#define LDO1_MASK 0x03
+
+/* BD71837_REG_LDO1_VOLT bits */
+#define LDO2_MASK 0x20
+
+/* BD71837_REG_LDO3_VOLT bits */
+#define LDO3_MASK 0x0F
+
+/* BD71837_REG_LDO4_VOLT bits */
+#define LDO4_MASK 0x0F
+
+/* BD71837_REG_LDO5_VOLT bits */
+#define LDO5_MASK 0x0F
+
+/* BD71837_REG_LDO6_VOLT bits */
+#define LDO6_MASK 0x0F
+
+/* BD71837_REG_LDO7_VOLT bits */
+#define LDO7_MASK 0x0F
+
+/* Register write induced reset settings */
+
+/*
+ * Even though bit zero is not part of the SWRESET type field we still want
+ * to write zero to it when changing the type. Bit zero is the 'SWRESET'
+ * trigger bit, and writing 1 to it triggers the action. So always write 0
+ * to it when changing the SWRESET action - no matter what we read from it.
+ */
+#define BD71837_SWRESET_TYPE_MASK 7
+#define BD71837_SWRESET_TYPE_DISABLED 0
+#define BD71837_SWRESET_TYPE_COLD 4
+#define BD71837_SWRESET_TYPE_WARM 6
+
+#define BD71837_SWRESET_RESET_MASK 1
+#define BD71837_SWRESET_RESET 1
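
As the comment above explains, the SWRESET type field shares its register with the reset trigger bit, and the type mask deliberately covers that bit. A hedged sketch of how a driver might change the type without triggering a reset follows; the regmap handle is assumed, everything else comes from the defines above.

/* Illustrative only: set the SWRESET type (DISABLED, COLD or WARM).
 * BD71837_SWRESET_TYPE_MASK already includes the trigger bit, and the
 * type values keep it at 0, so no reset is fired by this write.
 */
static int bd71837_set_swreset_type(struct regmap *regmap, unsigned int type)
{
	return regmap_update_bits(regmap, BD71837_REG_SWRESET,
				  BD71837_SWRESET_TYPE_MASK, type);
}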
+
+/* Poweroff state transition conditions */
+
+#define BD718XX_ON_REQ_POWEROFF_MASK 1
+#define BD718XX_SWRESET_POWEROFF_MASK 2
+#define BD718XX_WDOG_POWEROFF_MASK 4
+#define BD718XX_KEY_L_POWEROFF_MASK 8
+
+#define BD718XX_POWOFF_TO_SNVS 0
+#define BD718XX_POWOFF_TO_RDY 0xF
+
+#define BD718XX_POWOFF_TIME_MASK 0xF0
+enum {
+ BD718XX_POWOFF_TIME_5MS = 0,
+ BD718XX_POWOFF_TIME_10MS,
+ BD718XX_POWOFF_TIME_15MS,
+ BD718XX_POWOFF_TIME_20MS,
+ BD718XX_POWOFF_TIME_25MS,
+ BD718XX_POWOFF_TIME_30MS,
+ BD718XX_POWOFF_TIME_35MS,
+ BD718XX_POWOFF_TIME_40MS,
+ BD718XX_POWOFF_TIME_45MS,
+ BD718XX_POWOFF_TIME_50MS,
+ BD718XX_POWOFF_TIME_75MS,
+ BD718XX_POWOFF_TIME_100MS,
+ BD718XX_POWOFF_TIME_250MS,
+ BD718XX_POWOFF_TIME_500MS,
+ BD718XX_POWOFF_TIME_750MS,
+ BD718XX_POWOFF_TIME_1500MS
+};
+
+/* Poweron sequence state transition conditions */
+#define BD718XX_RDY_TO_SNVS_MASK 0xF
+#define BD718XX_SNVS_TO_RUN_MASK 0xF0
+
+#define BD718XX_PWR_TRIG_KEY_L 1
+#define BD718XX_PWR_TRIG_KEY_S 2
+#define BD718XX_PWR_TRIG_PMIC_ON 4
+#define BD718XX_PWR_TRIG_VSYS_UVLO 8
+#define BD718XX_RDY_TO_SNVS_SIFT 0
+#define BD718XX_SNVS_TO_RUN_SIFT 4
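
The poweron transition masks above pack the RDY-to-SNVS triggers into the low nibble and the SNVS-to-RUN triggers into the high nibble, as the _SIFT values indicate. A small sketch of composing such a value follows; the target register (BD71837_REG_TRANS_COND1 here) is an assumption made only for the example.

/* Illustrative only: a long power-key press moves RDY to SNVS, the PMIC_ON
 * signal moves SNVS to RUN.
 */
static int bd718xx_config_poweron(struct regmap *regmap)
{
	unsigned int val;

	val = (BD718XX_PWR_TRIG_KEY_L << BD718XX_RDY_TO_SNVS_SIFT) |
	      (BD718XX_PWR_TRIG_PMIC_ON << BD718XX_SNVS_TO_RUN_SIFT);

	return regmap_update_bits(regmap, BD71837_REG_TRANS_COND1,
				  BD718XX_RDY_TO_SNVS_MASK |
				  BD718XX_SNVS_TO_RUN_MASK, val);
}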
+
+#define BD718XX_PWRBTN_PRESS_DURATION_MASK 0xF
+
+/* Timeout value for detecting short press */
+enum {
+ BD718XX_PWRBTN_SHORT_PRESS_10MS = 0,
+ BD718XX_PWRBTN_SHORT_PRESS_500MS,
+ BD718XX_PWRBTN_SHORT_PRESS_1000MS,
+ BD718XX_PWRBTN_SHORT_PRESS_1500MS,
+ BD718XX_PWRBTN_SHORT_PRESS_2000MS,
+ BD718XX_PWRBTN_SHORT_PRESS_2500MS,
+ BD718XX_PWRBTN_SHORT_PRESS_3000MS,
+ BD718XX_PWRBTN_SHORT_PRESS_3500MS,
+ BD718XX_PWRBTN_SHORT_PRESS_4000MS,
+ BD718XX_PWRBTN_SHORT_PRESS_4500MS,
+ BD718XX_PWRBTN_SHORT_PRESS_5000MS,
+ BD718XX_PWRBTN_SHORT_PRESS_5500MS,
+ BD718XX_PWRBTN_SHORT_PRESS_6000MS,
+ BD718XX_PWRBTN_SHORT_PRESS_6500MS,
+ BD718XX_PWRBTN_SHORT_PRESS_7000MS,
+ BD718XX_PWRBTN_SHORT_PRESS_7500MS
+};
+
+/* Timeout value for detecting LONG press */
+enum {
+ BD718XX_PWRBTN_LONG_PRESS_10MS = 0,
+ BD718XX_PWRBTN_LONG_PRESS_1S,
+ BD718XX_PWRBTN_LONG_PRESS_2S,
+ BD718XX_PWRBTN_LONG_PRESS_3S,
+ BD718XX_PWRBTN_LONG_PRESS_4S,
+ BD718XX_PWRBTN_LONG_PRESS_5S,
+ BD718XX_PWRBTN_LONG_PRESS_6S,
+ BD718XX_PWRBTN_LONG_PRESS_7S,
+ BD718XX_PWRBTN_LONG_PRESS_8S,
+ BD718XX_PWRBTN_LONG_PRESS_9S,
+ BD718XX_PWRBTN_LONG_PRESS_10S,
+ BD718XX_PWRBTN_LONG_PRESS_11S,
+ BD718XX_PWRBTN_LONG_PRESS_12S,
+ BD718XX_PWRBTN_LONG_PRESS_13S,
+ BD718XX_PWRBTN_LONG_PRESS_14S,
+ BD718XX_PWRBTN_LONG_PRESS_15S
+};
+
+struct bd71837_pmic;
+struct bd71837_clk;
+
+struct bd71837 {
+ struct device *dev;
+ struct regmap *regmap;
+ unsigned long int id;
+
+ int chip_irq;
+ struct regmap_irq_chip_data *irq_data;
+
+ struct bd71837_pmic *pmic;
+ struct bd71837_clk *clk;
+};
+
+#endif /* __LINUX_MFD_BD71837_H__ */
diff --git a/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h b/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h
index e06f5f79eaef..6c1ad160ed87 100644
--- a/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h
+++ b/include/linux/mfd/syscon/imx6q-iomuxc-gpr.h
@@ -457,4 +457,7 @@
#define MCLK_DIR(x) (x == 1 ? IMX6UL_GPR1_SAI1_MCLK_DIR : x == 2 ? \
IMX6UL_GPR1_SAI2_MCLK_DIR : IMX6UL_GPR1_SAI3_MCLK_DIR)
+/* imx6sll iomux gpr register field definitions */
+#define IMX6SLL_GPR5_AFCG_X_BYPASS_MASK (0x1f << 11)
+
#endif /* __LINUX_IMX6Q_IOMUXC_GPR_H */
diff --git a/include/linux/mfd/tmio.h b/include/linux/mfd/tmio.h
index 91f92215ca74..77866214ab51 100644
--- a/include/linux/mfd/tmio.h
+++ b/include/linux/mfd/tmio.h
@@ -90,6 +90,9 @@
/* Some controllers have a CBSY bit */
#define TMIO_MMC_HAVE_CBSY BIT(11)
+/* Some controllers that support HS400 use 4 taps while others use 8. */
+#define TMIO_MMC_HAVE_4TAP_HS400 BIT(13)
+
int tmio_core_mmc_enable(void __iomem *cnf, int shift, unsigned long base);
int tmio_core_mmc_resume(void __iomem *cnf, int shift, unsigned long base);
void tmio_core_mmc_pwr(void __iomem *cnf, int shift, int state);
diff --git a/include/linux/mfd/wm8994/pdata.h b/include/linux/mfd/wm8994/pdata.h
index 90c60524a496..b19c370fe81a 100644
--- a/include/linux/mfd/wm8994/pdata.h
+++ b/include/linux/mfd/wm8994/pdata.h
@@ -222,6 +222,12 @@ struct wm8994_pdata {
*/
bool spkmode_pu;
+ /*
+ * CS/ADDR must be pulled internally by the device on this
+ * system.
+ */
+ bool csnaddr_pd;
+
/**
* Maximum number of channels clocks will be generated for,
 * useful for systems where an I2S bus with multiple data
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index 122e7e9d3091..dca6ab4eaa99 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -630,6 +630,7 @@ struct mlx4_caps {
u32 vf_caps;
bool wol_port[MLX4_MAX_PORTS + 1];
struct mlx4_rate_limit_caps rl_caps;
+ u32 health_buffer_addrs;
};
struct mlx4_buf_list {
@@ -851,6 +852,12 @@ struct mlx4_vf_dev {
u8 n_ports;
};
+struct mlx4_fw_crdump {
+ bool snapshot_enable;
+ struct devlink_region *region_crspace;
+ struct devlink_region *region_fw_health;
+};
+
enum mlx4_pci_status {
MLX4_PCI_STATUS_DISABLED,
MLX4_PCI_STATUS_ENABLED,
@@ -871,6 +878,7 @@ struct mlx4_dev_persistent {
u8 interface_state;
struct mutex pci_status_mutex; /* sync pci state */
enum mlx4_pci_status pci_status;
+ struct mlx4_fw_crdump crdump;
};
struct mlx4_dev {
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index 02f72ebf31a7..11fa4e66afc5 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -332,6 +332,13 @@ enum mlx5_event {
MLX5_EVENT_TYPE_FPGA_ERROR = 0x20,
MLX5_EVENT_TYPE_FPGA_QP_ERROR = 0x21,
+
+ MLX5_EVENT_TYPE_DEVICE_TRACER = 0x26,
+};
+
+enum {
+ MLX5_TRACER_SUBTYPE_OWNERSHIP_CHANGE = 0x0,
+ MLX5_TRACER_SUBTYPE_TRACES_AVAILABLE = 0x1,
};
enum {
@@ -750,7 +757,7 @@ enum {
#define MLX5_MINI_CQE_ARRAY_SIZE 8
-static inline int mlx5_get_cqe_format(struct mlx5_cqe64 *cqe)
+static inline u8 mlx5_get_cqe_format(struct mlx5_cqe64 *cqe)
{
return (cqe->op_own >> 2) & 0x3;
}
@@ -770,14 +777,14 @@ static inline u8 get_cqe_l3_hdr_type(struct mlx5_cqe64 *cqe)
return (cqe->l4_l3_hdr_type >> 2) & 0x3;
}
-static inline u8 cqe_is_tunneled(struct mlx5_cqe64 *cqe)
+static inline bool cqe_is_tunneled(struct mlx5_cqe64 *cqe)
{
return cqe->outer_l3_tunneled & 0x1;
}
-static inline int cqe_has_vlan(struct mlx5_cqe64 *cqe)
+static inline bool cqe_has_vlan(struct mlx5_cqe64 *cqe)
{
- return !!(cqe->l4_l3_hdr_type & 0x1);
+ return cqe->l4_l3_hdr_type & 0x1;
}
static inline u64 get_cqe_ts(struct mlx5_cqe64 *cqe)
@@ -939,9 +946,9 @@ enum {
};
enum {
- MLX5_ESW_VPORT_ADMIN_STATE_DOWN = 0x0,
- MLX5_ESW_VPORT_ADMIN_STATE_UP = 0x1,
- MLX5_ESW_VPORT_ADMIN_STATE_AUTO = 0x2,
+ MLX5_VPORT_ADMIN_STATE_DOWN = 0x0,
+ MLX5_VPORT_ADMIN_STATE_UP = 0x1,
+ MLX5_VPORT_ADMIN_STATE_AUTO = 0x2,
};
enum {
@@ -1071,6 +1078,9 @@ enum mlx5_qcam_feature_groups {
#define MLX5_CAP_GEN(mdev, cap) \
MLX5_GET(cmd_hca_cap, mdev->caps.hca_cur[MLX5_CAP_GENERAL], cap)
+#define MLX5_CAP_GEN_64(mdev, cap) \
+ MLX5_GET64(cmd_hca_cap, mdev->caps.hca_cur[MLX5_CAP_GENERAL], cap)
+
#define MLX5_CAP_GEN_MAX(mdev, cap) \
MLX5_GET(cmd_hca_cap, mdev->caps.hca_max[MLX5_CAP_GENERAL], cap)
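
The new MLX5_CAP_GEN_64() accessor pairs with the 64-bit general_obj_types capability field added later in this patch (mlx5_ifc.h). A hedged usage sketch follows; "mdev" is an assumed struct mlx5_core_dev pointer from the calling code.

/* Illustrative only: check whether the device advertises UMEM general
 * objects, using the capability bit defined in mlx5_ifc.h below.
 */
static bool mlx5_supports_umem_obj(struct mlx5_core_dev *mdev)
{
	u64 obj_types = MLX5_CAP_GEN_64(mdev, general_obj_types);

	return obj_types & MLX5_GENERAL_OBJ_TYPES_CAP_UMEM;
}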
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 80cbb7fdce4a..7a452716de4b 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -138,9 +138,14 @@ enum {
MLX5_REG_HOST_ENDIANNESS = 0x7004,
MLX5_REG_MCIA = 0x9014,
MLX5_REG_MLCR = 0x902b,
+ MLX5_REG_MTRC_CAP = 0x9040,
+ MLX5_REG_MTRC_CONF = 0x9041,
+ MLX5_REG_MTRC_STDB = 0x9042,
+ MLX5_REG_MTRC_CTRL = 0x9043,
MLX5_REG_MPCNT = 0x9051,
MLX5_REG_MTPPS = 0x9053,
MLX5_REG_MTPPSE = 0x9054,
+ MLX5_REG_MPEGC = 0x9056,
MLX5_REG_MCQI = 0x9061,
MLX5_REG_MCC = 0x9062,
MLX5_REG_MCDA = 0x9063,
@@ -358,6 +363,7 @@ struct mlx5_frag_buf_ctrl {
struct mlx5_frag_buf frag_buf;
u32 sz_m1;
u32 frag_sz_m1;
+ u32 strides_offset;
u8 log_sz;
u8 log_stride;
u8 log_frag_strides;
@@ -811,6 +817,9 @@ struct mlx5_clock {
struct mlx5_pps pps_info;
};
+struct mlx5_fw_tracer;
+struct mlx5_vxlan;
+
struct mlx5_core_dev {
struct pci_dev *pdev;
/* sync pci state */
@@ -842,6 +851,7 @@ struct mlx5_core_dev {
atomic_t num_qps;
u32 issi;
struct mlx5e_resources mlx5e_res;
+ struct mlx5_vxlan *vxlan;
struct {
struct mlx5_rsvd_gids reserved_gids;
u32 roce_en;
@@ -855,6 +865,7 @@ struct mlx5_core_dev {
struct mlx5_clock clock;
struct mlx5_ib_clock_info *clock_info;
struct page *clock_info_page;
+ struct mlx5_fw_tracer *tracer;
};
struct mlx5_db {
@@ -983,14 +994,22 @@ static inline u32 mlx5_base_mkey(const u32 key)
return key & 0xffffff00u;
}
-static inline void mlx5_fill_fbc(u8 log_stride, u8 log_sz,
- struct mlx5_frag_buf_ctrl *fbc)
+static inline void mlx5_fill_fbc_offset(u8 log_stride, u8 log_sz,
+ u32 strides_offset,
+ struct mlx5_frag_buf_ctrl *fbc)
{
fbc->log_stride = log_stride;
fbc->log_sz = log_sz;
fbc->sz_m1 = (1 << fbc->log_sz) - 1;
fbc->log_frag_strides = PAGE_SHIFT - fbc->log_stride;
fbc->frag_sz_m1 = (1 << fbc->log_frag_strides) - 1;
+ fbc->strides_offset = strides_offset;
+}
+
+static inline void mlx5_fill_fbc(u8 log_stride, u8 log_sz,
+ struct mlx5_frag_buf_ctrl *fbc)
+{
+ mlx5_fill_fbc_offset(log_stride, log_sz, 0, fbc);
}
static inline void mlx5_core_init_cq_frag_buf(struct mlx5_frag_buf_ctrl *fbc,
@@ -1004,7 +1023,10 @@ static inline void mlx5_core_init_cq_frag_buf(struct mlx5_frag_buf_ctrl *fbc,
static inline void *mlx5_frag_buf_get_wqe(struct mlx5_frag_buf_ctrl *fbc,
u32 ix)
{
- unsigned int frag = (ix >> fbc->log_frag_strides);
+ unsigned int frag;
+
+ ix += fbc->strides_offset;
+ frag = ix >> fbc->log_frag_strides;
return fbc->frag_buf.frags[frag].buf +
((fbc->frag_sz_m1 & ix) << fbc->log_stride);
@@ -1067,8 +1089,6 @@ int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev,
struct mlx5_core_mkey *mkey);
int mlx5_core_query_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mkey *mkey,
u32 *out, int outlen);
-int mlx5_core_dump_fill_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mkey *_mkey,
- u32 *mkey);
int mlx5_core_alloc_pd(struct mlx5_core_dev *dev, u32 *pdn);
int mlx5_core_dealloc_pd(struct mlx5_core_dev *dev, u32 pdn);
int mlx5_core_mad_ifc(struct mlx5_core_dev *dev, const void *inb, void *outb,
@@ -1215,14 +1235,11 @@ struct net_device *mlx5_rdma_netdev_alloc(struct mlx5_core_dev *mdev,
{
return ERR_PTR(-EOPNOTSUPP);
}
-
-static inline void mlx5_rdma_netdev_free(struct net_device *netdev) {}
#else
struct net_device *mlx5_rdma_netdev_alloc(struct mlx5_core_dev *mdev,
struct ib_device *ibdev,
const char *name,
void (*setup)(struct net_device *));
-void mlx5_rdma_netdev_free(struct net_device *netdev);
#endif /* CONFIG_MLX5_CORE_IPOIB */
struct mlx5_profile {
diff --git a/include/linux/mlx5/eswitch.h b/include/linux/mlx5/eswitch.h
index d3c9db492b30..fab5121ffb8f 100644
--- a/include/linux/mlx5/eswitch.h
+++ b/include/linux/mlx5/eswitch.h
@@ -8,6 +8,8 @@
#include <linux/mlx5/driver.h>
+#define MLX5_ESWITCH_MANAGER(mdev) MLX5_CAP_GEN(mdev, eswitch_manager)
+
enum {
SRIOV_NONE,
SRIOV_LEGACY,
diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h
index 757b4a30281e..804516e4f483 100644
--- a/include/linux/mlx5/fs.h
+++ b/include/linux/mlx5/fs.h
@@ -89,6 +89,7 @@ struct mlx5_flow_destination {
enum mlx5_flow_destination_type type;
union {
u32 tir_num;
+ u32 ft_num;
struct mlx5_flow_table *ft;
struct mlx5_fc *counter;
struct {
@@ -152,6 +153,8 @@ struct mlx5_fs_vlan {
u8 prio;
};
+#define MLX5_FS_VLAN_DEPTH 2
+
struct mlx5_flow_act {
u32 action;
bool has_flow_tag;
@@ -159,7 +162,7 @@ struct mlx5_flow_act {
u32 encap_id;
u32 modify_id;
uintptr_t esp_id;
- struct mlx5_fs_vlan vlan;
+ struct mlx5_fs_vlan vlan[MLX5_FS_VLAN_DEPTH];
struct ib_counters *counters;
};
@@ -175,7 +178,7 @@ mlx5_add_flow_rules(struct mlx5_flow_table *ft,
struct mlx5_flow_spec *spec,
struct mlx5_flow_act *flow_act,
struct mlx5_flow_destination *dest,
- int dest_num);
+ int num_dest);
void mlx5_del_flow_rules(struct mlx5_flow_handle *fr);
int mlx5_modify_rule_destination(struct mlx5_flow_handle *handler,
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index 27134c4fcb76..f043d65b9bac 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -76,6 +76,16 @@ enum {
};
enum {
+ MLX5_GENERAL_OBJ_TYPES_CAP_UCTX = (1ULL << 4),
+ MLX5_GENERAL_OBJ_TYPES_CAP_UMEM = (1ULL << 5),
+};
+
+enum {
+ MLX5_OBJ_TYPE_UCTX = 0x0004,
+ MLX5_OBJ_TYPE_UMEM = 0x0005,
+};
+
+enum {
MLX5_CMD_OP_QUERY_HCA_CAP = 0x100,
MLX5_CMD_OP_QUERY_ADAPTER = 0x101,
MLX5_CMD_OP_INIT_HCA = 0x102,
@@ -237,11 +247,16 @@ enum {
MLX5_CMD_OP_DEALLOC_ENCAP_HEADER = 0x93e,
MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT = 0x940,
MLX5_CMD_OP_DEALLOC_MODIFY_HEADER_CONTEXT = 0x941,
+ MLX5_CMD_OP_QUERY_MODIFY_HEADER_CONTEXT = 0x942,
MLX5_CMD_OP_FPGA_CREATE_QP = 0x960,
MLX5_CMD_OP_FPGA_MODIFY_QP = 0x961,
MLX5_CMD_OP_FPGA_QUERY_QP = 0x962,
MLX5_CMD_OP_FPGA_DESTROY_QP = 0x963,
MLX5_CMD_OP_FPGA_QUERY_QP_COUNTERS = 0x964,
+ MLX5_CMD_OP_CREATE_GENERAL_OBJECT = 0xa00,
+ MLX5_CMD_OP_MODIFY_GENERAL_OBJECT = 0xa01,
+ MLX5_CMD_OP_QUERY_GENERAL_OBJECT = 0xa02,
+ MLX5_CMD_OP_DESTROY_GENERAL_OBJECT = 0xa03,
MLX5_CMD_OP_MAX
};
@@ -326,7 +341,10 @@ struct mlx5_ifc_flow_table_prop_layout_bits {
u8 reserved_at_9[0x1];
u8 pop_vlan[0x1];
u8 push_vlan[0x1];
- u8 reserved_at_c[0x14];
+ u8 reserved_at_c[0x1];
+ u8 pop_vlan_2[0x1];
+ u8 push_vlan_2[0x1];
+ u8 reserved_at_f[0x11];
u8 reserved_at_20[0x2];
u8 log_max_ft_size[0x6];
@@ -654,7 +672,9 @@ struct mlx5_ifc_per_protocol_networking_offload_caps_bits {
u8 swp[0x1];
u8 swp_csum[0x1];
u8 swp_lso[0x1];
- u8 reserved_at_23[0x1b];
+ u8 reserved_at_23[0xd];
+ u8 max_vxlan_udp_ports[0x8];
+ u8 reserved_at_38[0x6];
u8 max_geneve_opt_len[0x1];
u8 tunnel_stateless_geneve_rx[0x1];
@@ -874,7 +894,9 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 log_max_eq_sz[0x8];
u8 reserved_at_e8[0x2];
u8 log_max_mkey[0x6];
- u8 reserved_at_f0[0xc];
+ u8 reserved_at_f0[0x8];
+ u8 dump_fill_mkey[0x1];
+ u8 reserved_at_f9[0x3];
u8 log_max_eq[0x4];
u8 max_indirection[0x8];
@@ -922,7 +944,7 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 vnic_env_queue_counters[0x1];
u8 ets[0x1];
u8 nic_flow_table[0x1];
- u8 eswitch_flow_table[0x1];
+ u8 eswitch_manager[0x1];
u8 device_memory[0x1];
u8 mcam_reg[0x1];
u8 pcam_reg[0x1];
@@ -1113,7 +1135,12 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 reserved_at_3f8[0x3];
u8 log_max_current_uc_list[0x5];
- u8 reserved_at_400[0x80];
+ u8 general_obj_types[0x40];
+
+ u8 reserved_at_440[0x20];
+
+ u8 reserved_at_460[0x10];
+ u8 max_num_eqs[0x10];
u8 reserved_at_480[0x3];
u8 log_max_l2_table[0x5];
@@ -1162,6 +1189,7 @@ enum mlx5_flow_destination_type {
MLX5_FLOW_DESTINATION_TYPE_PORT = 0x99,
MLX5_FLOW_DESTINATION_TYPE_COUNTER = 0x100,
+ MLX5_FLOW_DESTINATION_TYPE_FLOW_TABLE_NUM = 0x101,
};
struct mlx5_ifc_dest_format_struct_bits {
@@ -1668,7 +1696,11 @@ struct mlx5_ifc_eth_extended_cntrs_grp_data_layout_bits {
u8 rx_buffer_full_low[0x20];
- u8 reserved_at_1c0[0x600];
+ u8 rx_icrc_encapsulated_high[0x20];
+
+ u8 rx_icrc_encapsulated_low[0x20];
+
+ u8 reserved_at_200[0x5c0];
};
struct mlx5_ifc_eth_3635_cntrs_grp_data_layout_bits {
@@ -2367,6 +2399,8 @@ enum {
MLX5_FLOW_CONTEXT_ACTION_MOD_HDR = 0x40,
MLX5_FLOW_CONTEXT_ACTION_VLAN_POP = 0x80,
MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH = 0x100,
+ MLX5_FLOW_CONTEXT_ACTION_VLAN_POP_2 = 0x400,
+ MLX5_FLOW_CONTEXT_ACTION_VLAN_PUSH_2 = 0x800,
};
struct mlx5_ifc_vlan_bits {
@@ -2397,7 +2431,9 @@ struct mlx5_ifc_flow_context_bits {
u8 modify_header_id[0x20];
- u8 reserved_at_100[0x100];
+ struct mlx5_ifc_vlan_bits push_vlan_2;
+
+ u8 reserved_at_120[0xe0];
struct mlx5_ifc_fte_match_param_bits match_value;
@@ -3733,8 +3769,8 @@ struct mlx5_ifc_query_vport_state_out_bits {
};
enum {
- MLX5_QUERY_VPORT_STATE_IN_OP_MOD_VNIC_VPORT = 0x0,
- MLX5_QUERY_VPORT_STATE_IN_OP_MOD_ESW_VPORT = 0x1,
+ MLX5_VPORT_STATE_OP_MOD_VNIC_VPORT = 0x0,
+ MLX5_VPORT_STATE_OP_MOD_ESW_VPORT = 0x1,
};
struct mlx5_ifc_query_vport_state_in_bits {
@@ -8030,9 +8066,23 @@ struct mlx5_ifc_peir_reg_bits {
u8 error_type[0x8];
};
-struct mlx5_ifc_pcam_enhanced_features_bits {
- u8 reserved_at_0[0x76];
+struct mlx5_ifc_mpegc_reg_bits {
+ u8 reserved_at_0[0x30];
+ u8 field_select[0x10];
+ u8 tx_overflow_sense[0x1];
+ u8 mark_cqe[0x1];
+ u8 mark_cnp[0x1];
+ u8 reserved_at_43[0x1b];
+ u8 tx_lossy_overflow_oper[0x2];
+
+ u8 reserved_at_60[0x100];
+};
+
+struct mlx5_ifc_pcam_enhanced_features_bits {
+ u8 reserved_at_0[0x6d];
+ u8 rx_icrc_encapsulated_counter[0x1];
+ u8 reserved_at_6e[0x8];
u8 pfcc_mask[0x1];
u8 reserved_at_77[0x4];
u8 rx_buffer_fullness_counters[0x1];
@@ -8077,7 +8127,11 @@ struct mlx5_ifc_pcam_reg_bits {
};
struct mlx5_ifc_mcam_enhanced_features_bits {
- u8 reserved_at_0[0x7b];
+ u8 reserved_at_0[0x74];
+ u8 mark_tx_action_cnp[0x1];
+ u8 mark_tx_action_cqe[0x1];
+ u8 dynamic_tx_overflow[0x1];
+ u8 reserved_at_77[0x4];
u8 pcie_outbound_stalled[0x1];
u8 tx_overflow_buffer_pkt[0x1];
u8 mtpps_enh_out_per_adj[0x1];
@@ -8092,7 +8146,11 @@ struct mlx5_ifc_mcam_access_reg_bits {
u8 mcqi[0x1];
u8 reserved_at_1f[0x1];
- u8 regs_95_to_64[0x20];
+ u8 regs_95_to_87[0x9];
+ u8 mpegc[0x1];
+ u8 regs_85_to_68[0x12];
+ u8 tracer_registers[0x4];
+
u8 regs_63_to_32[0x20];
u8 regs_31_to_0[0x20];
};
@@ -9115,4 +9173,113 @@ struct mlx5_ifc_dealloc_memic_out_bits {
u8 reserved_at_40[0x40];
};
+struct mlx5_ifc_general_obj_in_cmd_hdr_bits {
+ u8 opcode[0x10];
+ u8 uid[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 obj_type[0x10];
+
+ u8 obj_id[0x20];
+
+ u8 reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_general_obj_out_cmd_hdr_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 obj_id[0x20];
+
+ u8 reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_umem_bits {
+ u8 modify_field_select[0x40];
+
+ u8 reserved_at_40[0x5b];
+ u8 log_page_size[0x5];
+
+ u8 page_offset[0x20];
+
+ u8 num_of_mtt[0x40];
+
+ struct mlx5_ifc_mtt_bits mtt[0];
+};
+
+struct mlx5_ifc_uctx_bits {
+ u8 modify_field_select[0x40];
+
+ u8 reserved_at_40[0x1c0];
+};
+
+struct mlx5_ifc_create_umem_in_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr;
+ struct mlx5_ifc_umem_bits umem;
+};
+
+struct mlx5_ifc_create_uctx_in_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr;
+ struct mlx5_ifc_uctx_bits uctx;
+};
+
+struct mlx5_ifc_mtrc_string_db_param_bits {
+ u8 string_db_base_address[0x20];
+
+ u8 reserved_at_20[0x8];
+ u8 string_db_size[0x18];
+};
+
+struct mlx5_ifc_mtrc_cap_bits {
+ u8 trace_owner[0x1];
+ u8 trace_to_memory[0x1];
+ u8 reserved_at_2[0x4];
+ u8 trc_ver[0x2];
+ u8 reserved_at_8[0x14];
+ u8 num_string_db[0x4];
+
+ u8 first_string_trace[0x8];
+ u8 num_string_trace[0x8];
+ u8 reserved_at_30[0x28];
+
+ u8 log_max_trace_buffer_size[0x8];
+
+ u8 reserved_at_60[0x20];
+
+ struct mlx5_ifc_mtrc_string_db_param_bits string_db_param[8];
+
+ u8 reserved_at_280[0x180];
+};
+
+struct mlx5_ifc_mtrc_conf_bits {
+ u8 reserved_at_0[0x1c];
+ u8 trace_mode[0x4];
+ u8 reserved_at_20[0x18];
+ u8 log_trace_buffer_size[0x8];
+ u8 trace_mkey[0x20];
+ u8 reserved_at_60[0x3a0];
+};
+
+struct mlx5_ifc_mtrc_stdb_bits {
+ u8 string_db_index[0x4];
+ u8 reserved_at_4[0x4];
+ u8 read_size[0x18];
+ u8 start_offset[0x20];
+ u8 string_db_data[0];
+};
+
+struct mlx5_ifc_mtrc_ctrl_bits {
+ u8 trace_status[0x2];
+ u8 reserved_at_2[0x2];
+ u8 arm_event[0x1];
+ u8 reserved_at_5[0xb];
+ u8 modify_field_select[0x10];
+ u8 reserved_at_20[0x2b];
+ u8 current_timestamp52_32[0x15];
+ u8 current_timestamp31_0[0x20];
+ u8 reserved_at_80[0x180];
+};
+
#endif /* MLX5_IFC_H */
diff --git a/include/linux/mlx5/mlx5_ifc_fpga.h b/include/linux/mlx5/mlx5_ifc_fpga.h
index 64d0f40d4cc3..37e065a80a43 100644
--- a/include/linux/mlx5/mlx5_ifc_fpga.h
+++ b/include/linux/mlx5/mlx5_ifc_fpga.h
@@ -576,6 +576,7 @@ struct mlx5_ifc_fpga_ipsec_sa {
enum fpga_tls_cmds {
CMD_SETUP_STREAM = 0x1001,
CMD_TEARDOWN_STREAM = 0x1002,
+ CMD_RESYNC_RX = 0x1003,
};
#define MLX5_TLS_1_2 (0)
diff --git a/include/linux/mlx5/vport.h b/include/linux/mlx5/vport.h
index 9208cb8809ac..7e7c6dfcfb09 100644
--- a/include/linux/mlx5/vport.h
+++ b/include/linux/mlx5/vport.h
@@ -43,8 +43,6 @@ enum {
};
u8 mlx5_query_vport_state(struct mlx5_core_dev *mdev, u8 opmod, u16 vport);
-u8 mlx5_query_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod,
- u16 vport);
int mlx5_modify_vport_admin_state(struct mlx5_core_dev *mdev, u8 opmod,
u16 vport, u8 state);
int mlx5_query_nic_vport_mac_address(struct mlx5_core_dev *mdev,
diff --git a/include/linux/mm.h b/include/linux/mm.h
index a0fbb9ffe380..8fcc36660de6 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -155,7 +155,9 @@ extern int overcommit_kbytes_handler(struct ctl_table *, int, void __user *,
* mmap() functions).
*/
-extern struct kmem_cache *vm_area_cachep;
+struct vm_area_struct *vm_area_alloc(struct mm_struct *);
+struct vm_area_struct *vm_area_dup(struct vm_area_struct *);
+void vm_area_free(struct vm_area_struct *);
#ifndef CONFIG_MMU
extern struct rb_root nommu_region_tree;
@@ -450,6 +452,24 @@ struct vm_operations_struct {
unsigned long addr);
};
+static inline void vma_init(struct vm_area_struct *vma, struct mm_struct *mm)
+{
+ static const struct vm_operations_struct dummy_vm_ops = {};
+
+ memset(vma, 0, sizeof(*vma));
+ vma->vm_mm = mm;
+ vma->vm_ops = &dummy_vm_ops;
+ INIT_LIST_HEAD(&vma->anon_vma_chain);
+}
+
+static inline void vma_set_anonymous(struct vm_area_struct *vma)
+{
+ vma->vm_ops = NULL;
+}
+
+/* flush_tlb_range() takes a vma, not a mm, and can care about flags */
+#define TLB_FLUSH_VMA(mm,flags) { .vm_mm = (mm), .vm_flags = (flags) }
+
struct mmu_gather;
struct inode;
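
The vm_area_alloc()/vm_area_free() pair above replaces the open-coded kmem_cache allocations that used the removed vm_area_cachep. Below is a minimal sketch of the intended calling pattern for an anonymous mapping; the address range, flags and use of insert_vm_struct() are illustrative only, and vm_area_alloc() is assumed to hand back a vma already initialized for the given mm (the equivalent of vma_init()).

static int example_map_anon(struct mm_struct *mm, unsigned long addr,
                            unsigned long len, unsigned long flags)
{
        struct vm_area_struct *vma;

        /* Replaces kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL). */
        vma = vm_area_alloc(mm);
        if (!vma)
                return -ENOMEM;

        vma_set_anonymous(vma);         /* anonymous mapping: no vm_ops */
        vma->vm_start = addr;
        vma->vm_end = addr + len;
        vma->vm_flags = flags;
        vma->vm_page_prot = vm_get_page_prot(flags);

        if (insert_vm_struct(mm, vma)) {
                vm_area_free(vma);      /* undo vm_area_alloc() on failure */
                return -ENOMEM;
        }

        return 0;
}
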
@@ -708,10 +728,10 @@ static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
return pte;
}
-int alloc_set_pte(struct vm_fault *vmf, struct mem_cgroup *memcg,
+vm_fault_t alloc_set_pte(struct vm_fault *vmf, struct mem_cgroup *memcg,
struct page *page);
-int finish_fault(struct vm_fault *vmf);
-int finish_mkwrite_fault(struct vm_fault *vmf);
+vm_fault_t finish_fault(struct vm_fault *vmf);
+vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf);
#endif
/*
@@ -940,15 +960,6 @@ static inline int page_zone_id(struct page *page)
return (page->flags >> ZONEID_PGSHIFT) & ZONEID_MASK;
}
-static inline int zone_to_nid(struct zone *zone)
-{
-#ifdef CONFIG_NUMA
- return zone->node;
-#else
- return 0;
-#endif
-}
-
#ifdef NODE_NOT_IN_PAGE_FLAGS
extern int page_to_nid(const struct page *page);
#else
@@ -1392,8 +1403,8 @@ int generic_error_remove_page(struct address_space *mapping, struct page *page);
int invalidate_inode_page(struct page *page);
#ifdef CONFIG_MMU
-extern int handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
- unsigned int flags);
+extern vm_fault_t handle_mm_fault(struct vm_area_struct *vma,
+ unsigned long address, unsigned int flags);
extern int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
unsigned long address, unsigned int fault_flags,
bool *unlocked);
@@ -1402,7 +1413,7 @@ void unmap_mapping_pages(struct address_space *mapping,
void unmap_mapping_range(struct address_space *mapping,
loff_t const holebegin, loff_t const holelen, int even_cows);
#else
-static inline int handle_mm_fault(struct vm_area_struct *vma,
+static inline vm_fault_t handle_mm_fault(struct vm_area_struct *vma,
unsigned long address, unsigned int flags)
{
/* should never happen if there's no MMU */
@@ -2004,7 +2015,7 @@ static inline spinlock_t *pud_lock(struct mm_struct *mm, pud_t *pud)
extern void __init pagecache_init(void);
extern void free_area_init(unsigned long * zones_size);
-extern void free_area_init_node(int nid, unsigned long * zones_size,
+extern void __init free_area_init_node(int nid, unsigned long * zones_size,
unsigned long zone_start_pfn, unsigned long *zholes_size);
extern void free_initmem(void);
@@ -2132,7 +2143,7 @@ extern int __meminit __early_pfn_to_nid(unsigned long pfn,
struct mminit_pfnnid_cache *state);
#endif
-#ifdef CONFIG_HAVE_MEMBLOCK
+#if defined(CONFIG_HAVE_MEMBLOCK) && !defined(CONFIG_FLAT_NODE_MEM_MAP)
void zero_resv_unavail(void);
#else
static inline void zero_resv_unavail(void) {}
@@ -2552,7 +2563,7 @@ static inline struct page *follow_page(struct vm_area_struct *vma,
#define FOLL_COW 0x4000 /* internal GUP flag */
#define FOLL_ANON 0x8000 /* don't do file mappings */
-static inline int vm_fault_to_errno(int vm_fault, int foll_flags)
+static inline int vm_fault_to_errno(vm_fault_t vm_fault, int foll_flags)
{
if (vm_fault & VM_FAULT_OOM)
return -ENOMEM;
@@ -2646,12 +2657,7 @@ extern int randomize_va_space;
const char * arch_vma_name(struct vm_area_struct *vma);
void print_vma_addr(char *prefix, unsigned long rip);
-void sparse_mem_maps_populate_node(struct page **map_map,
- unsigned long pnum_begin,
- unsigned long pnum_end,
- unsigned long map_count,
- int nodeid);
-
+void *sparse_buffer_alloc(unsigned long size);
struct page *sparse_mem_map_populate(unsigned long pnum, int nid,
struct vmem_altmap *altmap);
pgd_t *vmemmap_pgd_populate(unsigned long addr, int node);
@@ -2733,7 +2739,8 @@ extern void clear_huge_page(struct page *page,
unsigned long addr_hint,
unsigned int pages_per_huge_page);
extern void copy_user_huge_page(struct page *dst, struct page *src,
- unsigned long addr, struct vm_area_struct *vma,
+ unsigned long addr_hint,
+ struct vm_area_struct *vma,
unsigned int pages_per_huge_page);
extern long copy_huge_page_from_user(struct page *dst_page,
const void __user *usr_src,
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 99ce070e7dcb..cd2bc939efd0 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -139,7 +139,10 @@ struct page {
unsigned long _pt_pad_1; /* compound_head */
pgtable_t pmd_huge_pte; /* protected by page->ptl */
unsigned long _pt_pad_2; /* mapping */
- struct mm_struct *pt_mm; /* x86 pgds only */
+ union {
+ struct mm_struct *pt_mm; /* x86 pgds only */
+ atomic_t pt_frag_refcount; /* powerpc */
+ };
#if ALLOC_SPLIT_PTLOCKS
spinlock_t *ptl;
#else
@@ -335,176 +338,183 @@ struct core_state {
struct kioctx_table;
struct mm_struct {
- struct vm_area_struct *mmap; /* list of VMAs */
- struct rb_root mm_rb;
- u32 vmacache_seqnum; /* per-thread vmacache */
+ struct {
+ struct vm_area_struct *mmap; /* list of VMAs */
+ struct rb_root mm_rb;
+ u32 vmacache_seqnum; /* per-thread vmacache */
#ifdef CONFIG_MMU
- unsigned long (*get_unmapped_area) (struct file *filp,
+ unsigned long (*get_unmapped_area) (struct file *filp,
unsigned long addr, unsigned long len,
unsigned long pgoff, unsigned long flags);
#endif
- unsigned long mmap_base; /* base of mmap area */
- unsigned long mmap_legacy_base; /* base of mmap area in bottom-up allocations */
+ unsigned long mmap_base; /* base of mmap area */
+ unsigned long mmap_legacy_base; /* base of mmap area in bottom-up allocations */
#ifdef CONFIG_HAVE_ARCH_COMPAT_MMAP_BASES
- /* Base adresses for compatible mmap() */
- unsigned long mmap_compat_base;
- unsigned long mmap_compat_legacy_base;
+ /* Base adresses for compatible mmap() */
+ unsigned long mmap_compat_base;
+ unsigned long mmap_compat_legacy_base;
#endif
- unsigned long task_size; /* size of task vm space */
- unsigned long highest_vm_end; /* highest vma end address */
- pgd_t * pgd;
-
- /**
- * @mm_users: The number of users including userspace.
- *
- * Use mmget()/mmget_not_zero()/mmput() to modify. When this drops
- * to 0 (i.e. when the task exits and there are no other temporary
- * reference holders), we also release a reference on @mm_count
- * (which may then free the &struct mm_struct if @mm_count also
- * drops to 0).
- */
- atomic_t mm_users;
-
- /**
- * @mm_count: The number of references to &struct mm_struct
- * (@mm_users count as 1).
- *
- * Use mmgrab()/mmdrop() to modify. When this drops to 0, the
- * &struct mm_struct is freed.
- */
- atomic_t mm_count;
+ unsigned long task_size; /* size of task vm space */
+ unsigned long highest_vm_end; /* highest vma end address */
+ pgd_t * pgd;
+
+ /**
+ * @mm_users: The number of users including userspace.
+ *
+ * Use mmget()/mmget_not_zero()/mmput() to modify. When this
+ * drops to 0 (i.e. when the task exits and there are no other
+ * temporary reference holders), we also release a reference on
+ * @mm_count (which may then free the &struct mm_struct if
+ * @mm_count also drops to 0).
+ */
+ atomic_t mm_users;
+
+ /**
+ * @mm_count: The number of references to &struct mm_struct
+ * (@mm_users count as 1).
+ *
+ * Use mmgrab()/mmdrop() to modify. When this drops to 0, the
+ * &struct mm_struct is freed.
+ */
+ atomic_t mm_count;
#ifdef CONFIG_MMU
- atomic_long_t pgtables_bytes; /* PTE page table pages */
+ atomic_long_t pgtables_bytes; /* PTE page table pages */
#endif
- int map_count; /* number of VMAs */
+ int map_count; /* number of VMAs */
- spinlock_t page_table_lock; /* Protects page tables and some counters */
- struct rw_semaphore mmap_sem;
+ spinlock_t page_table_lock; /* Protects page tables and some
+ * counters
+ */
+ struct rw_semaphore mmap_sem;
- struct list_head mmlist; /* List of maybe swapped mm's. These are globally strung
- * together off init_mm.mmlist, and are protected
- * by mmlist_lock
- */
+ struct list_head mmlist; /* List of maybe swapped mm's. These
+ * are globally strung together off
+ * init_mm.mmlist, and are protected
+ * by mmlist_lock
+ */
- unsigned long hiwater_rss; /* High-watermark of RSS usage */
- unsigned long hiwater_vm; /* High-water virtual memory usage */
+ unsigned long hiwater_rss; /* High-watermark of RSS usage */
+ unsigned long hiwater_vm; /* High-water virtual memory usage */
- unsigned long total_vm; /* Total pages mapped */
- unsigned long locked_vm; /* Pages that have PG_mlocked set */
- unsigned long pinned_vm; /* Refcount permanently increased */
- unsigned long data_vm; /* VM_WRITE & ~VM_SHARED & ~VM_STACK */
- unsigned long exec_vm; /* VM_EXEC & ~VM_WRITE & ~VM_STACK */
- unsigned long stack_vm; /* VM_STACK */
- unsigned long def_flags;
+ unsigned long total_vm; /* Total pages mapped */
+ unsigned long locked_vm; /* Pages that have PG_mlocked set */
+ unsigned long pinned_vm; /* Refcount permanently increased */
+ unsigned long data_vm; /* VM_WRITE & ~VM_SHARED & ~VM_STACK */
+ unsigned long exec_vm; /* VM_EXEC & ~VM_WRITE & ~VM_STACK */
+ unsigned long stack_vm; /* VM_STACK */
+ unsigned long def_flags;
- spinlock_t arg_lock; /* protect the below fields */
- unsigned long start_code, end_code, start_data, end_data;
- unsigned long start_brk, brk, start_stack;
- unsigned long arg_start, arg_end, env_start, env_end;
+ spinlock_t arg_lock; /* protect the below fields */
+ unsigned long start_code, end_code, start_data, end_data;
+ unsigned long start_brk, brk, start_stack;
+ unsigned long arg_start, arg_end, env_start, env_end;
- unsigned long saved_auxv[AT_VECTOR_SIZE]; /* for /proc/PID/auxv */
+ unsigned long saved_auxv[AT_VECTOR_SIZE]; /* for /proc/PID/auxv */
- /*
- * Special counters, in some configurations protected by the
- * page_table_lock, in other configurations by being atomic.
- */
- struct mm_rss_stat rss_stat;
-
- struct linux_binfmt *binfmt;
+ /*
+ * Special counters, in some configurations protected by the
+ * page_table_lock, in other configurations by being atomic.
+ */
+ struct mm_rss_stat rss_stat;
- cpumask_var_t cpu_vm_mask_var;
+ struct linux_binfmt *binfmt;
- /* Architecture-specific MM context */
- mm_context_t context;
+ /* Architecture-specific MM context */
+ mm_context_t context;
- unsigned long flags; /* Must use atomic bitops to access the bits */
+ unsigned long flags; /* Must use atomic bitops to access */
- struct core_state *core_state; /* coredumping support */
+ struct core_state *core_state; /* coredumping support */
#ifdef CONFIG_MEMBARRIER
- atomic_t membarrier_state;
+ atomic_t membarrier_state;
#endif
#ifdef CONFIG_AIO
- spinlock_t ioctx_lock;
- struct kioctx_table __rcu *ioctx_table;
+ spinlock_t ioctx_lock;
+ struct kioctx_table __rcu *ioctx_table;
#endif
#ifdef CONFIG_MEMCG
- /*
- * "owner" points to a task that is regarded as the canonical
- * user/owner of this mm. All of the following must be true in
- * order for it to be changed:
- *
- * current == mm->owner
- * current->mm != mm
- * new_owner->mm == mm
- * new_owner->alloc_lock is held
- */
- struct task_struct __rcu *owner;
+ /*
+ * "owner" points to a task that is regarded as the canonical
+ * user/owner of this mm. All of the following must be true in
+ * order for it to be changed:
+ *
+ * current == mm->owner
+ * current->mm != mm
+ * new_owner->mm == mm
+ * new_owner->alloc_lock is held
+ */
+ struct task_struct __rcu *owner;
#endif
- struct user_namespace *user_ns;
+ struct user_namespace *user_ns;
- /* store ref to file /proc/<pid>/exe symlink points to */
- struct file __rcu *exe_file;
+ /* store ref to file /proc/<pid>/exe symlink points to */
+ struct file __rcu *exe_file;
#ifdef CONFIG_MMU_NOTIFIER
- struct mmu_notifier_mm *mmu_notifier_mm;
+ struct mmu_notifier_mm *mmu_notifier_mm;
#endif
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
- pgtable_t pmd_huge_pte; /* protected by page_table_lock */
-#endif
-#ifdef CONFIG_CPUMASK_OFFSTACK
- struct cpumask cpumask_allocation;
+ pgtable_t pmd_huge_pte; /* protected by page_table_lock */
#endif
#ifdef CONFIG_NUMA_BALANCING
- /*
- * numa_next_scan is the next time that the PTEs will be marked
- * pte_numa. NUMA hinting faults will gather statistics and migrate
- * pages to new nodes if necessary.
- */
- unsigned long numa_next_scan;
+ /*
+ * numa_next_scan is the next time that the PTEs will be marked
+ * pte_numa. NUMA hinting faults will gather statistics and
+ * migrate pages to new nodes if necessary.
+ */
+ unsigned long numa_next_scan;
- /* Restart point for scanning and setting pte_numa */
- unsigned long numa_scan_offset;
+ /* Restart point for scanning and setting pte_numa */
+ unsigned long numa_scan_offset;
- /* numa_scan_seq prevents two threads setting pte_numa */
- int numa_scan_seq;
+ /* numa_scan_seq prevents two threads setting pte_numa */
+ int numa_scan_seq;
#endif
- /*
- * An operation with batched TLB flushing is going on. Anything that
- * can move process memory needs to flush the TLB when moving a
- * PROT_NONE or PROT_NUMA mapped page.
- */
- atomic_t tlb_flush_pending;
+ /*
+ * An operation with batched TLB flushing is going on. Anything
+ * that can move process memory needs to flush the TLB when
+ * moving a PROT_NONE or PROT_NUMA mapped page.
+ */
+ atomic_t tlb_flush_pending;
#ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
- /* See flush_tlb_batched_pending() */
- bool tlb_flush_batched;
+ /* See flush_tlb_batched_pending() */
+ bool tlb_flush_batched;
#endif
- struct uprobes_state uprobes_state;
+ struct uprobes_state uprobes_state;
#ifdef CONFIG_HUGETLB_PAGE
- atomic_long_t hugetlb_usage;
+ atomic_long_t hugetlb_usage;
#endif
- struct work_struct async_put_work;
+ struct work_struct async_put_work;
#if IS_ENABLED(CONFIG_HMM)
- /* HMM needs to track a few things per mm */
- struct hmm *hmm;
+ /* HMM needs to track a few things per mm */
+ struct hmm *hmm;
#endif
-} __randomize_layout;
+ } __randomize_layout;
+
+ /*
+ * The mm_cpumask needs to be at the end of mm_struct, because it
+ * is dynamically sized based on nr_cpu_ids.
+ */
+ unsigned long cpu_bitmap[];
+};
extern struct mm_struct init_mm;
+/* Pointer magic because the dynamic array size confuses some compilers. */
static inline void mm_init_cpumask(struct mm_struct *mm)
{
-#ifdef CONFIG_CPUMASK_OFFSTACK
- mm->cpu_vm_mask_var = &mm->cpumask_allocation;
-#endif
- cpumask_clear(mm->cpu_vm_mask_var);
+ unsigned long cpu_bitmap = (unsigned long)mm;
+
+ cpu_bitmap += offsetof(struct mm_struct, cpu_bitmap);
+ cpumask_clear((struct cpumask *)cpu_bitmap);
}
/* Future-safe accessor for struct mm_struct's cpu_vm_mask. */
static inline cpumask_t *mm_cpumask(struct mm_struct *mm)
{
- return mm->cpu_vm_mask_var;
+ return (struct cpumask *)&mm->cpu_bitmap;
}
struct mmu_gather;
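
Because cpu_bitmap[] is now a flexible array sized from nr_cpu_ids, whatever allocates an mm_struct has to reserve room for it beyond sizeof(struct mm_struct). A rough sketch of that sizing arithmetic follows; the cache name, alignment and flags are placeholders, the real cache is set up in kernel/fork.c.

#include <linux/cpumask.h>
#include <linux/mm_types.h>
#include <linux/slab.h>

static struct kmem_cache *example_mm_cachep;

static void example_mm_cache_init(void)
{
        /* One object = the struct itself + nr_cpu_ids bits for mm_cpumask(). */
        unsigned int mm_size = sizeof(struct mm_struct) + cpumask_size();

        example_mm_cachep = kmem_cache_create("example_mm_struct", mm_size,
                                              0, SLAB_HWCACHE_ALIGN, NULL);
}
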
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index 64300a48dcce..beed7121c781 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -146,6 +146,13 @@ struct mmc_host_ops {
/* Prepare HS400 target operating frequency depending host driver */
int (*prepare_hs400_tuning)(struct mmc_host *host, struct mmc_ios *ios);
+
+ /* Prepare for switching from HS400 to HS200 */
+ void (*hs400_downgrade)(struct mmc_host *host);
+
+ /* Complete selection of HS400 */
+ void (*hs400_complete)(struct mmc_host *host);
+
/* Prepare enhanced strobe depending host driver */
void (*hs400_enhanced_strobe)(struct mmc_host *host,
struct mmc_ios *ios);
@@ -474,9 +481,6 @@ static inline void *mmc_priv(struct mmc_host *host)
#define mmc_classdev(x) (&(x)->class_dev)
#define mmc_hostname(x) (dev_name(&(x)->class_dev))
-int mmc_power_save_host(struct mmc_host *host);
-int mmc_power_restore_host(struct mmc_host *host);
-
void mmc_detect_change(struct mmc_host *, unsigned long delay);
void mmc_request_done(struct mmc_host *, struct mmc_request *);
void mmc_command_done(struct mmc_host *host, struct mmc_request *mrq);
diff --git a/include/linux/mmc/mmc.h b/include/linux/mmc/mmc.h
index 3ffc27aaeeaf..897a87c4c827 100644
--- a/include/linux/mmc/mmc.h
+++ b/include/linux/mmc/mmc.h
@@ -144,7 +144,7 @@ static inline bool mmc_op_multi(u32 opcode)
#define R1_WP_ERASE_SKIP (1 << 15) /* sx, c */
#define R1_CARD_ECC_DISABLED (1 << 14) /* sx, a */
#define R1_ERASE_RESET (1 << 13) /* sr, c */
-#define R1_STATUS(x) (x & 0xFFFFE000)
+#define R1_STATUS(x) (x & 0xFFF9A000)
#define R1_CURRENT_STATE(x) ((x & 0x00001E00) >> 9) /* sx, b (4 bits) */
#define R1_READY_FOR_DATA (1 << 8) /* sx, a */
#define R1_SWITCH_ERROR (1 << 7) /* sx, c */
diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
index 392e6af82701..133ba78820ee 100644
--- a/include/linux/mmu_notifier.h
+++ b/include/linux/mmu_notifier.h
@@ -151,13 +151,15 @@ struct mmu_notifier_ops {
* address space but may still be referenced by sptes until
* the last refcount is dropped.
*
- * If both of these callbacks cannot block, and invalidate_range
- * cannot block, mmu_notifier_ops.flags should have
- * MMU_INVALIDATE_DOES_NOT_BLOCK set.
+ * If the blockable argument is set to false then the callback cannot
+ * sleep and has to return -EAGAIN; 0 should be returned
+ * otherwise.
+ *
*/
- void (*invalidate_range_start)(struct mmu_notifier *mn,
+ int (*invalidate_range_start)(struct mmu_notifier *mn,
struct mm_struct *mm,
- unsigned long start, unsigned long end);
+ unsigned long start, unsigned long end,
+ bool blockable);
void (*invalidate_range_end)(struct mmu_notifier *mn,
struct mm_struct *mm,
unsigned long start, unsigned long end);
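
With the bool blockable parameter, invalidate_range_start can now fail: a non-blocking caller (such as the OOM reaper) passes blockable=false and expects -EAGAIN instead of a sleeping callback. A minimal sketch of a notifier honouring that contract; the example_dev structure and its flush helper are hypothetical.

#include <linux/mmu_notifier.h>
#include <linux/mutex.h>

struct example_dev {
        struct mmu_notifier mn;
        struct mutex lock;
};

static void example_flush_mappings(struct example_dev *dev,
                                   unsigned long start, unsigned long end)
{
        /* Tear down device mappings covering [start, end) here. */
}

static int example_invalidate_range_start(struct mmu_notifier *mn,
                                          struct mm_struct *mm,
                                          unsigned long start,
                                          unsigned long end,
                                          bool blockable)
{
        struct example_dev *dev = container_of(mn, struct example_dev, mn);

        if (!blockable) {
                /* Must not sleep: back off and let the caller retry or skip us. */
                if (!mutex_trylock(&dev->lock))
                        return -EAGAIN;
        } else {
                mutex_lock(&dev->lock);
        }

        example_flush_mappings(dev, start, end);
        mutex_unlock(&dev->lock);

        return 0;
}
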
@@ -229,8 +231,9 @@ extern int __mmu_notifier_test_young(struct mm_struct *mm,
unsigned long address);
extern void __mmu_notifier_change_pte(struct mm_struct *mm,
unsigned long address, pte_t pte);
-extern void __mmu_notifier_invalidate_range_start(struct mm_struct *mm,
- unsigned long start, unsigned long end);
+extern int __mmu_notifier_invalidate_range_start(struct mm_struct *mm,
+ unsigned long start, unsigned long end,
+ bool blockable);
extern void __mmu_notifier_invalidate_range_end(struct mm_struct *mm,
unsigned long start, unsigned long end,
bool only_end);
@@ -281,7 +284,15 @@ static inline void mmu_notifier_invalidate_range_start(struct mm_struct *mm,
unsigned long start, unsigned long end)
{
if (mm_has_notifiers(mm))
- __mmu_notifier_invalidate_range_start(mm, start, end);
+ __mmu_notifier_invalidate_range_start(mm, start, end, true);
+}
+
+static inline int mmu_notifier_invalidate_range_start_nonblock(struct mm_struct *mm,
+ unsigned long start, unsigned long end)
+{
+ if (mm_has_notifiers(mm))
+ return __mmu_notifier_invalidate_range_start(mm, start, end, false);
+ return 0;
}
static inline void mmu_notifier_invalidate_range_end(struct mm_struct *mm,
@@ -461,6 +472,12 @@ static inline void mmu_notifier_invalidate_range_start(struct mm_struct *mm,
{
}
+static inline int mmu_notifier_invalidate_range_start_nonblock(struct mm_struct *mm,
+ unsigned long start, unsigned long end)
+{
+ return 0;
+}
+
static inline void mmu_notifier_invalidate_range_end(struct mm_struct *mm,
unsigned long start, unsigned long end)
{
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 32699b2dc52a..1e22d96734e0 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -755,25 +755,6 @@ static inline bool pgdat_is_empty(pg_data_t *pgdat)
return !pgdat->node_start_pfn && !pgdat->node_spanned_pages;
}
-static inline int zone_id(const struct zone *zone)
-{
- struct pglist_data *pgdat = zone->zone_pgdat;
-
- return zone - pgdat->node_zones;
-}
-
-#ifdef CONFIG_ZONE_DEVICE
-static inline bool is_dev_zone(const struct zone *zone)
-{
- return zone_id(zone) == ZONE_DEVICE;
-}
-#else
-static inline bool is_dev_zone(const struct zone *zone)
-{
- return false;
-}
-#endif
-
#include <linux/memory_hotplug.h>
void build_all_zonelists(pg_data_t *pgdat);
@@ -824,6 +805,18 @@ static inline int local_memory_node(int node_id) { return node_id; };
*/
#define zone_idx(zone) ((zone) - (zone)->zone_pgdat->node_zones)
+#ifdef CONFIG_ZONE_DEVICE
+static inline bool is_dev_zone(const struct zone *zone)
+{
+ return zone_idx(zone) == ZONE_DEVICE;
+}
+#else
+static inline bool is_dev_zone(const struct zone *zone)
+{
+ return false;
+}
+#endif
+
/*
* Returns true if a zone has pages managed by the buddy allocator.
* All the reclaim decisions have to use this function rather than
@@ -841,6 +834,25 @@ static inline bool populated_zone(struct zone *zone)
return zone->present_pages;
}
+#ifdef CONFIG_NUMA
+static inline int zone_to_nid(struct zone *zone)
+{
+ return zone->node;
+}
+
+static inline void zone_set_nid(struct zone *zone, int nid)
+{
+ zone->node = nid;
+}
+#else
+static inline int zone_to_nid(struct zone *zone)
+{
+ return 0;
+}
+
+static inline void zone_set_nid(struct zone *zone, int nid) {}
+#endif
+
extern int movable_zone;
#ifdef CONFIG_HIGHMEM
@@ -956,12 +968,7 @@ static inline int zonelist_zone_idx(struct zoneref *zoneref)
static inline int zonelist_node_idx(struct zoneref *zoneref)
{
-#ifdef CONFIG_NUMA
- /* zone_to_nid not available in this context */
- return zoneref->zone->node;
-#else
- return 0;
-#endif /* CONFIG_NUMA */
+ return zone_to_nid(zoneref->zone);
}
struct zoneref *__next_zones_zonelist(struct zoneref *z,
diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
index 96a71a648eed..1298a7daa57d 100644
--- a/include/linux/mod_devicetable.h
+++ b/include/linux/mod_devicetable.h
@@ -746,4 +746,19 @@ struct tb_service_id {
#define TBSVC_MATCH_PROTOCOL_VERSION 0x0004
#define TBSVC_MATCH_PROTOCOL_REVISION 0x0008
+/* USB Type-C Alternate Modes */
+
+#define TYPEC_ANY_MODE 0x7
+
+/**
+ * struct typec_device_id - USB Type-C alternate mode identifiers
+ * @svid: Standard or Vendor ID
+ * @mode: Mode index
+ */
+struct typec_device_id {
+ __u16 svid;
+ __u8 mode;
+ kernel_ulong_t driver_data;
+};
+
#endif /* LINUX_MOD_DEVICETABLE_H */
diff --git a/include/linux/module.h b/include/linux/module.h
index d44df9b2c131..f807f15bebbe 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -266,7 +266,7 @@ extern int modules_disabled; /* for sysctl */
/* Get/put a kernel symbol (calls must be symmetric) */
void *__symbol_get(const char *symbol);
void *__symbol_get_gpl(const char *symbol);
-#define symbol_get(x) ((typeof(&x))(__symbol_get(VMLINUX_SYMBOL_STR(x))))
+#define symbol_get(x) ((typeof(&x))(__symbol_get(__stringify(x))))
/* modules using other modules: kdb wants to see this. */
struct module_use {
@@ -575,7 +575,7 @@ extern void __noreturn __module_put_and_exit(struct module *mod,
#ifdef CONFIG_MODULE_UNLOAD
int module_refcount(struct module *mod);
void __symbol_put(const char *symbol);
-#define symbol_put(x) __symbol_put(VMLINUX_SYMBOL_STR(x))
+#define symbol_put(x) __symbol_put(__stringify(x))
void symbol_put_addr(void *addr);
/* Sometimes we know we already have a refcount, and it's easier not
diff --git a/include/linux/mroute_base.h b/include/linux/mroute_base.h
index d633f737b3c6..6675b9f81979 100644
--- a/include/linux/mroute_base.h
+++ b/include/linux/mroute_base.h
@@ -2,7 +2,7 @@
#define __LINUX_MROUTE_BASE_H
#include <linux/netdevice.h>
-#include <linux/rhashtable.h>
+#include <linux/rhashtable-types.h>
#include <linux/spinlock.h>
#include <net/net_namespace.h>
#include <net/sock.h>
@@ -254,6 +254,7 @@ struct mr_table {
atomic_t cache_resolve_queue_len;
bool mroute_do_assert;
bool mroute_do_pim;
+ bool mroute_do_wrvifwhole;
int mroute_reg_vif_num;
};
diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h
index a86c4fa93115..cd0be91bdefa 100644
--- a/include/linux/mtd/mtd.h
+++ b/include/linux/mtd/mtd.h
@@ -67,9 +67,11 @@ struct mtd_erase_region_info {
* @datbuf: data buffer - if NULL only oob data are read/written
* @oobbuf: oob data buffer
*
- * Note, it is allowed to read more than one OOB area at one go, but not write.
- * The interface assumes that the OOB write requests program only one page's
- * OOB area.
+ * Note, some MTD drivers do not allow you to write more than one OOB area in
+ * one go. If you try to do that on such an MTD device, -EINVAL will be
+ * returned. If you want your implementation to be portable across all kinds
+ * of MTD devices, you should split the write request into several
+ * sub-requests whenever it crosses a page boundary.
*/
struct mtd_oob_ops {
unsigned int mode;
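
Following the reworded note above, a portable caller splits an OOB write into one request per page. A rough sketch on top of mtd_write_oob(); it assumes MTD_OPS_PLACE_OOB and at most mtd->oobsize bytes of OOB per page.

#include <linux/kernel.h>
#include <linux/mtd/mtd.h>

static int example_write_oob_split(struct mtd_info *mtd, loff_t to,
                                   u8 *oob, size_t len)
{
        while (len) {
                struct mtd_oob_ops ops = {
                        .mode   = MTD_OPS_PLACE_OOB,
                        .oobbuf = oob,
                        .ooblen = min_t(size_t, len, mtd->oobsize),
                };
                int ret;

                /* One sub-request per page keeps every MTD driver happy. */
                ret = mtd_write_oob(mtd, to, &ops);
                if (ret)
                        return ret;

                oob += ops.oobretlen;
                len -= ops.oobretlen;
                to += mtd->writesize;
        }

        return 0;
}
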
diff --git a/include/linux/mtd/rawnand.h b/include/linux/mtd/rawnand.h
index 3e8ec3b8a39c..efb2345359bb 100644
--- a/include/linux/mtd/rawnand.h
+++ b/include/linux/mtd/rawnand.h
@@ -21,11 +21,10 @@
#include <linux/mtd/mtd.h>
#include <linux/mtd/flashchip.h>
#include <linux/mtd/bbm.h>
+#include <linux/of.h>
#include <linux/types.h>
-struct mtd_info;
struct nand_flash_dev;
-struct device_node;
/* Scan and identify a NAND device */
int nand_scan_with_ids(struct mtd_info *mtd, int max_chips,
@@ -36,17 +35,6 @@ static inline int nand_scan(struct mtd_info *mtd, int max_chips)
return nand_scan_with_ids(mtd, max_chips, NULL);
}
-/*
- * Separate phases of nand_scan(), allowing board driver to intervene
- * and override command or ECC setup according to flash type.
- */
-int nand_scan_ident(struct mtd_info *mtd, int max_chips,
- struct nand_flash_dev *table);
-int nand_scan_tail(struct mtd_info *mtd);
-
-/* Unregister the MTD device and free resources held by the NAND device */
-void nand_release(struct mtd_info *mtd);
-
/* Internal helper for board drivers which need to override command function */
void nand_wait_ready(struct mtd_info *mtd);
@@ -121,6 +109,7 @@ enum nand_ecc_algo {
NAND_ECC_UNKNOWN,
NAND_ECC_HAMMING,
NAND_ECC_BCH,
+ NAND_ECC_RS,
};
/*
@@ -218,6 +207,12 @@ enum nand_ecc_algo {
*/
#define NAND_WAIT_TCCS 0x00200000
+/*
+ * Whether the NAND chip is a boot medium. Drivers might use this information
+ * to select an ECC algorithm supported by the boot ROM, or to apply similar
+ * restrictions.
+ */
+#define NAND_IS_BOOT_MEDIUM 0x00400000
+
/* Options set by nand scan */
/* Nand scan has allocated controller struct */
#define NAND_CONTROLLER_ALLOC 0x80000000
@@ -230,6 +225,17 @@ enum nand_ecc_algo {
/* Keep gcc happy */
struct nand_chip;
+/* ONFI version bits */
+#define ONFI_VERSION_1_0 BIT(1)
+#define ONFI_VERSION_2_0 BIT(2)
+#define ONFI_VERSION_2_1 BIT(3)
+#define ONFI_VERSION_2_2 BIT(4)
+#define ONFI_VERSION_2_3 BIT(5)
+#define ONFI_VERSION_3_0 BIT(6)
+#define ONFI_VERSION_3_1 BIT(7)
+#define ONFI_VERSION_3_2 BIT(8)
+#define ONFI_VERSION_4_0 BIT(9)
+
/* ONFI features */
#define ONFI_FEATURE_16_BIT_BUS (1 << 0)
#define ONFI_FEATURE_EXT_PARAM_PAGE (1 << 7)
@@ -470,13 +476,13 @@ struct onfi_params {
*/
struct nand_parameters {
/* Generic parameters */
- char model[100];
+ const char *model;
bool supports_set_get_features;
DECLARE_BITMAP(set_feature_list, ONFI_FEATURE_NUMBER);
DECLARE_BITMAP(get_feature_list, ONFI_FEATURE_NUMBER);
/* ONFI parameters */
- struct onfi_params onfi;
+ struct onfi_params *onfi;
};
/* The maximum expected count of bytes in the NAND ID sequence */
@@ -493,20 +499,42 @@ struct nand_id {
};
/**
- * struct nand_hw_control - Control structure for hardware controller (e.g ECC generator) shared among independent devices
+ * struct nand_controller_ops - Controller operations
+ *
+ * @attach_chip: this method is called after the NAND detection phase, once the
+ * flash ID and MTD fields such as erase size, page size and OOB
+ * size have been set up. ECC requirements are available if
+ * provided by the NAND chip or device tree. Typically used to
+ * choose the appropriate ECC configuration and allocate
+ * associated resources.
+ * This hook is optional.
+ * @detach_chip: free all resources allocated/claimed in
+ * nand_controller_ops->attach_chip().
+ * This hook is optional.
+ */
+struct nand_controller_ops {
+ int (*attach_chip)(struct nand_chip *chip);
+ void (*detach_chip)(struct nand_chip *chip);
+};
+
+/**
+ * struct nand_controller - Structure used to describe a NAND controller
+ *
* @lock: protection lock
* @active: the mtd device which holds the controller currently
* @wq: wait queue to sleep on if a NAND operation is in
* progress; used instead of the per-chip wait queue
* when a hw controller is available.
+ * @ops: NAND controller operations.
*/
-struct nand_hw_control {
+struct nand_controller {
spinlock_t lock;
struct nand_chip *active;
wait_queue_head_t wq;
+ const struct nand_controller_ops *ops;
};
-static inline void nand_hw_control_init(struct nand_hw_control *nfc)
+static inline void nand_controller_init(struct nand_controller *nfc)
{
nfc->active = NULL;
spin_lock_init(&nfc->lock);
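
The new attach_chip()/detach_chip() hooks let a controller driver finish its ECC setup once the chip has been identified, which previously required the split nand_scan_ident()/nand_scan_tail() flow. A minimal sketch of wiring a controller to these hooks; the my_* names and the fixed BCH configuration are illustrative only.

#include <linux/mtd/rawnand.h>

struct my_nand_controller {
        struct nand_controller base;
};

static int my_attach_chip(struct nand_chip *chip)
{
        /* Page/OOB geometry and the chip's ECC requirements are known here. */
        chip->ecc.mode = NAND_ECC_HW;
        chip->ecc.algo = NAND_ECC_BCH;
        chip->ecc.size = 512;
        chip->ecc.strength = 8;

        return 0;
}

static void my_detach_chip(struct nand_chip *chip)
{
        /* Release whatever my_attach_chip() allocated (nothing in this sketch). */
}

static const struct nand_controller_ops my_nand_controller_ops = {
        .attach_chip = my_attach_chip,
        .detach_chip = my_detach_chip,
};

static void my_nand_controller_setup(struct my_nand_controller *ctrl)
{
        nand_controller_init(&ctrl->base);
        ctrl->base.ops = &my_nand_controller_ops;
}
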
@@ -778,11 +806,15 @@ nand_get_sdr_timings(const struct nand_data_interface *conf)
* implementation) if any.
* @cleanup: the ->init() function may have allocated resources, ->cleanup()
* is here to let vendor specific code release those resources.
+ * @fixup_onfi_param_page: apply vendor specific fixups to the ONFI parameter
+ * page. This is called after the checksum is verified.
*/
struct nand_manufacturer_ops {
void (*detect)(struct nand_chip *chip);
int (*init)(struct nand_chip *chip);
void (*cleanup)(struct nand_chip *chip);
+ void (*fixup_onfi_param_page)(struct nand_chip *chip,
+ struct nand_onfi_params *p);
};
/**
@@ -986,14 +1018,14 @@ struct nand_subop {
unsigned int last_instr_end_off;
};
-int nand_subop_get_addr_start_off(const struct nand_subop *subop,
- unsigned int op_id);
-int nand_subop_get_num_addr_cyc(const struct nand_subop *subop,
- unsigned int op_id);
-int nand_subop_get_data_start_off(const struct nand_subop *subop,
- unsigned int op_id);
-int nand_subop_get_data_len(const struct nand_subop *subop,
- unsigned int op_id);
+unsigned int nand_subop_get_addr_start_off(const struct nand_subop *subop,
+ unsigned int op_id);
+unsigned int nand_subop_get_num_addr_cyc(const struct nand_subop *subop,
+ unsigned int op_id);
+unsigned int nand_subop_get_data_start_off(const struct nand_subop *subop,
+ unsigned int op_id);
+unsigned int nand_subop_get_data_len(const struct nand_subop *subop,
+ unsigned int op_id);
/**
* struct nand_op_parser_addr_constraints - Constraints for address instructions
@@ -1176,9 +1208,9 @@ int nand_op_parser_exec_op(struct nand_chip *chip,
* setting the read-retry mode. Mostly needed for MLC NAND.
* @ecc: [BOARDSPECIFIC] ECC control structure
* @buf_align: minimum buffer alignment required by a platform
- * @hwcontrol: platform-specific hardware control structure
+ * @dummy_controller: dummy controller implementation for drivers that can
+ * only control a single chip
* @erase: [REPLACEABLE] erase function
- * @scan_bbt: [REPLACEABLE] function to scan bad block table
* @chip_delay: [BOARDSPECIFIC] chip dependent delay for transferring
* data from array to read regs (tR).
* @state: [INTERN] the current state of the NAND device
@@ -1271,7 +1303,6 @@ struct nand_chip {
const struct nand_operation *op,
bool check_only);
int (*erase)(struct mtd_info *mtd, int page);
- int (*scan_bbt)(struct mtd_info *mtd);
int (*set_features)(struct mtd_info *mtd, struct nand_chip *chip,
int feature_addr, uint8_t *subfeature_para);
int (*get_features)(struct mtd_info *mtd, struct nand_chip *chip,
@@ -1314,11 +1345,11 @@ struct nand_chip {
flstate_t state;
uint8_t *oob_poi;
- struct nand_hw_control *controller;
+ struct nand_controller *controller;
struct nand_ecc_ctrl ecc;
unsigned long buf_align;
- struct nand_hw_control hwcontrol;
+ struct nand_controller dummy_controller;
uint8_t *bbt;
struct nand_bbt_descr *bbt_td;
@@ -1517,14 +1548,12 @@ extern const struct nand_manufacturer_ops micron_nand_manuf_ops;
extern const struct nand_manufacturer_ops amd_nand_manuf_ops;
extern const struct nand_manufacturer_ops macronix_nand_manuf_ops;
-int nand_default_bbt(struct mtd_info *mtd);
+int nand_create_bbt(struct nand_chip *chip);
int nand_markbad_bbt(struct mtd_info *mtd, loff_t offs);
int nand_isreserved_bbt(struct mtd_info *mtd, loff_t offs);
int nand_isbad_bbt(struct mtd_info *mtd, loff_t offs, int allowbbt);
int nand_erase_nand(struct mtd_info *mtd, struct erase_info *instr,
int allowbbt);
-int nand_do_read(struct mtd_info *mtd, loff_t from, size_t len,
- size_t *retlen, uint8_t *buf);
/**
* struct platform_nand_chip - chip level device structure
@@ -1555,14 +1584,12 @@ struct platform_device;
* struct platform_nand_ctrl - controller level device structure
* @probe: platform specific function to probe/setup hardware
* @remove: platform specific function to remove/teardown hardware
- * @hwcontrol: platform specific hardware control structure
* @dev_ready: platform specific function to read ready/busy pin
* @select_chip: platform specific chip select function
* @cmd_ctrl: platform specific function for controlling
* ALE/CLE/nCE. Also used to write command and address
* @write_buf: platform specific function for write buffer
* @read_buf: platform specific function for read buffer
- * @read_byte: platform specific function to read one byte from chip
* @priv: private data to transport driver specific settings
*
* All fields are optional and depend on the hardware driver requirements
@@ -1570,13 +1597,11 @@ struct platform_device;
struct platform_nand_ctrl {
int (*probe)(struct platform_device *pdev);
void (*remove)(struct platform_device *pdev);
- void (*hwcontrol)(struct mtd_info *mtd, int cmd);
int (*dev_ready)(struct mtd_info *mtd);
void (*select_chip)(struct mtd_info *mtd, int chip);
void (*cmd_ctrl)(struct mtd_info *mtd, int dat, unsigned int ctrl);
void (*write_buf)(struct mtd_info *mtd, const uint8_t *buf, int len);
void (*read_buf)(struct mtd_info *mtd, uint8_t *buf, int len);
- unsigned char (*read_byte)(struct mtd_info *mtd);
void *priv;
};
@@ -1593,10 +1618,10 @@ struct platform_nand_data {
/* return the supported asynchronous timing mode. */
static inline int onfi_get_async_timing_mode(struct nand_chip *chip)
{
- if (!chip->parameters.onfi.version)
+ if (!chip->parameters.onfi)
return ONFI_TIMING_MODE_UNKNOWN;
- return chip->parameters.onfi.async_timing_mode;
+ return chip->parameters.onfi->async_timing_mode;
}
int onfi_fill_data_interface(struct nand_chip *chip,
@@ -1641,14 +1666,8 @@ int nand_check_erased_ecc_chunk(void *data, int datalen,
void *extraoob, int extraooblen,
int threshold);
-int nand_check_ecc_caps(struct nand_chip *chip,
- const struct nand_ecc_caps *caps, int oobavail);
-
-int nand_match_ecc_req(struct nand_chip *chip,
- const struct nand_ecc_caps *caps, int oobavail);
-
-int nand_maximize_ecc(struct nand_chip *chip,
- const struct nand_ecc_caps *caps, int oobavail);
+int nand_ecc_choose_conf(struct nand_chip *chip,
+ const struct nand_ecc_caps *caps, int oobavail);
/* Default write_oob implementation */
int nand_write_oob_std(struct mtd_info *mtd, struct nand_chip *chip, int page);
@@ -1674,10 +1693,14 @@ int nand_get_set_features_notsupp(struct mtd_info *mtd, struct nand_chip *chip,
/* Default read_page_raw implementation */
int nand_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
uint8_t *buf, int oob_required, int page);
+int nand_read_page_raw_notsupp(struct mtd_info *mtd, struct nand_chip *chip,
+ u8 *buf, int oob_required, int page);
/* Default write_page_raw implementation */
int nand_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
const uint8_t *buf, int oob_required, int page);
+int nand_write_page_raw_notsupp(struct mtd_info *mtd, struct nand_chip *chip,
+ const u8 *buf, int oob_required, int page);
/* Reset and initialize a NAND device */
int nand_reset(struct nand_chip *chip, int chipnr);
@@ -1711,8 +1734,13 @@ int nand_read_data_op(struct nand_chip *chip, void *buf, unsigned int len,
int nand_write_data_op(struct nand_chip *chip, const void *buf,
unsigned int len, bool force_8bit);
-/* Free resources held by the NAND device */
+/*
+ * Free resources held by the NAND device; must be called on error after a
+ * successful nand_scan().
+ */
void nand_cleanup(struct nand_chip *chip);
+/* Unregister the MTD device and call nand_cleanup() */
+void nand_release(struct mtd_info *mtd);
/* Default extended ID decoding function */
void nand_decode_ext_id(struct nand_chip *chip);
diff --git a/include/linux/mtd/spi-nor.h b/include/linux/mtd/spi-nor.h
index e60da0d34cc1..c922e97f205a 100644
--- a/include/linux/mtd/spi-nor.h
+++ b/include/linux/mtd/spi-nor.h
@@ -235,6 +235,7 @@ enum spi_nor_option_flags {
SNOR_F_S3AN_ADDR_DEFAULT = BIT(3),
SNOR_F_READY_XSR_RDY = BIT(4),
SNOR_F_USE_CLSR = BIT(5),
+ SNOR_F_BROKEN_RESET = BIT(6),
};
/**
diff --git a/include/linux/mtd/spinand.h b/include/linux/mtd/spinand.h
new file mode 100644
index 000000000000..088ff96c3eb6
--- /dev/null
+++ b/include/linux/mtd/spinand.h
@@ -0,0 +1,421 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2016-2017 Micron Technology, Inc.
+ *
+ * Authors:
+ * Peter Pan <peterpandong@micron.com>
+ */
+#ifndef __LINUX_MTD_SPINAND_H
+#define __LINUX_MTD_SPINAND_H
+
+#include <linux/mutex.h>
+#include <linux/bitops.h>
+#include <linux/device.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/spi/spi.h>
+#include <linux/spi/spi-mem.h>
+
+/**
+ * Standard SPI NAND flash operations
+ */
+
+#define SPINAND_RESET_OP \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(0xff, 1), \
+ SPI_MEM_OP_NO_ADDR, \
+ SPI_MEM_OP_NO_DUMMY, \
+ SPI_MEM_OP_NO_DATA)
+
+#define SPINAND_WR_EN_DIS_OP(enable) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD((enable) ? 0x06 : 0x04, 1), \
+ SPI_MEM_OP_NO_ADDR, \
+ SPI_MEM_OP_NO_DUMMY, \
+ SPI_MEM_OP_NO_DATA)
+
+#define SPINAND_READID_OP(ndummy, buf, len) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(0x9f, 1), \
+ SPI_MEM_OP_NO_ADDR, \
+ SPI_MEM_OP_DUMMY(ndummy, 1), \
+ SPI_MEM_OP_DATA_IN(len, buf, 1))
+
+#define SPINAND_SET_FEATURE_OP(reg, valptr) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(0x1f, 1), \
+ SPI_MEM_OP_ADDR(1, reg, 1), \
+ SPI_MEM_OP_NO_DUMMY, \
+ SPI_MEM_OP_DATA_OUT(1, valptr, 1))
+
+#define SPINAND_GET_FEATURE_OP(reg, valptr) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(0x0f, 1), \
+ SPI_MEM_OP_ADDR(1, reg, 1), \
+ SPI_MEM_OP_NO_DUMMY, \
+ SPI_MEM_OP_DATA_IN(1, valptr, 1))
+
+#define SPINAND_BLK_ERASE_OP(addr) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(0xd8, 1), \
+ SPI_MEM_OP_ADDR(3, addr, 1), \
+ SPI_MEM_OP_NO_DUMMY, \
+ SPI_MEM_OP_NO_DATA)
+
+#define SPINAND_PAGE_READ_OP(addr) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(0x13, 1), \
+ SPI_MEM_OP_ADDR(3, addr, 1), \
+ SPI_MEM_OP_NO_DUMMY, \
+ SPI_MEM_OP_NO_DATA)
+
+#define SPINAND_PAGE_READ_FROM_CACHE_OP(fast, addr, ndummy, buf, len) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(fast ? 0x0b : 0x03, 1), \
+ SPI_MEM_OP_ADDR(2, addr, 1), \
+ SPI_MEM_OP_DUMMY(ndummy, 1), \
+ SPI_MEM_OP_DATA_IN(len, buf, 1))
+
+#define SPINAND_PAGE_READ_FROM_CACHE_X2_OP(addr, ndummy, buf, len) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(0x3b, 1), \
+ SPI_MEM_OP_ADDR(2, addr, 1), \
+ SPI_MEM_OP_DUMMY(ndummy, 1), \
+ SPI_MEM_OP_DATA_IN(len, buf, 2))
+
+#define SPINAND_PAGE_READ_FROM_CACHE_X4_OP(addr, ndummy, buf, len) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(0x6b, 1), \
+ SPI_MEM_OP_ADDR(2, addr, 1), \
+ SPI_MEM_OP_DUMMY(ndummy, 1), \
+ SPI_MEM_OP_DATA_IN(len, buf, 4))
+
+#define SPINAND_PAGE_READ_FROM_CACHE_DUALIO_OP(addr, ndummy, buf, len) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(0xbb, 1), \
+ SPI_MEM_OP_ADDR(2, addr, 2), \
+ SPI_MEM_OP_DUMMY(ndummy, 2), \
+ SPI_MEM_OP_DATA_IN(len, buf, 2))
+
+#define SPINAND_PAGE_READ_FROM_CACHE_QUADIO_OP(addr, ndummy, buf, len) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(0xeb, 1), \
+ SPI_MEM_OP_ADDR(2, addr, 4), \
+ SPI_MEM_OP_DUMMY(ndummy, 4), \
+ SPI_MEM_OP_DATA_IN(len, buf, 4))
+
+#define SPINAND_PROG_EXEC_OP(addr) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(0x10, 1), \
+ SPI_MEM_OP_ADDR(3, addr, 1), \
+ SPI_MEM_OP_NO_DUMMY, \
+ SPI_MEM_OP_NO_DATA)
+
+#define SPINAND_PROG_LOAD(reset, addr, buf, len) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(reset ? 0x02 : 0x84, 1), \
+ SPI_MEM_OP_ADDR(2, addr, 1), \
+ SPI_MEM_OP_NO_DUMMY, \
+ SPI_MEM_OP_DATA_OUT(len, buf, 1))
+
+#define SPINAND_PROG_LOAD_X4(reset, addr, buf, len) \
+ SPI_MEM_OP(SPI_MEM_OP_CMD(reset ? 0x32 : 0x34, 1), \
+ SPI_MEM_OP_ADDR(2, addr, 1), \
+ SPI_MEM_OP_NO_DUMMY, \
+ SPI_MEM_OP_DATA_OUT(len, buf, 4))
+
+/**
+ * Standard SPI NAND flash commands
+ */
+#define SPINAND_CMD_PROG_LOAD_X4 0x32
+#define SPINAND_CMD_PROG_LOAD_RDM_DATA_X4 0x34
+
+/* feature register */
+#define REG_BLOCK_LOCK 0xa0
+#define BL_ALL_UNLOCKED 0x00
+
+/* configuration register */
+#define REG_CFG 0xb0
+#define CFG_OTP_ENABLE BIT(6)
+#define CFG_ECC_ENABLE BIT(4)
+#define CFG_QUAD_ENABLE BIT(0)
+
+/* status register */
+#define REG_STATUS 0xc0
+#define STATUS_BUSY BIT(0)
+#define STATUS_ERASE_FAILED BIT(2)
+#define STATUS_PROG_FAILED BIT(3)
+#define STATUS_ECC_MASK GENMASK(5, 4)
+#define STATUS_ECC_NO_BITFLIPS (0 << 4)
+#define STATUS_ECC_HAS_BITFLIPS (1 << 4)
+#define STATUS_ECC_UNCOR_ERROR (2 << 4)
+
+struct spinand_op;
+struct spinand_device;
+
+#define SPINAND_MAX_ID_LEN 4
+
+/**
+ * struct spinand_id - SPI NAND id structure
+ * @data: buffer containing the id bytes. Currently 4 bytes large, but can
+ * be extended if required
+ * @len: ID length
+ *
+ * struct_spinand_id->data contains all bytes returned after a READ_ID command,
+ * including dummy bytes if the chip does not emit ID bytes right after the
+ * READ_ID command. The responsibility to extract real ID bytes is left to
+ * spinand_manufacturer_ops->detect().
+ */
+struct spinand_id {
+ u8 data[SPINAND_MAX_ID_LEN];
+ int len;
+};
+
+/**
+ * struct spinand_manufacturer_ops - SPI NAND manufacturer specific operations
+ * @detect: detect a SPI NAND device. Every time a SPI NAND device is probed
+ * the core calls the spinand_manufacturer_ops->detect() hook of each
+ * registered manufacturer until one of them returns 1. Note that
+ * the first thing to check in this hook is that the manufacturer ID
+ * in struct_spinand_device->id matches the manufacturer whose
+ * ->detect() hook has been called. Should return 1 if there's a
+ * match, 0 if the manufacturer ID does not match and a negative
+ * error code otherwise. When 1 is returned, the core assumes
+ * that properties of the NAND chip (spinand->base.memorg and
+ * spinand->base.eccreq) have been filled
+ * @init: initialize a SPI NAND device
+ * @cleanup: cleanup a SPI NAND device
+ *
+ * Each SPI NAND manufacturer driver should implement this interface so that
+ * NAND chips coming from this vendor can be detected and initialized properly.
+ */
+struct spinand_manufacturer_ops {
+ int (*detect)(struct spinand_device *spinand);
+ int (*init)(struct spinand_device *spinand);
+ void (*cleanup)(struct spinand_device *spinand);
+};
+
+/**
+ * struct spinand_manufacturer - SPI NAND manufacturer instance
+ * @id: manufacturer ID
+ * @name: manufacturer name
+ * @ops: manufacturer operations
+ */
+struct spinand_manufacturer {
+ u8 id;
+ char *name;
+ const struct spinand_manufacturer_ops *ops;
+};
+
+/* SPI NAND manufacturers */
+extern const struct spinand_manufacturer macronix_spinand_manufacturer;
+extern const struct spinand_manufacturer micron_spinand_manufacturer;
+extern const struct spinand_manufacturer winbond_spinand_manufacturer;
+
+/**
+ * struct spinand_op_variants - SPI NAND operation variants
+ * @ops: the list of variants for a given operation
+ * @nops: the number of variants
+ *
+ * Some operations like read-from-cache/write-to-cache have several variants
+ * depending on the number of IO lines you use to transfer data or address
+ * cycles. This structure is a way to describe the different variants supported
+ * by a chip and let the core pick the best one based on the SPI mem controller
+ * capabilities.
+ */
+struct spinand_op_variants {
+ const struct spi_mem_op *ops;
+ unsigned int nops;
+};
+
+#define SPINAND_OP_VARIANTS(name, ...) \
+ const struct spinand_op_variants name = { \
+ .ops = (struct spi_mem_op[]) { __VA_ARGS__ }, \
+ .nops = sizeof((struct spi_mem_op[]){ __VA_ARGS__ }) / \
+ sizeof(struct spi_mem_op), \
+ }
+
+/**
+ * spinand_ecc_info - description of the on-die ECC implemented by a SPI NAND
+ * chip
+ * @get_status: get the ECC status. Should return a positive number encoding
+ * the number of corrected bitflips if correction was possible or
+ * -EBADMSG if there are uncorrectable errors. It can also return
+ * other negative error codes if the error is not caused by
+ * uncorrectable bitflips
+ * @ooblayout: the OOB layout used by the on-die ECC implementation
+ */
+struct spinand_ecc_info {
+ int (*get_status)(struct spinand_device *spinand, u8 status);
+ const struct mtd_ooblayout_ops *ooblayout;
+};
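
A hedged sketch of a get_status() implementation for a chip whose status register follows the generic STATUS_ECC_* encoding defined above; chips with finer-grained reporting decode extra bits here, and the fixed "8 bitflips corrected" value is a placeholder.

static int example_ecc_get_status(struct spinand_device *spinand, u8 status)
{
        switch (status & STATUS_ECC_MASK) {
        case STATUS_ECC_NO_BITFLIPS:
                return 0;

        case STATUS_ECC_HAS_BITFLIPS:
                /*
                 * The generic status field only says "some bitflips were
                 * corrected"; report the maximum the on-die ECC can fix so
                 * that MTD users get a conservative estimate.
                 */
                return 8;

        case STATUS_ECC_UNCOR_ERROR:
                return -EBADMSG;

        default:
                break;
        }

        return -EINVAL;
}
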
+
+#define SPINAND_HAS_QE_BIT BIT(0)
+
+/**
+ * struct spinand_info - Structure used to describe SPI NAND chips
+ * @model: model name
+ * @devid: device ID
+ * @flags: OR-ing of the SPINAND_XXX flags
+ * @memorg: memory organization
+ * @eccreq: ECC requirements
+ * @eccinfo: on-die ECC info
+ * @op_variants: operations variants
+ * @op_variants.read_cache: variants of the read-cache operation
+ * @op_variants.write_cache: variants of the write-cache operation
+ * @op_variants.update_cache: variants of the update-cache operation
+ * @select_target: function used to select a target/die. Required only for
+ * multi-die chips
+ *
+ * Each SPI NAND manufacturer driver should have a spinand_info table
+ * describing all the chips supported by the driver.
+ */
+struct spinand_info {
+ const char *model;
+ u8 devid;
+ u32 flags;
+ struct nand_memory_organization memorg;
+ struct nand_ecc_req eccreq;
+ struct spinand_ecc_info eccinfo;
+ struct {
+ const struct spinand_op_variants *read_cache;
+ const struct spinand_op_variants *write_cache;
+ const struct spinand_op_variants *update_cache;
+ } op_variants;
+ int (*select_target)(struct spinand_device *spinand,
+ unsigned int target);
+};
+
+#define SPINAND_INFO_OP_VARIANTS(__read, __write, __update) \
+ { \
+ .read_cache = __read, \
+ .write_cache = __write, \
+ .update_cache = __update, \
+ }
+
+#define SPINAND_ECCINFO(__ooblayout, __get_status) \
+ .eccinfo = { \
+ .ooblayout = __ooblayout, \
+ .get_status = __get_status, \
+ }
+
+#define SPINAND_SELECT_TARGET(__func) \
+ .select_target = __func,
+
+#define SPINAND_INFO(__model, __id, __memorg, __eccreq, __op_variants, \
+ __flags, ...) \
+ { \
+ .model = __model, \
+ .devid = __id, \
+ .memorg = __memorg, \
+ .eccreq = __eccreq, \
+ .op_variants = __op_variants, \
+ .flags = __flags, \
+ __VA_ARGS__ \
+ }
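
A hedged sketch of how a manufacturer driver would combine the macros above into an info table and a ->detect() hook. NAND_MEMORG() and NAND_ECCREQ() come from <linux/mtd/nand.h> and their argument order is assumed here; the 0x2c manufacturer ID, 0x11 device ID, geometry and OOB layout are placeholders, and example_ecc_get_status() is the sketch shown after struct spinand_ecc_info.

static SPINAND_OP_VARIANTS(example_read_cache_variants,
                SPINAND_PAGE_READ_FROM_CACHE_X4_OP(0, 1, NULL, 0),
                SPINAND_PAGE_READ_FROM_CACHE_OP(true, 0, 1, NULL, 0),
                SPINAND_PAGE_READ_FROM_CACHE_OP(false, 0, 1, NULL, 0));

static SPINAND_OP_VARIANTS(example_write_cache_variants,
                SPINAND_PROG_LOAD_X4(true, 0, NULL, 0),
                SPINAND_PROG_LOAD(true, 0, NULL, 0));

static SPINAND_OP_VARIANTS(example_update_cache_variants,
                SPINAND_PROG_LOAD_X4(false, 0, NULL, 0),
                SPINAND_PROG_LOAD(false, 0, NULL, 0));

static const struct mtd_ooblayout_ops example_ooblayout_ops = {
        .ecc = NULL,    /* a real driver fills in .ecc/.free for its OOB layout */
        .free = NULL,
};

static const struct spinand_info example_spinand_table[] = {
        SPINAND_INFO("EXAMPLE-SPINAND-1G", 0x11,
                     NAND_MEMORG(1, 2048, 64, 64, 1024, 1, 1, 1),
                     NAND_ECCREQ(8, 512),
                     SPINAND_INFO_OP_VARIANTS(&example_read_cache_variants,
                                              &example_write_cache_variants,
                                              &example_update_cache_variants),
                     SPINAND_HAS_QE_BIT,
                     SPINAND_ECCINFO(&example_ooblayout_ops,
                                     example_ecc_get_status)),
};

static int example_spinand_detect(struct spinand_device *spinand)
{
        u8 *id = spinand->id.data;
        int ret;

        /* Byte 0 carries the (placeholder) manufacturer ID, byte 1 the device ID. */
        if (id[0] != 0x2c)
                return 0;

        ret = spinand_match_and_init(spinand, example_spinand_table,
                                     ARRAY_SIZE(example_spinand_table), id[1]);
        if (ret)
                return ret;

        return 1;       /* recognised: memorg/eccreq are now filled in */
}

static const struct spinand_manufacturer_ops example_spinand_manuf_ops = {
        .detect = example_spinand_detect,
};
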
+
+/**
+ * struct spinand_device - SPI NAND device instance
+ * @base: NAND device instance
+ * @spimem: pointer to the SPI mem object
+ * @lock: lock used to serialize accesses to the NAND
+ * @id: NAND ID as returned by READ_ID
+ * @flags: NAND flags
+ * @op_templates: various SPI mem op templates
+ * @op_templates.read_cache: read cache op template
+ * @op_templates.write_cache: write cache op template
+ * @op_templates.update_cache: update cache op template
+ * @select_target: select a specific target/die. Usually called before sending
+ * a command addressing a page or an eraseblock embedded in
+ * this die. Only required if your chip exposes several dies
+ * @cur_target: currently selected target/die
+ * @eccinfo: on-die ECC information
+ * @cfg_cache: config register cache. One entry per die
+ * @databuf: bounce buffer for data
+ * @oobbuf: bounce buffer for OOB data
+ * @scratchbuf: buffer used for everything but page accesses. This is needed
+ * because the spi-mem interface explicitly requests that buffers
+ * passed in spi_mem_op be DMA-able, so we can't use buffers on
+ * the stack
+ * @manufacturer: SPI NAND manufacturer information
+ * @priv: manufacturer private data
+ */
+struct spinand_device {
+ struct nand_device base;
+ struct spi_mem *spimem;
+ struct mutex lock;
+ struct spinand_id id;
+ u32 flags;
+
+ struct {
+ const struct spi_mem_op *read_cache;
+ const struct spi_mem_op *write_cache;
+ const struct spi_mem_op *update_cache;
+ } op_templates;
+
+ int (*select_target)(struct spinand_device *spinand,
+ unsigned int target);
+ unsigned int cur_target;
+
+ struct spinand_ecc_info eccinfo;
+
+ u8 *cfg_cache;
+ u8 *databuf;
+ u8 *oobbuf;
+ u8 *scratchbuf;
+ const struct spinand_manufacturer *manufacturer;
+ void *priv;
+};
+
+/**
+ * mtd_to_spinand() - Get the SPI NAND device attached to an MTD instance
+ * @mtd: MTD instance
+ *
+ * Return: the SPI NAND device attached to @mtd.
+ */
+static inline struct spinand_device *mtd_to_spinand(struct mtd_info *mtd)
+{
+ return container_of(mtd_to_nanddev(mtd), struct spinand_device, base);
+}
+
+/**
+ * spinand_to_mtd() - Get the MTD device embedded in a SPI NAND device
+ * @spinand: SPI NAND device
+ *
+ * Return: the MTD device embedded in @spinand.
+ */
+static inline struct mtd_info *spinand_to_mtd(struct spinand_device *spinand)
+{
+ return nanddev_to_mtd(&spinand->base);
+}
+
+/**
+ * nand_to_spinand() - Get the SPI NAND device embedding an NAND object
+ * @nand: NAND object
+ *
+ * Return: the SPI NAND device embedding @nand.
+ */
+static inline struct spinand_device *nand_to_spinand(struct nand_device *nand)
+{
+ return container_of(nand, struct spinand_device, base);
+}
+
+/**
+ * spinand_to_nand() - Get the NAND device embedded in a SPI NAND object
+ * @spinand: SPI NAND device
+ *
+ * Return: the NAND device embedded in @spinand.
+ */
+static inline struct nand_device *
+spinand_to_nand(struct spinand_device *spinand)
+{
+ return &spinand->base;
+}
+
+/**
+ * spinand_set_of_node - Attach a DT node to a SPI NAND device
+ * @spinand: SPI NAND device
+ * @np: DT node
+ *
+ * Attach a DT node to a SPI NAND device.
+ */
+static inline void spinand_set_of_node(struct spinand_device *spinand,
+ struct device_node *np)
+{
+ nanddev_set_of_node(&spinand->base, np);
+}
+
+int spinand_match_and_init(struct spinand_device *dev,
+ const struct spinand_info *table,
+ unsigned int table_size, u8 devid);
+
+int spinand_upd_cfg(struct spinand_device *spinand, u8 mask, u8 val);
+int spinand_select_target(struct spinand_device *spinand, unsigned int target);
+
+#endif /* __LINUX_MTD_SPINAND_H */
diff --git a/include/linux/net.h b/include/linux/net.h
index 08b6eb964dd6..e0930678c8bf 100644
--- a/include/linux/net.h
+++ b/include/linux/net.h
@@ -114,7 +114,7 @@ struct socket {
unsigned long flags;
- struct socket_wq __rcu *wq;
+ struct socket_wq *wq;
struct file *file;
struct sock *sk;
@@ -147,7 +147,6 @@ struct proto_ops {
int (*getname) (struct socket *sock,
struct sockaddr *addr,
int peer);
- __poll_t (*poll_mask) (struct socket *sock, __poll_t events);
__poll_t (*poll) (struct file *file, struct socket *sock,
struct poll_table_struct *wait);
int (*ioctl) (struct socket *sock, unsigned int cmd,
diff --git a/include/linux/net_dim.h b/include/linux/net_dim.h
index db99240d00bd..c79e859408e6 100644
--- a/include/linux/net_dim.h
+++ b/include/linux/net_dim.h
@@ -363,7 +363,6 @@ static inline void net_dim_sample(u16 event_ctr,
}
#define NET_DIM_NEVENTS 64
-#define BITS_PER_TYPE(type) (sizeof(type) * BITS_PER_BYTE)
#define BIT_GAP(bits, end, start) ((((end) - (start)) + BIT_ULL(bits)) & (BIT_ULL(bits) - 1))
static inline void net_dim_calc_stats(struct net_dim_sample *start,
diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h
index 623bb8ced060..2b2a6dce1630 100644
--- a/include/linux/netdev_features.h
+++ b/include/linux/netdev_features.h
@@ -79,6 +79,7 @@ enum {
NETIF_F_HW_ESP_TX_CSUM_BIT, /* ESP with TX checksum offload */
NETIF_F_RX_UDP_TUNNEL_PORT_BIT, /* Offload of RX port for UDP tunnels */
NETIF_F_HW_TLS_TX_BIT, /* Hardware TLS TX offload */
+ NETIF_F_HW_TLS_RX_BIT, /* Hardware TLS RX offload */
NETIF_F_GRO_HW_BIT, /* Hardware Generic receive offload */
NETIF_F_HW_TLS_RECORD_BIT, /* Offload TLS record */
@@ -151,6 +152,7 @@ enum {
#define NETIF_F_HW_TLS_RECORD __NETIF_F(HW_TLS_RECORD)
#define NETIF_F_GSO_UDP_L4 __NETIF_F(GSO_UDP_L4)
#define NETIF_F_HW_TLS_TX __NETIF_F(HW_TLS_TX)
+#define NETIF_F_HW_TLS_RX __NETIF_F(HW_TLS_RX)
#define for_each_netdev_feature(mask_addr, bit) \
for_each_set_bit(bit, (unsigned long *)mask_addr, NETDEV_FEATURE_COUNT)
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 3ec9850c7936..ca5ab98053c8 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -302,6 +302,17 @@ struct netdev_boot_setup {
int __init netdev_boot_setup(char *str);
+struct gro_list {
+ struct list_head list;
+ int count;
+};
+
+/*
+ * Size of the GRO hash buckets: must be less than the number of bits in
+ * napi_struct::gro_bitmask.
+ */
+#define GRO_HASH_BUCKETS 8
+
/*
* Structure for NAPI scheduling similar to tasklet but with weighting
*/
@@ -316,13 +327,13 @@ struct napi_struct {
unsigned long state;
int weight;
- unsigned int gro_count;
+ unsigned long gro_bitmask;
int (*poll)(struct napi_struct *, int);
#ifdef CONFIG_NETPOLL
int poll_owner;
#endif
struct net_device *dev;
- struct sk_buff *gro_list;
+ struct gro_list gro_hash[GRO_HASH_BUCKETS];
struct sk_buff *skb;
struct hrtimer timer;
struct list_head dev_list;
@@ -569,6 +580,9 @@ struct netdev_queue {
* (/sys/class/net/DEV/Q/trans_timeout)
*/
unsigned long trans_timeout;
+
+ /* Subordinate device that the queue has been assigned to */
+ struct net_device *sb_dev;
/*
* write-mostly part
*/
@@ -730,10 +744,15 @@ struct xps_map {
*/
struct xps_dev_maps {
struct rcu_head rcu;
- struct xps_map __rcu *cpu_map[0];
+ struct xps_map __rcu *attr_map[0]; /* Either CPUs map or RXQs map */
};
-#define XPS_DEV_MAPS_SIZE(_tcs) (sizeof(struct xps_dev_maps) + \
+
+#define XPS_CPU_DEV_MAPS_SIZE(_tcs) (sizeof(struct xps_dev_maps) + \
(nr_cpu_ids * (_tcs) * sizeof(struct xps_map *)))
+
+#define XPS_RXQ_DEV_MAPS_SIZE(_tcs, _rxqs) (sizeof(struct xps_dev_maps) +\
+ (_rxqs * (_tcs) * sizeof(struct xps_map *)))
+
#endif /* CONFIG_XPS */
#define TC_MAX_QUEUE 16
@@ -779,7 +798,8 @@ static inline bool netdev_phys_item_id_same(struct netdev_phys_item_id *a,
}
typedef u16 (*select_queue_fallback_t)(struct net_device *dev,
- struct sk_buff *skb);
+ struct sk_buff *skb,
+ struct net_device *sb_dev);
enum tc_setup_type {
TC_SETUP_QDISC_MQPRIO,
@@ -792,6 +812,7 @@ enum tc_setup_type {
TC_SETUP_QDISC_RED,
TC_SETUP_QDISC_PRIO,
TC_SETUP_QDISC_MQ,
+ TC_SETUP_QDISC_ETF,
};
/* These structures hold the attributes of bpf state that are being passed
@@ -807,11 +828,8 @@ enum bpf_netdev_command {
*/
XDP_SETUP_PROG,
XDP_SETUP_PROG_HW,
- /* Check if a bpf program is set on the device. The callee should
- * set @prog_attached to one of XDP_ATTACHED_* values, note that "true"
- * is equivalent to XDP_ATTACHED_DRV.
- */
XDP_QUERY_PROG,
+ XDP_QUERY_PROG_HW,
/* BPF program for offload callbacks, invoked at program load time. */
BPF_OFFLOAD_VERIFIER_PREP,
BPF_OFFLOAD_TRANSLATE,
@@ -835,9 +853,8 @@ struct netdev_bpf {
struct bpf_prog *prog;
struct netlink_ext_ack *extack;
};
- /* XDP_QUERY_PROG */
+ /* XDP_QUERY_PROG, XDP_QUERY_PROG_HW */
struct {
- u8 prog_attached;
u32 prog_id;
/* flags with which program was installed */
u32 prog_flags;
@@ -855,10 +872,10 @@ struct netdev_bpf {
struct {
struct bpf_offloaded_map *offmap;
};
- /* XDP_SETUP_XSK_UMEM */
+ /* XDP_QUERY_XSK_UMEM, XDP_SETUP_XSK_UMEM */
struct {
- struct xdp_umem *umem;
- u16 queue_id;
+ struct xdp_umem *umem; /* out for query */
+ u16 queue_id; /* in for query */
} xsk;
};
};
@@ -891,6 +908,8 @@ struct tlsdev_ops {
void (*tls_dev_del)(struct net_device *netdev,
struct tls_context *ctx,
enum tls_offload_ctx_dir direction);
+ void (*tls_dev_resync_rx)(struct net_device *netdev,
+ struct sock *sk, u32 seq, u64 rcd_sn);
};
#endif
@@ -942,7 +961,8 @@ struct dev_ifalias {
* those the driver believes to be appropriate.
*
* u16 (*ndo_select_queue)(struct net_device *dev, struct sk_buff *skb,
- * void *accel_priv, select_queue_fallback_t fallback);
+ * struct net_device *sb_dev,
+ * select_queue_fallback_t fallback);
* Called to decide which queue to use when device supports multiple
* transmit queues.
*
@@ -1214,7 +1234,7 @@ struct net_device_ops {
netdev_features_t features);
u16 (*ndo_select_queue)(struct net_device *dev,
struct sk_buff *skb,
- void *accel_priv,
+ struct net_device *sb_dev,
select_queue_fallback_t fallback);
void (*ndo_change_rx_flags)(struct net_device *dev,
int flags);
@@ -1909,7 +1929,8 @@ struct net_device {
int watchdog_timeo;
#ifdef CONFIG_XPS
- struct xps_dev_maps __rcu *xps_maps;
+ struct xps_dev_maps __rcu *xps_cpus_map;
+ struct xps_dev_maps __rcu *xps_rxqs_map;
#endif
#ifdef CONFIG_NET_CLS_ACT
struct mini_Qdisc __rcu *miniq_egress;
@@ -1978,7 +1999,7 @@ struct net_device {
#ifdef CONFIG_DCB
const struct dcbnl_rtnl_ops *dcbnl_ops;
#endif
- u8 num_tc;
+ s16 num_tc;
struct netdev_tc_txq tc_to_txq[TC_MAX_QUEUE];
u8 prio_tc_map[TC_BITMASK + 1];
@@ -2032,6 +2053,17 @@ int netdev_get_num_tc(struct net_device *dev)
return dev->num_tc;
}
+void netdev_unbind_sb_channel(struct net_device *dev,
+ struct net_device *sb_dev);
+int netdev_bind_sb_channel_queue(struct net_device *dev,
+ struct net_device *sb_dev,
+ u8 tc, u16 count, u16 offset);
+int netdev_set_sb_channel(struct net_device *dev, u16 channel);
+static inline int netdev_get_sb_channel(struct net_device *dev)
+{
+ return max_t(int, -dev->num_tc, 0);
+}
+
static inline
struct netdev_queue *netdev_get_tx_queue(const struct net_device *dev,
unsigned int index)
@@ -2076,7 +2108,7 @@ static inline void netdev_for_each_tx_queue(struct net_device *dev,
struct netdev_queue *netdev_pick_tx(struct net_device *dev,
struct sk_buff *skb,
- void *accel_priv);
+ struct net_device *sb_dev);
/* returns the headroom that the master device needs to take in account
* when forwarding to this dev
@@ -2255,10 +2287,10 @@ static inline int gro_recursion_inc_test(struct sk_buff *skb)
return ++NAPI_GRO_CB(skb)->recursion_counter == GRO_RECURSION_LIMIT;
}
-typedef struct sk_buff **(*gro_receive_t)(struct sk_buff **, struct sk_buff *);
-static inline struct sk_buff **call_gro_receive(gro_receive_t cb,
- struct sk_buff **head,
- struct sk_buff *skb)
+typedef struct sk_buff *(*gro_receive_t)(struct list_head *, struct sk_buff *);
+static inline struct sk_buff *call_gro_receive(gro_receive_t cb,
+ struct list_head *head,
+ struct sk_buff *skb)
{
if (unlikely(gro_recursion_inc_test(skb))) {
NAPI_GRO_CB(skb)->flush |= 1;
@@ -2268,12 +2300,12 @@ static inline struct sk_buff **call_gro_receive(gro_receive_t cb,
return cb(head, skb);
}
-typedef struct sk_buff **(*gro_receive_sk_t)(struct sock *, struct sk_buff **,
- struct sk_buff *);
-static inline struct sk_buff **call_gro_receive_sk(gro_receive_sk_t cb,
- struct sock *sk,
- struct sk_buff **head,
- struct sk_buff *skb)
+typedef struct sk_buff *(*gro_receive_sk_t)(struct sock *, struct list_head *,
+ struct sk_buff *);
+static inline struct sk_buff *call_gro_receive_sk(gro_receive_sk_t cb,
+ struct sock *sk,
+ struct list_head *head,
+ struct sk_buff *skb)
{
if (unlikely(gro_recursion_inc_test(skb))) {
NAPI_GRO_CB(skb)->flush |= 1;
@@ -2290,6 +2322,9 @@ struct packet_type {
struct net_device *,
struct packet_type *,
struct net_device *);
+ void (*list_func) (struct list_head *,
+ struct packet_type *,
+ struct net_device *);
bool (*id_match)(struct packet_type *ptype,
struct sock *sk);
void *af_packet_priv;
@@ -2299,8 +2334,8 @@ struct packet_type {
struct offload_callbacks {
struct sk_buff *(*gso_segment)(struct sk_buff *skb,
netdev_features_t features);
- struct sk_buff **(*gro_receive)(struct sk_buff **head,
- struct sk_buff *skb);
+ struct sk_buff *(*gro_receive)(struct list_head *head,
+ struct sk_buff *skb);
int (*gro_complete)(struct sk_buff *skb, int nhoff);
};
@@ -2537,8 +2572,14 @@ void dev_close(struct net_device *dev);
void dev_close_many(struct list_head *head, bool unlink);
void dev_disable_lro(struct net_device *dev);
int dev_loopback_xmit(struct net *net, struct sock *sk, struct sk_buff *newskb);
+u16 dev_pick_tx_zero(struct net_device *dev, struct sk_buff *skb,
+ struct net_device *sb_dev,
+ select_queue_fallback_t fallback);
+u16 dev_pick_tx_cpu_id(struct net_device *dev, struct sk_buff *skb,
+ struct net_device *sb_dev,
+ select_queue_fallback_t fallback);
int dev_queue_xmit(struct sk_buff *skb);
-int dev_queue_xmit_accel(struct sk_buff *skb, void *accel_priv);
+int dev_queue_xmit_accel(struct sk_buff *skb, struct net_device *sb_dev);
int dev_direct_xmit(struct sk_buff *skb, u16 queue_id);
int register_netdevice(struct net_device *dev);
void unregister_netdevice_queue(struct net_device *dev, struct list_head *head);
@@ -2568,7 +2609,7 @@ struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex);
struct net_device *dev_get_by_napi_id(unsigned int napi_id);
int netdev_get_name(struct net *net, char *name, int ifindex);
int dev_restart(struct net_device *dev);
-int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb);
+int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb);
static inline unsigned int skb_gro_offset(const struct sk_buff *skb)
{
@@ -2784,15 +2825,35 @@ static inline void skb_gro_remcsum_cleanup(struct sk_buff *skb,
}
#ifdef CONFIG_XFRM_OFFLOAD
-static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff **pp, int flush)
+static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff *pp, int flush)
{
if (PTR_ERR(pp) != -EINPROGRESS)
NAPI_GRO_CB(skb)->flush |= flush;
}
+static inline void skb_gro_flush_final_remcsum(struct sk_buff *skb,
+ struct sk_buff *pp,
+ int flush,
+ struct gro_remcsum *grc)
+{
+ if (PTR_ERR(pp) != -EINPROGRESS) {
+ NAPI_GRO_CB(skb)->flush |= flush;
+ skb_gro_remcsum_cleanup(skb, grc);
+ skb->remcsum_offload = 0;
+ }
+}
#else
-static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff **pp, int flush)
+static inline void skb_gro_flush_final(struct sk_buff *skb, struct sk_buff *pp, int flush)
+{
+ NAPI_GRO_CB(skb)->flush |= flush;
+}
+static inline void skb_gro_flush_final_remcsum(struct sk_buff *skb,
+ struct sk_buff *pp,
+ int flush,
+ struct gro_remcsum *grc)
{
NAPI_GRO_CB(skb)->flush |= flush;
+ skb_gro_remcsum_cleanup(skb, grc);
+ skb->remcsum_offload = 0;
}
#endif
@@ -3258,6 +3319,92 @@ static inline void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
#ifdef CONFIG_XPS
int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
u16 index);
+int __netif_set_xps_queue(struct net_device *dev, const unsigned long *mask,
+ u16 index, bool is_rxqs_map);
+
+/**
+ * netif_attr_test_mask - Test whether a CPU or Rx queue is set in a mask
+ * @j: CPU/Rx queue index
+ * @mask: bitmask of all CPUs/Rx queues
+ * @nr_bits: number of bits in the bitmask
+ *
+ * Test if a CPU or Rx queue index is set in a mask of all CPU/Rx queues.
+ */
+static inline bool netif_attr_test_mask(unsigned long j,
+ const unsigned long *mask,
+ unsigned int nr_bits)
+{
+ cpu_max_bits_warn(j, nr_bits);
+ return test_bit(j, mask);
+}
+
+/**
+ * netif_attr_test_online - Test for online CPU/Rx queue
+ * @j: CPU/Rx queue index
+ * @online_mask: bitmask for CPUs/Rx queues that are online
+ * @nr_bits: number of bits in the bitmask
+ *
+ * Returns true if a CPU/Rx queue is online.
+ */
+static inline bool netif_attr_test_online(unsigned long j,
+ const unsigned long *online_mask,
+ unsigned int nr_bits)
+{
+ cpu_max_bits_warn(j, nr_bits);
+
+ if (online_mask)
+ return test_bit(j, online_mask);
+
+ return (j < nr_bits);
+}
+
+/**
+ * netif_attrmask_next - get the next CPU/Rx queue in a CPU/Rx queue mask
+ * @n: CPU/Rx queue index
+ * @srcp: the cpumask/Rx queue mask pointer
+ * @nr_bits: number of bits in the bitmask
+ *
+ * Returns >= nr_bits if no further CPUs/Rx queues are set.
+ */
+static inline unsigned int netif_attrmask_next(int n, const unsigned long *srcp,
+ unsigned int nr_bits)
+{
+ /* -1 is a legal arg here. */
+ if (n != -1)
+ cpu_max_bits_warn(n, nr_bits);
+
+ if (srcp)
+ return find_next_bit(srcp, nr_bits, n + 1);
+
+ return n + 1;
+}
+
+/**
+ * netif_attrmask_next_and - get the next CPU/Rx queue in *src1p & *src2p
+ * @n: CPU/Rx queue index
+ * @src1p: the first CPUs/Rx queues mask pointer
+ * @src2p: the second CPUs/Rx queues mask pointer
+ * @nr_bits: number of bits in the bitmask
+ *
+ * Returns >= nr_bits if no further CPUs/Rx queues are set in both masks.
+ */
+static inline int netif_attrmask_next_and(int n, const unsigned long *src1p,
+ const unsigned long *src2p,
+ unsigned int nr_bits)
+{
+ /* -1 is a legal arg here. */
+ if (n != -1)
+ cpu_max_bits_warn(n, nr_bits);
+
+ if (src1p && src2p)
+ return find_next_and_bit(src1p, src2p, nr_bits, n + 1);
+ else if (src1p)
+ return find_next_bit(src1p, nr_bits, n + 1);
+ else if (src2p)
+ return find_next_bit(src2p, nr_bits, n + 1);
+
+ return n + 1;
+}
#else
static inline int netif_set_xps_queue(struct net_device *dev,
const struct cpumask *mask,
@@ -3265,6 +3412,13 @@ static inline int netif_set_xps_queue(struct net_device *dev,
{
return 0;
}
+
+static inline int __netif_set_xps_queue(struct net_device *dev,
+ const unsigned long *mask,
+ u16 index, bool is_rxqs_map)
+{
+ return 0;
+}
#endif
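For illustration only (hypothetical helper, not part of this patch), the attribute-mask iterators above are used much like their cpumask counterparts:

/* Count the IDs (CPUs or Rx queues) present in both masks. */
static unsigned int example_count_common(const unsigned long *a,
					 const unsigned long *b,
					 unsigned int nr_ids)
{
	unsigned int count = 0;
	int j;

	for (j = netif_attrmask_next_and(-1, a, b, nr_ids);
	     j < (int)nr_ids;
	     j = netif_attrmask_next_and(j, a, b, nr_ids))
		count++;

	return count;
}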
/**
@@ -3284,8 +3438,9 @@ int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq);
int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq);
#else
static inline int netif_set_real_num_rx_queues(struct net_device *dev,
- unsigned int rxq)
+ unsigned int rxqs)
{
+ dev->real_num_rx_queues = rxqs;
return 0;
}
#endif
@@ -3364,6 +3519,7 @@ int netif_rx(struct sk_buff *skb);
int netif_rx_ni(struct sk_buff *skb);
int netif_receive_skb(struct sk_buff *skb);
int netif_receive_skb_core(struct sk_buff *skb);
+void netif_receive_skb_list(struct list_head *head);
gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb);
void napi_gro_flush(struct napi_struct *napi, bool flush_old);
struct sk_buff *napi_get_frags(struct napi_struct *napi);
@@ -3398,6 +3554,8 @@ int dev_set_alias(struct net_device *, const char *, size_t);
int dev_get_alias(const struct net_device *, char *, size_t);
int dev_change_net_namespace(struct net_device *, struct net *, const char *);
int __dev_set_mtu(struct net_device *, int);
+int dev_set_mtu_ext(struct net_device *dev, int mtu,
+ struct netlink_ext_ack *extack);
int dev_set_mtu(struct net_device *, int);
int dev_change_tx_queue_len(struct net_device *, unsigned long);
void dev_set_group(struct net_device *, int);
@@ -3415,8 +3573,9 @@ struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
typedef int (*bpf_op_t)(struct net_device *dev, struct netdev_bpf *bpf);
int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack,
int fd, u32 flags);
-void __dev_xdp_query(struct net_device *dev, bpf_op_t xdp_op,
- struct netdev_bpf *xdp);
+u32 __dev_xdp_query(struct net_device *dev, bpf_op_t xdp_op,
+ enum bpf_netdev_command cmd);
+int xdp_umem_query(struct net_device *dev, u16 queue_id);
int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
int dev_forward_skb(struct net_device *dev, struct sk_buff *skb);
diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
index dd2052f0efb7..07efffd0c759 100644
--- a/include/linux/netfilter.h
+++ b/include/linux/netfilter.h
@@ -288,6 +288,24 @@ NF_HOOK(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk, struct
return ret;
}
+static inline void
+NF_HOOK_LIST(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk,
+ struct list_head *head, struct net_device *in, struct net_device *out,
+ int (*okfn)(struct net *, struct sock *, struct sk_buff *))
+{
+ struct sk_buff *skb, *next;
+ struct list_head sublist;
+
+ INIT_LIST_HEAD(&sublist);
+ list_for_each_entry_safe(skb, next, head, list) {
+ list_del(&skb->list);
+ if (nf_hook(pf, hook, net, sk, skb, in, out, okfn) == 1)
+ list_add_tail(&skb->list, &sublist);
+ }
+ /* Put passed packets back on main list */
+ list_splice(&sublist, head);
+}
+
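As a rough, hypothetical illustration of the intended call pattern (none of the names below are from this patch), a list-based IPv4 receive path could filter a whole batch in one call and then deliver the survivors itself:

#include <linux/netdevice.h>
#include <linux/netfilter.h>
#include <linux/skbuff.h>

/* Continuation in the shape NF_HOOK_LIST expects; NF_HOOK_LIST itself
 * only filters the list, so the caller applies this per packet later. */
static int example_okfn(struct net *net, struct sock *sk, struct sk_buff *skb)
{
	return 0;
}

static void example_list_rcv(struct list_head *pkts, struct net_device *dev,
			     struct net *net)
{
	struct sk_buff *skb, *next;

	/* Dropped or stolen packets are unlinked; accepted ones remain. */
	NF_HOOK_LIST(NFPROTO_IPV4, NF_INET_PRE_ROUTING, net, NULL,
		     pkts, dev, NULL, example_okfn);

	list_for_each_entry_safe(skb, next, pkts, list)
		example_okfn(net, NULL, skb);
}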
/* Call setsockopt() */
int nf_setsockopt(struct sock *sk, u_int8_t pf, int optval, char __user *opt,
unsigned int len);
@@ -369,6 +387,14 @@ NF_HOOK(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk,
return okfn(net, sk, skb);
}
+static inline void
+NF_HOOK_LIST(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk,
+ struct list_head *head, struct net_device *in, struct net_device *out,
+ int (*okfn)(struct net *, struct sock *, struct sk_buff *))
+{
+ /* nothing to do */
+}
+
static inline int nf_hook(u_int8_t pf, unsigned int hook, struct net *net,
struct sock *sk, struct sk_buff *skb,
struct net_device *indev, struct net_device *outdev,
@@ -388,8 +414,17 @@ nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl, u_int8_t family)
extern void (*ip_ct_attach)(struct sk_buff *, const struct sk_buff *) __rcu;
void nf_ct_attach(struct sk_buff *, const struct sk_buff *);
+struct nf_conntrack_tuple;
+bool nf_ct_get_tuple_skb(struct nf_conntrack_tuple *dst_tuple,
+ const struct sk_buff *skb);
#else
static inline void nf_ct_attach(struct sk_buff *new, struct sk_buff *skb) {}
+struct nf_conntrack_tuple;
+static inline bool nf_ct_get_tuple_skb(struct nf_conntrack_tuple *dst_tuple,
+ const struct sk_buff *skb)
+{
+ return false;
+}
#endif
struct nf_conn;
@@ -398,6 +433,8 @@ enum ip_conntrack_info;
struct nf_ct_hook {
int (*update)(struct net *net, struct sk_buff *skb);
void (*destroy)(struct nf_conntrack *);
+ bool (*get_tuple_skb)(struct nf_conntrack_tuple *,
+ const struct sk_buff *);
};
extern struct nf_ct_hook __rcu *nf_ct_hook;
diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h
index 3ecc3050be0e..4a520d3304a2 100644
--- a/include/linux/netfilter/nfnetlink.h
+++ b/include/linux/netfilter/nfnetlink.h
@@ -29,6 +29,7 @@ struct nfnetlink_subsystem {
__u8 subsys_id; /* nfnetlink subsystem ID */
__u8 cb_count; /* number of callbacks */
const struct nfnl_callback *cb; /* callback for individual types */
+ struct module *owner;
int (*commit)(struct net *net, struct sk_buff *skb);
int (*abort)(struct net *net, struct sk_buff *skb);
void (*cleanup)(struct net *net);
diff --git a/include/linux/netfilter/nf_osf.h b/include/linux/netfilter/nfnetlink_osf.h
index 0e114c492fb8..ecf7dab81e9e 100644
--- a/include/linux/netfilter/nf_osf.h
+++ b/include/linux/netfilter/nfnetlink_osf.h
@@ -1,16 +1,8 @@
-#include <uapi/linux/netfilter/nf_osf.h>
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _NFOSF_H
+#define _NFOSF_H
-/* Initial window size option state machine: multiple of mss, mtu or
- * plain numeric value. Can also be made as plain numeric value which
- * is not a multiple of specified value.
- */
-enum nf_osf_window_size_options {
- OSF_WSS_PLAIN = 0,
- OSF_WSS_MSS,
- OSF_WSS_MTU,
- OSF_WSS_MODULO,
- OSF_WSS_MAX,
-};
+#include <uapi/linux/netfilter/nfnetlink_osf.h>
enum osf_fmatch_states {
/* Packet does not match the fingerprint */
@@ -21,6 +13,8 @@ enum osf_fmatch_states {
FMATCH_OPT_WRONG,
};
+extern struct list_head nf_osf_fingers[2];
+
struct nf_osf_finger {
struct rcu_head rcu_head;
struct list_head finger_entry;
@@ -31,3 +25,8 @@ bool nf_osf_match(const struct sk_buff *skb, u_int8_t family,
int hooknum, struct net_device *in, struct net_device *out,
const struct nf_osf_info *info, struct net *net,
const struct list_head *nf_osf_fingers);
+
+const char *nf_osf_find(const struct sk_buff *skb,
+ const struct list_head *nf_osf_fingers);
+
+#endif /* _NFOSF_H */
diff --git a/include/linux/netfilter_bridge.h b/include/linux/netfilter_bridge.h
index b671fdfd212b..fa0686500970 100644
--- a/include/linux/netfilter_bridge.h
+++ b/include/linux/netfilter_bridge.h
@@ -5,17 +5,6 @@
#include <uapi/linux/netfilter_bridge.h>
#include <linux/skbuff.h>
-enum nf_br_hook_priorities {
- NF_BR_PRI_FIRST = INT_MIN,
- NF_BR_PRI_NAT_DST_BRIDGED = -300,
- NF_BR_PRI_FILTER_BRIDGED = -200,
- NF_BR_PRI_BRNF = 0,
- NF_BR_PRI_NAT_DST_OTHER = 100,
- NF_BR_PRI_FILTER_OTHER = 200,
- NF_BR_PRI_NAT_SRC = 300,
- NF_BR_PRI_LAST = INT_MAX,
-};
-
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
int br_handle_frame_finish(struct net *net, struct sock *sk, struct sk_buff *skb);
diff --git a/include/linux/netfilter_ipv4.h b/include/linux/netfilter_ipv4.h
index b31dabfdb453..95ab5cc64422 100644
--- a/include/linux/netfilter_ipv4.h
+++ b/include/linux/netfilter_ipv4.h
@@ -23,9 +23,6 @@ struct nf_queue_entry;
#ifdef CONFIG_INET
__sum16 nf_ip_checksum(struct sk_buff *skb, unsigned int hook,
unsigned int dataoff, u_int8_t protocol);
-__sum16 nf_ip_checksum_partial(struct sk_buff *skb, unsigned int hook,
- unsigned int dataoff, unsigned int len,
- u_int8_t protocol);
int nf_ip_route(struct net *net, struct dst_entry **dst, struct flowi *fl,
bool strict);
int nf_ip_reroute(struct sk_buff *skb, const struct nf_queue_entry *entry);
@@ -35,14 +32,6 @@ static inline __sum16 nf_ip_checksum(struct sk_buff *skb, unsigned int hook,
{
return 0;
}
-static inline __sum16 nf_ip_checksum_partial(struct sk_buff *skb,
- unsigned int hook,
- unsigned int dataoff,
- unsigned int len,
- u_int8_t protocol)
-{
- return 0;
-}
static inline int nf_ip_route(struct net *net, struct dst_entry **dst,
struct flowi *fl, bool strict)
{
diff --git a/include/linux/netfilter_ipv6.h b/include/linux/netfilter_ipv6.h
index 288c597e75b3..c0dc4dd78887 100644
--- a/include/linux/netfilter_ipv6.h
+++ b/include/linux/netfilter_ipv6.h
@@ -30,11 +30,6 @@ struct nf_ipv6_ops {
void (*route_input)(struct sk_buff *skb);
int (*fragment)(struct net *net, struct sock *sk, struct sk_buff *skb,
int (*output)(struct net *, struct sock *, struct sk_buff *));
- __sum16 (*checksum)(struct sk_buff *skb, unsigned int hook,
- unsigned int dataoff, u_int8_t protocol);
- __sum16 (*checksum_partial)(struct sk_buff *skb, unsigned int hook,
- unsigned int dataoff, unsigned int len,
- u_int8_t protocol);
int (*route)(struct net *net, struct dst_entry **dst, struct flowi *fl,
bool strict);
int (*reroute)(struct sk_buff *skb, const struct nf_queue_entry *entry);
diff --git a/include/linux/netlink.h b/include/linux/netlink.h
index f3075d6c7e82..71f121b66ca8 100644
--- a/include/linux/netlink.h
+++ b/include/linux/netlink.h
@@ -170,7 +170,6 @@ netlink_skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
struct netlink_callback {
struct sk_buff *skb;
const struct nlmsghdr *nlh;
- int (*start)(struct netlink_callback *);
int (*dump)(struct sk_buff * skb,
struct netlink_callback *cb);
int (*done)(struct netlink_callback *cb);
diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h
index 57ffaa20d564..1b06f0b28453 100644
--- a/include/linux/nfs4.h
+++ b/include/linux/nfs4.h
@@ -374,6 +374,13 @@ enum lock_type4 {
NFS4_WRITEW_LT = 4
};
+enum change_attr_type4 {
+ NFS4_CHANGE_TYPE_IS_MONOTONIC_INCR = 0,
+ NFS4_CHANGE_TYPE_IS_VERSION_COUNTER = 1,
+ NFS4_CHANGE_TYPE_IS_VERSION_COUNTER_NOPNFS = 2,
+ NFS4_CHANGE_TYPE_IS_TIME_METADATA = 3,
+ NFS4_CHANGE_TYPE_IS_UNDEFINED = 4
+};
/* Mandatory Attributes */
#define FATTR4_WORD0_SUPPORTED_ATTRS (1UL << 0)
@@ -441,6 +448,7 @@ enum lock_type4 {
#define FATTR4_WORD2_LAYOUT_BLKSIZE (1UL << 1)
#define FATTR4_WORD2_MDSTHRESHOLD (1UL << 4)
#define FATTR4_WORD2_CLONE_BLKSIZE (1UL << 13)
+#define FATTR4_WORD2_CHANGE_ATTR_TYPE (1UL << 15)
#define FATTR4_WORD2_SECURITY_LABEL (1UL << 16)
#define FATTR4_WORD2_MODE_UMASK (1UL << 17)
@@ -527,6 +535,7 @@ enum {
NFSPROC4_CLNT_LAYOUTSTATS,
NFSPROC4_CLNT_CLONE,
NFSPROC4_CLNT_COPY,
+ NFSPROC4_CLNT_OFFLOAD_CANCEL,
NFSPROC4_CLNT_LOOKUPP,
};
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index 2f129bbfaae8..a0831e9d19c9 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -185,6 +185,17 @@ struct nfs_inode {
struct inode vfs_inode;
};
+struct nfs4_copy_state {
+ struct list_head copies;
+ nfs4_stateid stateid;
+ struct completion completion;
+ uint64_t count;
+ struct nfs_writeverf verf;
+ int error;
+ int flags;
+ struct nfs4_state *parent_state;
+};
+
/*
* Access bit flags
*/
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
index 74ae3e1d19a0..bf39d9c92201 100644
--- a/include/linux/nfs_fs_sb.h
+++ b/include/linux/nfs_fs_sb.h
@@ -28,7 +28,6 @@ struct nfs41_impl_id;
struct nfs_client {
refcount_t cl_count;
atomic_t cl_mds_count;
- seqcount_t cl_callback_count;
int cl_cons_state; /* current construction state (-ve: init error) */
#define NFS_CS_READY 0 /* ready to be used */
#define NFS_CS_INITING 1 /* busy initialising */
@@ -122,6 +121,7 @@ struct nfs_client {
#endif
struct net *cl_net;
+ struct list_head pending_cb_stateids;
};
/*
@@ -209,6 +209,7 @@ struct nfs_server {
struct list_head state_owners_lru;
struct list_head layouts;
struct list_head delegations;
+ struct list_head ss_copies;
unsigned long mig_gen;
unsigned long mig_status;
@@ -256,5 +257,6 @@ struct nfs_server {
#define NFS_CAP_LAYOUTSTATS (1U << 22)
#define NFS_CAP_CLONE (1U << 23)
#define NFS_CAP_COPY (1U << 24)
+#define NFS_CAP_OFFLOAD_CANCEL (1U << 25)
#endif
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index 9dee3c23895d..bd1c889a9ed9 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -271,7 +271,6 @@ struct nfs4_layoutget {
struct nfs4_layoutget_args args;
struct nfs4_layoutget_res res;
struct rpc_cred *cred;
- unsigned callback_count;
gfp_t gfp_flags;
};
@@ -1389,9 +1388,11 @@ struct nfs42_copy_args {
u64 dst_pos;
u64 count;
+ bool sync;
};
struct nfs42_write_res {
+ nfs4_stateid stateid;
u64 count;
struct nfs_writeverf verifier;
};
@@ -1404,6 +1405,18 @@ struct nfs42_copy_res {
struct nfs_commitres commit_res;
};
+struct nfs42_offload_status_args {
+ struct nfs4_sequence_args osa_seq_args;
+ struct nfs_fh *osa_src_fh;
+ nfs4_stateid osa_stateid;
+};
+
+struct nfs42_offload_status_res {
+ struct nfs4_sequence_res osr_seq_res;
+ uint64_t osr_count;
+ int osr_status;
+};
+
struct nfs42_seek_args {
struct nfs4_sequence_args seq_args;
@@ -1438,6 +1451,8 @@ enum {
NFS_IOHDR_EOF,
NFS_IOHDR_REDO,
NFS_IOHDR_STAT,
+ NFS_IOHDR_RESEND_PNFS,
+ NFS_IOHDR_RESEND_MDS,
};
struct nfs_io_completion;
diff --git a/include/linux/nmi.h b/include/linux/nmi.h
index b8d868d23e79..08f9247e9827 100644
--- a/include/linux/nmi.h
+++ b/include/linux/nmi.h
@@ -45,12 +45,18 @@ extern void touch_softlockup_watchdog(void);
extern void touch_softlockup_watchdog_sync(void);
extern void touch_all_softlockup_watchdogs(void);
extern unsigned int softlockup_panic;
-#else
+
+extern int lockup_detector_online_cpu(unsigned int cpu);
+extern int lockup_detector_offline_cpu(unsigned int cpu);
+#else /* CONFIG_SOFTLOCKUP_DETECTOR */
static inline void touch_softlockup_watchdog_sched(void) { }
static inline void touch_softlockup_watchdog(void) { }
static inline void touch_softlockup_watchdog_sync(void) { }
static inline void touch_all_softlockup_watchdogs(void) { }
-#endif
+
+#define lockup_detector_online_cpu NULL
+#define lockup_detector_offline_cpu NULL
+#endif /* CONFIG_SOFTLOCKUP_DETECTOR */
#ifdef CONFIG_DETECT_HUNG_TASK
void reset_hung_task_detector(void);
diff --git a/include/linux/node.h b/include/linux/node.h
index 6d336e38d155..257bb3d6d014 100644
--- a/include/linux/node.h
+++ b/include/linux/node.h
@@ -33,10 +33,10 @@ typedef void (*node_registration_func_t)(struct node *);
#if defined(CONFIG_MEMORY_HOTPLUG_SPARSE) && defined(CONFIG_NUMA)
extern int link_mem_sections(int nid, unsigned long start_pfn,
- unsigned long nr_pages, bool check_nid);
+ unsigned long end_pfn);
#else
static inline int link_mem_sections(int nid, unsigned long start_pfn,
- unsigned long nr_pages, bool check_nid)
+ unsigned long end_pfn)
{
return 0;
}
@@ -54,12 +54,14 @@ static inline int register_one_node(int nid)
if (node_online(nid)) {
struct pglist_data *pgdat = NODE_DATA(nid);
+ unsigned long start_pfn = pgdat->node_start_pfn;
+ unsigned long end_pfn = start_pfn + pgdat->node_spanned_pages;
error = __register_one_node(nid);
if (error)
return error;
/* link memory sections under this node */
- error = link_mem_sections(nid, pgdat->node_start_pfn, pgdat->node_spanned_pages, true);
+ error = link_mem_sections(nid, start_pfn, end_pfn);
}
return error;
@@ -69,7 +71,7 @@ extern void unregister_one_node(int nid);
extern int register_cpu_under_node(unsigned int cpu, unsigned int nid);
extern int unregister_cpu_under_node(unsigned int cpu, unsigned int nid);
extern int register_mem_sect_under_node(struct memory_block *mem_blk,
- int nid, bool check_nid);
+ void *arg);
extern int unregister_mem_sect_under_nodes(struct memory_block *mem_blk,
unsigned long phys_index);
@@ -99,7 +101,7 @@ static inline int unregister_cpu_under_node(unsigned int cpu, unsigned int nid)
return 0;
}
static inline int register_mem_sect_under_node(struct memory_block *mem_blk,
- int nid, bool check_nid)
+ void *arg)
{
return 0;
}
diff --git a/include/linux/nodemask.h b/include/linux/nodemask.h
index 1fbde8a880d9..5a30ad594ccc 100644
--- a/include/linux/nodemask.h
+++ b/include/linux/nodemask.h
@@ -518,7 +518,7 @@ static inline int node_random(const nodemask_t *mask)
* NODEMASK_ALLOC(type, name) allocates an object with a specified type and
* name.
*/
-#if NODES_SHIFT > 8 /* nodemask_t > 256 bytes */
+#if NODES_SHIFT > 8 /* nodemask_t > 32 bytes */
#define NODEMASK_ALLOC(type, name, gfp_flags) \
type *name = kmalloc(sizeof(*name), gfp_flags)
#define NODEMASK_FREE(m) kfree(m)
diff --git a/include/linux/nvme.h b/include/linux/nvme.h
index 2950ce957656..68e91ef5494c 100644
--- a/include/linux/nvme.h
+++ b/include/linux/nvme.h
@@ -242,7 +242,12 @@ struct nvme_id_ctrl {
__le32 sanicap;
__le32 hmminds;
__le16 hmmaxd;
- __u8 rsvd338[174];
+ __u8 rsvd338[4];
+ __u8 anatt;
+ __u8 anacap;
+ __le32 anagrpmax;
+ __le32 nanagrpid;
+ __u8 rsvd352[160];
__u8 sqes;
__u8 cqes;
__le16 maxcmd;
@@ -254,11 +259,12 @@ struct nvme_id_ctrl {
__le16 awun;
__le16 awupf;
__u8 nvscc;
- __u8 rsvd531;
+ __u8 nwpc;
__le16 acwu;
__u8 rsvd534[2];
__le32 sgls;
- __u8 rsvd540[228];
+ __le32 mnan;
+ __u8 rsvd544[224];
char subnqn[256];
__u8 rsvd1024[768];
__le32 ioccsz;
@@ -312,7 +318,11 @@ struct nvme_id_ns {
__le16 nabspf;
__le16 noiob;
__u8 nvmcap[16];
- __u8 rsvd64[40];
+ __u8 rsvd64[28];
+ __le32 anagrpid;
+ __u8 rsvd96[3];
+ __u8 nsattr;
+ __u8 rsvd100[4];
__u8 nguid[16];
__u8 eui64[8];
struct nvme_lbaf lbaf[16];
@@ -425,6 +435,32 @@ struct nvme_effects_log {
__u8 resv[2048];
};
+enum nvme_ana_state {
+ NVME_ANA_OPTIMIZED = 0x01,
+ NVME_ANA_NONOPTIMIZED = 0x02,
+ NVME_ANA_INACCESSIBLE = 0x03,
+ NVME_ANA_PERSISTENT_LOSS = 0x04,
+ NVME_ANA_CHANGE = 0x0f,
+};
+
+struct nvme_ana_group_desc {
+ __le32 grpid;
+ __le32 nnsids;
+ __le64 chgcnt;
+ __u8 state;
+ __u8 rsvd17[15];
+ __le32 nsids[];
+};
+
+/* flag for the log specific field of the ANA log */
+#define NVME_ANA_LOG_RGO (1 << 0)
+
+struct nvme_ana_rsp_hdr {
+ __le64 chgcnt;
+ __le16 ngrps;
+ __le16 rsvd10[3];
+};
+
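A hedged sketch of walking an ANA log page built from these structures (the buffer handling and names are hypothetical; each group descriptor is followed directly by its nsids array):

#include <linux/nvme.h>

static int example_walk_ana_log(void *buf, size_t len)
{
	struct nvme_ana_rsp_hdr *hdr = buf;
	void *p = buf + sizeof(*hdr);
	u16 i;

	for (i = 0; i < le16_to_cpu(hdr->ngrps); i++) {
		struct nvme_ana_group_desc *desc = p;
		u32 nr_nsids = le32_to_cpu(desc->nnsids);
		size_t desc_len = sizeof(*desc) + nr_nsids * sizeof(__le32);

		if (p + desc_len > buf + len)
			return -EINVAL;

		pr_debug("ANA group %u: state %#x, %u namespaces\n",
			 le32_to_cpu(desc->grpid), desc->state, nr_nsids);
		p += desc_len;
	}

	return 0;
}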
enum {
NVME_SMART_CRIT_SPARE = 1 << 0,
NVME_SMART_CRIT_TEMPERATURE = 1 << 1,
@@ -444,11 +480,13 @@ enum {
enum {
NVME_AER_NOTICE_NS_CHANGED = 0x00,
NVME_AER_NOTICE_FW_ACT_STARTING = 0x01,
+ NVME_AER_NOTICE_ANA = 0x03,
};
enum {
NVME_AEN_CFG_NS_ATTR = 1 << 8,
NVME_AEN_CFG_FW_ACT = 1 << 9,
+ NVME_AEN_CFG_ANA_CHANGE = 1 << 11,
};
struct nvme_lba_range_type {
@@ -749,15 +787,22 @@ enum {
NVME_FEAT_HOST_MEM_BUF = 0x0d,
NVME_FEAT_TIMESTAMP = 0x0e,
NVME_FEAT_KATO = 0x0f,
+ NVME_FEAT_HCTM = 0x10,
+ NVME_FEAT_NOPSC = 0x11,
+ NVME_FEAT_RRL = 0x12,
+ NVME_FEAT_PLM_CONFIG = 0x13,
+ NVME_FEAT_PLM_WINDOW = 0x14,
NVME_FEAT_SW_PROGRESS = 0x80,
NVME_FEAT_HOST_ID = 0x81,
NVME_FEAT_RESV_MASK = 0x82,
NVME_FEAT_RESV_PERSIST = 0x83,
+ NVME_FEAT_WRITE_PROTECT = 0x84,
NVME_LOG_ERROR = 0x01,
NVME_LOG_SMART = 0x02,
NVME_LOG_FW_SLOT = 0x03,
NVME_LOG_CHANGED_NS = 0x04,
NVME_LOG_CMD_EFFECTS = 0x05,
+ NVME_LOG_ANA = 0x0c,
NVME_LOG_DISC = 0x70,
NVME_LOG_RESERVATION = 0x80,
NVME_FWACT_REPL = (0 << 3),
@@ -765,6 +810,14 @@ enum {
NVME_FWACT_ACTV = (2 << 3),
};
+/* NVMe Namespace Write Protect State */
+enum {
+ NVME_NS_NO_WRITE_PROTECT = 0,
+ NVME_NS_WRITE_PROTECT,
+ NVME_NS_WRITE_PROTECT_POWER_CYCLE,
+ NVME_NS_WRITE_PROTECT_PERMANENT,
+};
+
#define NVME_MAX_CHANGED_NAMESPACES 1024
struct nvme_identify {
@@ -880,7 +933,7 @@ struct nvme_get_log_page_command {
__u64 rsvd2[2];
union nvme_data_ptr dptr;
__u8 lid;
- __u8 rsvd10;
+ __u8 lsp; /* upper 4 bits reserved */
__le16 numdl;
__le16 numdu;
__u16 rsvd11;
@@ -1111,6 +1164,8 @@ enum {
NVME_SC_SGL_INVALID_OFFSET = 0x16,
NVME_SC_SGL_INVALID_SUBTYPE = 0x17,
+ NVME_SC_NS_WRITE_PROTECTED = 0x20,
+
NVME_SC_LBA_RANGE = 0x80,
NVME_SC_CAP_EXCEEDED = 0x81,
NVME_SC_NS_NOT_READY = 0x82,
@@ -1180,6 +1235,13 @@ enum {
NVME_SC_ACCESS_DENIED = 0x286,
NVME_SC_UNWRITTEN_BLOCK = 0x287,
+ /*
+ * Path-related Errors:
+ */
+ NVME_SC_ANA_PERSISTENT_LOSS = 0x301,
+ NVME_SC_ANA_INACCESSIBLE = 0x302,
+ NVME_SC_ANA_TRANSITION = 0x303,
+
NVME_SC_DNR = 0x4000,
};
diff --git a/include/linux/of_iommu.h b/include/linux/of_iommu.h
index 4fa654e4b5a9..f3d40dd7bb66 100644
--- a/include/linux/of_iommu.h
+++ b/include/linux/of_iommu.h
@@ -32,8 +32,4 @@ static inline const struct iommu_ops *of_iommu_configure(struct device *dev,
#endif /* CONFIG_OF_IOMMU */
-extern struct of_device_id __iommu_of_table;
-
-#define IOMMU_OF_DECLARE(name, compat) OF_DECLARE_1(iommu, name, compat, NULL)
-
#endif /* __OF_IOMMU_H */
diff --git a/include/linux/omap-mailbox.h b/include/linux/omap-mailbox.h
index c726bd833761..6dbcd2da0332 100644
--- a/include/linux/omap-mailbox.h
+++ b/include/linux/omap-mailbox.h
@@ -1,9 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* omap-mailbox: interprocessor communication module for OMAP
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef OMAP_MAILBOX_H
diff --git a/include/linux/oom.h b/include/linux/oom.h
index 6adac113e96d..69864a547663 100644
--- a/include/linux/oom.h
+++ b/include/linux/oom.h
@@ -88,14 +88,14 @@ static inline bool mm_is_oom_victim(struct mm_struct *mm)
*
* Return 0 when the PF is safe VM_FAULT_SIGBUS otherwise.
*/
-static inline int check_stable_address_space(struct mm_struct *mm)
+static inline vm_fault_t check_stable_address_space(struct mm_struct *mm)
{
if (unlikely(test_bit(MMF_UNSTABLE, &mm->flags)))
return VM_FAULT_SIGBUS;
return 0;
}
-void __oom_reap_task_mm(struct mm_struct *mm);
+bool __oom_reap_task_mm(struct mm_struct *mm);
extern unsigned long oom_badness(struct task_struct *p,
struct mem_cgroup *memcg, const nodemask_t *nodemask,
diff --git a/include/linux/openvswitch.h b/include/linux/openvswitch.h
index e6b240b6196c..379affc63e24 100644
--- a/include/linux/openvswitch.h
+++ b/include/linux/openvswitch.h
@@ -21,4 +21,9 @@
#include <uapi/linux/openvswitch.h>
+#define OVS_CLONE_ATTR_EXEC 0 /* Specify a u32 value. When nonzero,
+ * actions in the clone do not change the
+ * flow key; when zero, they may.
+ */
+
#endif /* _LINUX_OPENVSWITCH_H */
diff --git a/include/linux/overflow.h b/include/linux/overflow.h
index 8712ff70995f..40b48e2133cb 100644
--- a/include/linux/overflow.h
+++ b/include/linux/overflow.h
@@ -202,6 +202,37 @@
#endif /* COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW */
+/** check_shl_overflow() - Calculate a left-shifted value and check overflow
+ *
+ * @a: Value to be shifted
+ * @s: How many bits left to shift
+ * @d: Pointer to where to store the result
+ *
+ * Computes *@d = (@a << @s)
+ *
+ * Returns true if '*d' cannot hold the result or when 'a << s' doesn't
+ * make sense. Example conditions:
+ * - 'a << s' causes bits to be lost when stored in *d.
+ * - 's' is garbage (e.g. negative) or so large that the result of
+ * 'a << s' is guaranteed to be 0.
+ * - 'a' is negative.
+ * - 'a << s' sets the sign bit, if any, in '*d'.
+ *
+ * '*d' will hold the results of the attempted shift, but is not
+ * considered "safe for use" if false is returned.
+ */
+#define check_shl_overflow(a, s, d) ({ \
+ typeof(a) _a = a; \
+ typeof(s) _s = s; \
+ typeof(d) _d = d; \
+ u64 _a_full = _a; \
+ unsigned int _to_shift = \
+ _s >= 0 && _s < 8 * sizeof(*d) ? _s : 0; \
+ *_d = (_a_full << _to_shift); \
+ (_to_shift != _s || *_d < 0 || _a < 0 || \
+ (*_d >> _to_shift) != _a); \
+})
+
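For example, a hypothetical caller sizing a region from a page count and a shift (names made up for illustration):

#include <linux/errno.h>
#include <linux/overflow.h>

/* Store nr_pages << page_shift in *bytes, or fail if it cannot fit. */
static int example_region_bytes(u32 nr_pages, unsigned int page_shift,
				size_t *bytes)
{
	if (check_shl_overflow(nr_pages, page_shift, bytes))
		return -ERANGE;

	return 0;
}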
/**
* array_size() - Calculate size of 2-dimensional array.
*
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 901943e4754b..74bee8cecf4c 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -369,8 +369,13 @@ PAGEFLAG_FALSE(Uncached)
PAGEFLAG(HWPoison, hwpoison, PF_ANY)
TESTSCFLAG(HWPoison, hwpoison, PF_ANY)
#define __PG_HWPOISON (1UL << PG_hwpoison)
+extern bool set_hwpoison_free_buddy_page(struct page *page);
#else
PAGEFLAG_FALSE(HWPoison)
+static inline bool set_hwpoison_free_buddy_page(struct page *page)
+{
+ return false;
+}
#define __PG_HWPOISON 0
#endif
diff --git a/include/linux/page_ext.h b/include/linux/page_ext.h
index ca5461efae2f..f84f167ec04c 100644
--- a/include/linux/page_ext.h
+++ b/include/linux/page_ext.h
@@ -16,18 +16,7 @@ struct page_ext_operations {
#ifdef CONFIG_PAGE_EXTENSION
-/*
- * page_ext->flags bits:
- *
- * PAGE_EXT_DEBUG_POISON is set for poisoned pages. This is used to
- * implement generic debug pagealloc feature. The pages are filled with
- * poison patterns and set this flag after free_pages(). The poisoned
- * pages are verified whether the patterns are not corrupted and clear
- * the flag before alloc_pages().
- */
-
enum page_ext_flags {
- PAGE_EXT_DEBUG_POISON, /* Page is poisoned */
PAGE_EXT_DEBUG_GUARD,
PAGE_EXT_OWNER,
#if defined(CONFIG_IDLE_PAGE_TRACKING) && !defined(CONFIG_64BIT)
@@ -61,7 +50,7 @@ static inline void page_ext_init(void)
}
#endif
-struct page_ext *lookup_page_ext(struct page *page);
+struct page_ext *lookup_page_ext(const struct page *page);
#else /* !CONFIG_PAGE_EXTENSION */
struct page_ext;
@@ -70,7 +59,7 @@ static inline void pgdat_page_ext_init(struct pglist_data *pgdat)
{
}
-static inline struct page_ext *lookup_page_ext(struct page *page)
+static inline struct page_ext *lookup_page_ext(const struct page *page)
{
return NULL;
}
diff --git a/include/linux/pci-dma-compat.h b/include/linux/pci-dma-compat.h
index 0dd1a3f7b309..c3f1b44ade29 100644
--- a/include/linux/pci-dma-compat.h
+++ b/include/linux/pci-dma-compat.h
@@ -8,10 +8,10 @@
#include <linux/dma-mapping.h>
/* This defines the direction arg to the DMA mapping routines. */
-#define PCI_DMA_BIDIRECTIONAL 0
-#define PCI_DMA_TODEVICE 1
-#define PCI_DMA_FROMDEVICE 2
-#define PCI_DMA_NONE 3
+#define PCI_DMA_BIDIRECTIONAL DMA_BIDIRECTIONAL
+#define PCI_DMA_TODEVICE DMA_TO_DEVICE
+#define PCI_DMA_FROMDEVICE DMA_FROM_DEVICE
+#define PCI_DMA_NONE DMA_NONE
static inline void *
pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
diff --git a/include/linux/pci-epc.h b/include/linux/pci-epc.h
index 243eaa5a66ff..37dab8116901 100644
--- a/include/linux/pci-epc.h
+++ b/include/linux/pci-epc.h
@@ -17,6 +17,7 @@ enum pci_epc_irq_type {
PCI_EPC_IRQ_UNKNOWN,
PCI_EPC_IRQ_LEGACY,
PCI_EPC_IRQ_MSI,
+ PCI_EPC_IRQ_MSIX,
};
/**
@@ -30,7 +31,11 @@ enum pci_epc_irq_type {
* capability register
* @get_msi: ops to get the number of MSI interrupts allocated by the RC from
* the MSI capability register
- * @raise_irq: ops to raise a legacy or MSI interrupt
+ * @set_msix: ops to set the requested number of MSI-X interrupts in the
+ * MSI-X capability register
+ * @get_msix: ops to get the number of MSI-X interrupts allocated by the RC
+ * from the MSI-X capability register
+ * @raise_irq: ops to raise a legacy, MSI or MSI-X interrupt
* @start: ops to start the PCI link
* @stop: ops to stop the PCI link
* @owner: the module owner containing the ops
@@ -48,8 +53,10 @@ struct pci_epc_ops {
phys_addr_t addr);
int (*set_msi)(struct pci_epc *epc, u8 func_no, u8 interrupts);
int (*get_msi)(struct pci_epc *epc, u8 func_no);
+ int (*set_msix)(struct pci_epc *epc, u8 func_no, u16 interrupts);
+ int (*get_msix)(struct pci_epc *epc, u8 func_no);
int (*raise_irq)(struct pci_epc *epc, u8 func_no,
- enum pci_epc_irq_type type, u8 interrupt_num);
+ enum pci_epc_irq_type type, u16 interrupt_num);
int (*start)(struct pci_epc *epc);
void (*stop)(struct pci_epc *epc);
struct module *owner;
@@ -95,6 +102,7 @@ struct pci_epc {
#define EPC_FEATURE_NO_LINKUP_NOTIFIER BIT(0)
#define EPC_FEATURE_BAR_MASK (BIT(1) | BIT(2) | BIT(3))
+#define EPC_FEATURE_MSIX_AVAILABLE BIT(4)
#define EPC_FEATURE_SET_BAR(features, bar) \
(features |= (EPC_FEATURE_BAR_MASK & (bar << 1)))
#define EPC_FEATURE_GET_BAR(features) \
@@ -144,8 +152,10 @@ void pci_epc_unmap_addr(struct pci_epc *epc, u8 func_no,
phys_addr_t phys_addr);
int pci_epc_set_msi(struct pci_epc *epc, u8 func_no, u8 interrupts);
int pci_epc_get_msi(struct pci_epc *epc, u8 func_no);
+int pci_epc_set_msix(struct pci_epc *epc, u8 func_no, u16 interrupts);
+int pci_epc_get_msix(struct pci_epc *epc, u8 func_no);
int pci_epc_raise_irq(struct pci_epc *epc, u8 func_no,
- enum pci_epc_irq_type type, u8 interrupt_num);
+ enum pci_epc_irq_type type, u16 interrupt_num);
int pci_epc_start(struct pci_epc *epc);
void pci_epc_stop(struct pci_epc *epc);
struct pci_epc *pci_epc_get(const char *epc_name);
diff --git a/include/linux/pci-epf.h b/include/linux/pci-epf.h
index 4e7764935fa8..ec02f58758c8 100644
--- a/include/linux/pci-epf.h
+++ b/include/linux/pci-epf.h
@@ -119,6 +119,7 @@ struct pci_epf {
struct pci_epf_header *header;
struct pci_epf_bar bar[6];
u8 msi_interrupts;
+ u16 msix_interrupts;
u8 func_no;
struct pci_epc *epc;
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 340029b2fb38..e72ca8dd6241 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -261,6 +261,9 @@ enum pci_bus_speed {
PCI_SPEED_UNKNOWN = 0xff,
};
+enum pci_bus_speed pcie_get_speed_cap(struct pci_dev *dev);
+enum pcie_link_width pcie_get_width_cap(struct pci_dev *dev);
+
struct pci_cap_saved_data {
u16 cap_nr;
bool cap_extended;
@@ -299,6 +302,7 @@ struct pci_dev {
u8 hdr_type; /* PCI header type (`multi' flag masked out) */
#ifdef CONFIG_PCIEAER
u16 aer_cap; /* AER capability offset */
+ struct aer_stats *aer_stats; /* AER stats for this device */
#endif
u8 pcie_cap; /* PCIe capability offset */
u8 msi_cap; /* MSI capability offset */
@@ -350,6 +354,7 @@ struct pci_dev {
unsigned int ltr_path:1; /* Latency Tolerance Reporting
supported from root to here */
#endif
+ unsigned int eetlp_prefix_path:1; /* End-to-End TLP Prefix */
pci_channel_state_t error_state; /* Current connectivity state */
struct device dev; /* Generic device interface */
@@ -368,7 +373,6 @@ struct pci_dev {
unsigned int transparent:1; /* Subtractive decode bridge */
unsigned int multifunction:1; /* Multi-function device */
- unsigned int is_added:1;
unsigned int is_busmaster:1; /* Is busmaster */
unsigned int no_msi:1; /* May not use MSI */
unsigned int no_64bit_msi:1; /* May only use 32-bit MSIs */
@@ -388,6 +392,7 @@ struct pci_dev {
unsigned int is_virtfn:1;
unsigned int reset_fn:1;
unsigned int is_hotplug_bridge:1;
+ unsigned int shpc_managed:1; /* SHPC owned by shpchp */
unsigned int is_thunderbolt:1; /* Thunderbolt controller */
unsigned int __aer_firmware_first_valid:1;
unsigned int __aer_firmware_first:1;
@@ -819,6 +824,21 @@ struct pci_driver {
.vendor = PCI_VENDOR_ID_##vend, .device = (dev), \
.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, 0, 0
+/**
+ * PCI_DEVICE_DATA - macro used to describe a specific PCI device in a very short form
+ * @vend: the vendor name (without PCI_VENDOR_ID_ prefix)
+ * @dev: the device name (without PCI_DEVICE_ID_<vend>_ prefix)
+ * @data: the driver data to be filled
+ *
+ * This macro is used to create a struct pci_device_id that matches a
+ * specific PCI device. The subvendor and subdevice fields will be set
+ * to PCI_ANY_ID.
+ */
+#define PCI_DEVICE_DATA(vend, dev, data) \
+ .vendor = PCI_VENDOR_ID_##vend, .device = PCI_DEVICE_ID_##vend##_##dev, \
+ .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, 0, 0, \
+ .driver_data = (kernel_ulong_t)(data)
+
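For instance, a driver could attach per-device data through its ID table like this (a hypothetical sketch reusing the Netronome IDs defined in pci_ids.h; the info structure and its values are made up):

#include <linux/module.h>
#include <linux/pci.h>

struct example_devinfo {
	unsigned int max_queues;	/* made-up field for illustration */
};

static const struct example_devinfo example_nfp4000_info = {
	.max_queues = 64,
};

static const struct pci_device_id example_ids[] = {
	{ PCI_DEVICE_DATA(NETRONOME, NFP4000, &example_nfp4000_info) },
	{ }
};
MODULE_DEVICE_TABLE(pci, example_ids);

static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	const struct example_devinfo *info = (const void *)id->driver_data;

	return info->max_queues ? 0 : -ENODEV;
}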
enum {
PCI_REASSIGN_ALL_RSRC = 0x00000001, /* Ignore firmware setup */
PCI_REASSIGN_ALL_BUS = 0x00000002, /* Reassign all bus numbers */
@@ -1089,20 +1109,17 @@ u32 pcie_bandwidth_available(struct pci_dev *dev, struct pci_dev **limiting_dev,
enum pci_bus_speed *speed,
enum pcie_link_width *width);
void pcie_print_link_status(struct pci_dev *dev);
+bool pcie_has_flr(struct pci_dev *dev);
int pcie_flr(struct pci_dev *dev);
int __pci_reset_function_locked(struct pci_dev *dev);
int pci_reset_function(struct pci_dev *dev);
int pci_reset_function_locked(struct pci_dev *dev);
int pci_try_reset_function(struct pci_dev *dev);
int pci_probe_reset_slot(struct pci_slot *slot);
-int pci_reset_slot(struct pci_slot *slot);
-int pci_try_reset_slot(struct pci_slot *slot);
int pci_probe_reset_bus(struct pci_bus *bus);
-int pci_reset_bus(struct pci_bus *bus);
-int pci_try_reset_bus(struct pci_bus *bus);
+int pci_reset_bus(struct pci_dev *dev);
void pci_reset_secondary_bus(struct pci_dev *dev);
void pcibios_reset_secondary_bus(struct pci_dev *dev);
-int pci_reset_bridge_secondary_bus(struct pci_dev *dev);
void pci_update_resource(struct pci_dev *dev, int resno);
int __must_check pci_assign_resource(struct pci_dev *dev, int i);
int __must_check pci_reassign_resource(struct pci_dev *dev, int i, resource_size_t add_size, resource_size_t align);
@@ -1122,7 +1139,6 @@ int pci_enable_rom(struct pci_dev *pdev);
void pci_disable_rom(struct pci_dev *pdev);
void __iomem __must_check *pci_map_rom(struct pci_dev *pdev, size_t *size);
void pci_unmap_rom(struct pci_dev *pdev, void __iomem *rom);
-size_t pci_get_rom_size(struct pci_dev *pdev, void __iomem *rom, size_t size);
void __iomem __must_check *pci_platform_rom(struct pci_dev *pdev, size_t *size);
/* Power management related routines */
@@ -1240,6 +1256,8 @@ int pci_register_io_range(struct fwnode_handle *fwnode, phys_addr_t addr,
unsigned long pci_address_to_pio(phys_addr_t addr);
phys_addr_t pci_pio_to_address(unsigned long pio);
int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr);
+int devm_pci_remap_iospace(struct device *dev, const struct resource *res,
+ phys_addr_t phys_addr);
void pci_unmap_iospace(struct resource *res);
void __iomem *devm_pci_remap_cfgspace(struct device *dev,
resource_size_t offset,
@@ -1468,13 +1486,9 @@ static inline bool pcie_aspm_support_enabled(void) { return false; }
#endif
#ifdef CONFIG_PCIEAER
-void pci_no_aer(void);
bool pci_aer_available(void);
-int pci_aer_init(struct pci_dev *dev);
#else
-static inline void pci_no_aer(void) { }
static inline bool pci_aer_available(void) { return false; }
-static inline int pci_aer_init(struct pci_dev *d) { return -ENODEV; }
#endif
#ifdef CONFIG_PCIE_ECRC
@@ -1795,7 +1809,11 @@ struct pci_fixup {
u16 device; /* Or PCI_ANY_ID */
u32 class; /* Or PCI_ANY_ID */
unsigned int class_shift; /* should be 0, 8, 16 */
+#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS
+ int hook_offset;
+#else
void (*hook)(struct pci_dev *dev);
+#endif
};
enum pci_fixup_pass {
@@ -1809,12 +1827,28 @@ enum pci_fixup_pass {
pci_fixup_suspend_late, /* pci_device_suspend_late() */
};
+#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS
+#define __DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class, \
+ class_shift, hook) \
+ __ADDRESSABLE(hook) \
+ asm(".section " #sec ", \"a\" \n" \
+ ".balign 16 \n" \
+ ".short " #vendor ", " #device " \n" \
+ ".long " #class ", " #class_shift " \n" \
+ ".long " #hook " - . \n" \
+ ".previous \n");
+#define DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class, \
+ class_shift, hook) \
+ __DECLARE_PCI_FIXUP_SECTION(sec, name, vendor, device, class, \
+ class_shift, hook)
+#else
/* Anonymous variables would be nice... */
#define DECLARE_PCI_FIXUP_SECTION(section, name, vendor, device, class, \
class_shift, hook) \
static const struct pci_fixup __PASTE(__pci_fixup_##name,__LINE__) __used \
__attribute__((__section__(#section), aligned((sizeof(void *))))) \
= { vendor, device, class, class_shift, hook };
+#endif
#define DECLARE_PCI_FIXUP_CLASS_EARLY(vendor, device, class, \
class_shift, hook) \
@@ -1876,20 +1910,9 @@ enum pci_fixup_pass {
#ifdef CONFIG_PCI_QUIRKS
void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev);
-int pci_dev_specific_acs_enabled(struct pci_dev *dev, u16 acs_flags);
-int pci_dev_specific_enable_acs(struct pci_dev *dev);
#else
static inline void pci_fixup_device(enum pci_fixup_pass pass,
struct pci_dev *dev) { }
-static inline int pci_dev_specific_acs_enabled(struct pci_dev *dev,
- u16 acs_flags)
-{
- return -ENOTTY;
-}
-static inline int pci_dev_specific_enable_acs(struct pci_dev *dev)
-{
- return -ENOTTY;
-}
#endif
void __iomem *pcim_iomap(struct pci_dev *pdev, int bar, unsigned long maxlen);
diff --git a/include/linux/pci_hotplug.h b/include/linux/pci_hotplug.h
index cf5e22103f68..a6d6650a0490 100644
--- a/include/linux/pci_hotplug.h
+++ b/include/linux/pci_hotplug.h
@@ -80,15 +80,12 @@ struct hotplug_slot_info {
* @ops: pointer to the &struct hotplug_slot_ops to be used for this slot
* @info: pointer to the &struct hotplug_slot_info for the initial values for
* this slot.
- * @release: called during pci_hp_deregister to free memory allocated in a
- * hotplug_slot structure.
* @private: used by the hotplug pci controller driver to store whatever it
* needs.
*/
struct hotplug_slot {
struct hotplug_slot_ops *ops;
struct hotplug_slot_info *info;
- void (*release) (struct hotplug_slot *slot);
void *private;
/* Variables below this are for use only by the hotplug pci core. */
@@ -104,13 +101,23 @@ static inline const char *hotplug_slot_name(const struct hotplug_slot *slot)
int __pci_hp_register(struct hotplug_slot *slot, struct pci_bus *pbus, int nr,
const char *name, struct module *owner,
const char *mod_name);
-int pci_hp_deregister(struct hotplug_slot *slot);
+int __pci_hp_initialize(struct hotplug_slot *slot, struct pci_bus *bus, int nr,
+ const char *name, struct module *owner,
+ const char *mod_name);
+int pci_hp_add(struct hotplug_slot *slot);
+
+void pci_hp_del(struct hotplug_slot *slot);
+void pci_hp_destroy(struct hotplug_slot *slot);
+void pci_hp_deregister(struct hotplug_slot *slot);
+
int __must_check pci_hp_change_slot_info(struct hotplug_slot *slot,
struct hotplug_slot_info *info);
/* use a define to avoid include chaining to get THIS_MODULE & friends */
#define pci_hp_register(slot, pbus, devnr, name) \
__pci_hp_register(slot, pbus, devnr, name, THIS_MODULE, KBUILD_MODNAME)
+#define pci_hp_initialize(slot, bus, nr, name) \
+ __pci_hp_initialize(slot, bus, nr, name, THIS_MODULE, KBUILD_MODNAME)
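A hedged sketch of the split registration flow these new entry points enable (controller specifics elided, names hypothetical): prepare the slot early, publish it only once the hardware can service requests:

#include <linux/pci_hotplug.h>

static int example_setup_slot(struct hotplug_slot *slot, struct pci_bus *bus,
			      int devnr, const char *name)
{
	int ret;

	ret = pci_hp_initialize(slot, bus, devnr, name);
	if (ret)
		return ret;

	/* ... bring up slot hardware, request IRQs, etc. ... */

	ret = pci_hp_add(slot);
	if (ret)
		pci_hp_destroy(slot);

	return ret;
}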
/* PCI Setting Record (Type 0) */
struct hpp_type0 {
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 29502238e510..99d366cb0e9f 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -1668,6 +1668,7 @@
#define PCI_DEVICE_ID_COMPEX_ENET100VG4 0x0112
#define PCI_VENDOR_ID_PMC_Sierra 0x11f8
+#define PCI_VENDOR_ID_MICROSEMI 0x11f8
#define PCI_VENDOR_ID_RP 0x11fe
#define PCI_DEVICE_ID_RP32INTF 0x0001
@@ -2541,6 +2542,7 @@
#define PCI_DEVICE_ID_NETRONOME_NFP3200 0x3200
#define PCI_DEVICE_ID_NETRONOME_NFP3240 0x3240
#define PCI_DEVICE_ID_NETRONOME_NFP4000 0x4000
+#define PCI_DEVICE_ID_NETRONOME_NFP5000 0x5000
#define PCI_DEVICE_ID_NETRONOME_NFP6000 0x6000
#define PCI_DEVICE_ID_NETRONOME_NFP6000_VF 0x6003
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index 296bbe49d5d1..70b7123f38c7 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -149,4 +149,6 @@ extern phys_addr_t per_cpu_ptr_to_phys(void *addr);
(typeof(type) __percpu *)__alloc_percpu(sizeof(type), \
__alignof__(type))
+extern unsigned long pcpu_nr_pages(void);
+
#endif /* __LINUX_PERCPU_H */
diff --git a/include/linux/percpu_ida.h b/include/linux/percpu_ida.h
deleted file mode 100644
index 07d78e4653bc..000000000000
--- a/include/linux/percpu_ida.h
+++ /dev/null
@@ -1,83 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __PERCPU_IDA_H__
-#define __PERCPU_IDA_H__
-
-#include <linux/types.h>
-#include <linux/bitops.h>
-#include <linux/init.h>
-#include <linux/sched.h>
-#include <linux/spinlock_types.h>
-#include <linux/wait.h>
-#include <linux/cpumask.h>
-
-struct percpu_ida_cpu;
-
-struct percpu_ida {
- /*
- * number of tags available to be allocated, as passed to
- * percpu_ida_init()
- */
- unsigned nr_tags;
- unsigned percpu_max_size;
- unsigned percpu_batch_size;
-
- struct percpu_ida_cpu __percpu *tag_cpu;
-
- /*
- * Bitmap of cpus that (may) have tags on their percpu freelists:
- * steal_tags() uses this to decide when to steal tags, and which cpus
- * to try stealing from.
- *
- * It's ok for a freelist to be empty when its bit is set - steal_tags()
- * will just keep looking - but the bitmap _must_ be set whenever a
- * percpu freelist does have tags.
- */
- cpumask_t cpus_have_tags;
-
- struct {
- spinlock_t lock;
- /*
- * When we go to steal tags from another cpu (see steal_tags()),
- * we want to pick a cpu at random. Cycling through them every
- * time we steal is a bit easier and more or less equivalent:
- */
- unsigned cpu_last_stolen;
-
- /* For sleeping on allocation failure */
- wait_queue_head_t wait;
-
- /*
- * Global freelist - it's a stack where nr_free points to the
- * top
- */
- unsigned nr_free;
- unsigned *freelist;
- } ____cacheline_aligned_in_smp;
-};
-
-/*
- * Number of tags we move between the percpu freelist and the global freelist at
- * a time
- */
-#define IDA_DEFAULT_PCPU_BATCH_MOVE 32U
-/* Max size of percpu freelist, */
-#define IDA_DEFAULT_PCPU_SIZE ((IDA_DEFAULT_PCPU_BATCH_MOVE * 3) / 2)
-
-int percpu_ida_alloc(struct percpu_ida *pool, int state);
-void percpu_ida_free(struct percpu_ida *pool, unsigned tag);
-
-void percpu_ida_destroy(struct percpu_ida *pool);
-int __percpu_ida_init(struct percpu_ida *pool, unsigned long nr_tags,
- unsigned long max_size, unsigned long batch_size);
-static inline int percpu_ida_init(struct percpu_ida *pool, unsigned long nr_tags)
-{
- return __percpu_ida_init(pool, nr_tags, IDA_DEFAULT_PCPU_SIZE,
- IDA_DEFAULT_PCPU_BATCH_MOVE);
-}
-
-typedef int (*percpu_ida_cb)(unsigned, void *);
-int percpu_ida_for_each_free(struct percpu_ida *pool, percpu_ida_cb fn,
- void *data);
-
-unsigned percpu_ida_free_tags(struct percpu_ida *pool, int cpu);
-#endif /* __PERCPU_IDA_H__ */
diff --git a/include/linux/perf/arm_pmu.h b/include/linux/perf/arm_pmu.h
index ad5444491975..10f92e1d8e7b 100644
--- a/include/linux/perf/arm_pmu.h
+++ b/include/linux/perf/arm_pmu.h
@@ -25,6 +25,12 @@
*/
#define ARMPMU_MAX_HWEVENTS 32
+/*
+ * ARM PMU hw_event flags
+ */
+/* Event uses a 64bit counter */
+#define ARMPMU_EVT_64BIT 1
+
#define HW_OP_UNSUPPORTED 0xFFFF
#define C(_x) PERF_COUNT_HW_CACHE_##_x
#define CACHE_OP_UNSUPPORTED 0xFFFF
@@ -87,14 +93,13 @@ struct arm_pmu {
struct perf_event *event);
int (*set_event_filter)(struct hw_perf_event *evt,
struct perf_event_attr *attr);
- u32 (*read_counter)(struct perf_event *event);
- void (*write_counter)(struct perf_event *event, u32 val);
+ u64 (*read_counter)(struct perf_event *event);
+ void (*write_counter)(struct perf_event *event, u64 val);
void (*start)(struct arm_pmu *);
void (*stop)(struct arm_pmu *);
void (*reset)(void *);
int (*map_event)(struct perf_event *event);
int num_events;
- u64 max_period;
bool secure_access; /* 32-bit ARM only */
#define ARMV8_PMUV3_MAX_COMMON_EVENTS 0x40
DECLARE_BITMAP(pmceid_bitmap, ARMV8_PMUV3_MAX_COMMON_EVENTS);
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 1fa12887ec02..53c500f0ca79 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -490,7 +490,7 @@ struct perf_addr_filters_head {
};
/**
- * enum perf_event_state - the states of a event
+ * enum perf_event_state - the states of an event:
*/
enum perf_event_state {
PERF_EVENT_STATE_DEAD = -4,
@@ -1130,6 +1130,7 @@ extern void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct
extern struct perf_callchain_entry *
get_perf_callchain(struct pt_regs *regs, u32 init_nr, bool kernel, bool user,
u32 max_stack, bool crosstask, bool add_mark);
+extern struct perf_callchain_entry *perf_callchain(struct perf_event *event, struct pt_regs *regs);
extern int get_callchain_buffers(int max_stack);
extern void put_callchain_buffers(void);
diff --git a/include/linux/phy.h b/include/linux/phy.h
index 6cd09098427c..cd6f637cbbfb 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -825,6 +825,16 @@ static inline bool phy_interrupt_is_valid(struct phy_device *phydev)
}
/**
+ * phy_polling_mode - Convenience function for testing whether polling is
+ * used to detect PHY status changes
+ * @phydev: the phy_device struct
+ */
+static inline bool phy_polling_mode(struct phy_device *phydev)
+{
+ return phydev->irq == PHY_POLL;
+}
+
+/**
* phy_is_internal - Convenience function for testing if a PHY is internal
* @phydev: the phy_device struct
*/
@@ -942,6 +952,8 @@ void phy_start(struct phy_device *phydev);
void phy_stop(struct phy_device *phydev);
int phy_start_aneg(struct phy_device *phydev);
int phy_aneg_done(struct phy_device *phydev);
+int phy_speed_down(struct phy_device *phydev, bool sync);
+int phy_speed_up(struct phy_device *phydev);
int phy_stop_interrupts(struct phy_device *phydev);
int phy_restart_aneg(struct phy_device *phydev);
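
phy_speed_down() and phy_speed_up() let a MAC driver lower the link speed (e.g. while only Wake-on-LAN traffic is expected) and restore it later. A hedged sketch of a hypothetical driver's suspend/resume hooks; the drvdata layout is illustrative:

	static int example_mac_suspend(struct device *dev)
	{
		struct net_device *ndev = dev_get_drvdata(dev);

		if (device_may_wakeup(dev))
			phy_speed_down(ndev->phydev, true);	/* sync=true: change speed synchronously */
		return 0;
	}

	static int example_mac_resume(struct device *dev)
	{
		struct net_device *ndev = dev_get_drvdata(dev);

		if (device_may_wakeup(dev))
			phy_speed_up(ndev->phydev);
		return 0;
	}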
diff --git a/include/linux/phylink.h b/include/linux/phylink.h
index 50eeae025f1e..021fc6595856 100644
--- a/include/linux/phylink.h
+++ b/include/linux/phylink.h
@@ -234,5 +234,6 @@ int phylink_mii_ioctl(struct phylink *, struct ifreq *, int);
#define phylink_test(bm, mode) __phylink_do_bit(test_bit, bm, mode)
void phylink_set_port_modes(unsigned long *bits);
+void phylink_helper_basex_speed(struct phylink_link_state *state);
#endif
diff --git a/include/linux/pid.h b/include/linux/pid.h
index 7633d55d9a24..14a9a39da9c7 100644
--- a/include/linux/pid.h
+++ b/include/linux/pid.h
@@ -7,11 +7,10 @@
enum pid_type
{
PIDTYPE_PID,
+ PIDTYPE_TGID,
PIDTYPE_PGID,
PIDTYPE_SID,
PIDTYPE_MAX,
- /* only valid to __task_pid_nr_ns() */
- __PIDTYPE_TGID
};
/*
@@ -67,12 +66,6 @@ struct pid
extern struct pid init_struct_pid;
-struct pid_link
-{
- struct hlist_node node;
- struct pid *pid;
-};
-
static inline struct pid *get_pid(struct pid *pid)
{
if (pid)
@@ -177,7 +170,7 @@ pid_t pid_vnr(struct pid *pid);
do { \
if ((pid) != NULL) \
hlist_for_each_entry_rcu((task), \
- &(pid)->tasks[type], pids[type].node) {
+ &(pid)->tasks[type], pid_links[type]) {
/*
* Both old and new leaders may be attached to
diff --git a/include/linux/pinctrl/pinconf.h b/include/linux/pinctrl/pinconf.h
index 09eb80f2574a..8dd85d302b90 100644
--- a/include/linux/pinctrl/pinconf.h
+++ b/include/linux/pinctrl/pinconf.h
@@ -28,7 +28,8 @@ struct seq_file;
* is not available on this controller this should return -ENOTSUPP
* and if it is available but disabled it should return -EINVAL
* @pin_config_set: configure an individual pin
- * @pin_config_group_get: get configurations for an entire pin group
+ * @pin_config_group_get: get configurations for an entire pin group; should
+ * return -ENOTSUPP and -EINVAL using the same rules as pin_config_get.
* @pin_config_group_set: configure all pins in a group
* @pin_config_dbg_parse_modify: optional debugfs to modify a pin configuration
* @pin_config_dbg_show: optional debugfs display hook that will provide
diff --git a/include/linux/platform_data/ams-delta-fiq.h b/include/linux/platform_data/ams-delta-fiq.h
new file mode 100644
index 000000000000..cf4589ccb720
--- /dev/null
+++ b/include/linux/platform_data/ams-delta-fiq.h
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/*
+ * include/linux/platform_data/ams-delta-fiq.h
+ *
+ * Taken from the original Amstrad modifications to fiq.h
+ *
+ * Copyright (c) 2004 Amstrad Plc
+ * Copyright (c) 2006 Matt Callow
+ * Copyright (c) 2010 Janusz Krzysztofik
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef __LINUX_PLATFORM_DATA_AMS_DELTA_FIQ_H
+#define __LINUX_PLATFORM_DATA_AMS_DELTA_FIQ_H
+
+/*
+ * These are the offsets from the beginning of the fiq_buffer. They are put here
+ * since the buffer and header need to be accessed by drivers servicing devices
+ * which generate GPIO interrupts - e.g. keyboard, modem, hook switch.
+ */
+#define FIQ_MASK 0
+#define FIQ_STATE 1
+#define FIQ_KEYS_CNT 2
+#define FIQ_TAIL_OFFSET 3
+#define FIQ_HEAD_OFFSET 4
+#define FIQ_BUF_LEN 5
+#define FIQ_KEY 6
+#define FIQ_MISSED_KEYS 7
+#define FIQ_BUFFER_START 8
+#define FIQ_GPIO_INT_MASK 9
+#define FIQ_KEYS_HICNT 10
+#define FIQ_IRQ_PEND 11
+#define FIQ_SIR_CODE_L1 12
+#define FIQ_SIR_CODE_L2 13
+
+#define FIQ_CNT_INT_00 14
+#define FIQ_CNT_INT_KEY 15
+#define FIQ_CNT_INT_MDM 16
+#define FIQ_CNT_INT_03 17
+#define FIQ_CNT_INT_HSW 18
+#define FIQ_CNT_INT_05 19
+#define FIQ_CNT_INT_06 20
+#define FIQ_CNT_INT_07 21
+#define FIQ_CNT_INT_08 22
+#define FIQ_CNT_INT_09 23
+#define FIQ_CNT_INT_10 24
+#define FIQ_CNT_INT_11 25
+#define FIQ_CNT_INT_12 26
+#define FIQ_CNT_INT_13 27
+#define FIQ_CNT_INT_14 28
+#define FIQ_CNT_INT_15 29
+
+#define FIQ_CIRC_BUFF 30 /* Start of circular buffer */
+
+#endif
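
The indices above address 32-bit words in a buffer shared between the FIQ handler and the drivers it feeds. A hedged sketch of a consumer draining the key ring; the fiq_buffer symbol, the choice of the head index for the consumer side, and the omission of locking are all assumptions for illustration:

	extern unsigned int fiq_buffer[];	/* provided by the FIQ setup code */

	static void example_drain_keys(void)
	{
		while (fiq_buffer[FIQ_KEYS_CNT] > 0) {
			unsigned int head = fiq_buffer[FIQ_HEAD_OFFSET];
			unsigned int scancode = fiq_buffer[FIQ_CIRC_BUFF + head];

			/* hand 'scancode' to the input layer here */

			fiq_buffer[FIQ_KEYS_CNT]--;
			if (++head == fiq_buffer[FIQ_BUF_LEN])
				head = 0;
			fiq_buffer[FIQ_HEAD_OFFSET] = head;
		}
	}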
diff --git a/include/linux/platform_data/bt-nokia-h4p.h b/include/linux/platform_data/bt-nokia-h4p.h
deleted file mode 100644
index 30d169dfadf3..000000000000
--- a/include/linux/platform_data/bt-nokia-h4p.h
+++ /dev/null
@@ -1,38 +0,0 @@
-/*
- * This file is part of Nokia H4P bluetooth driver
- *
- * Copyright (C) 2010 Nokia Corporation.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
- *
- */
-
-
-/**
- * struct hci_h4p_platform data - hci_h4p Platform data structure
- */
-struct hci_h4p_platform_data {
- int chip_type;
- int bt_sysclk;
- unsigned int bt_wakeup_gpio;
- unsigned int host_wakeup_gpio;
- unsigned int reset_gpio;
- int reset_gpio_shared;
- unsigned int uart_irq;
- phys_addr_t uart_base;
- const char *uart_iclk;
- const char *uart_fclk;
- void (*set_pm_limits)(struct device *dev, bool set);
-};
diff --git a/include/linux/platform_data/gpio-davinci.h b/include/linux/platform_data/gpio-davinci.h
index 90ae19ca828f..57a5a35e0073 100644
--- a/include/linux/platform_data/gpio-davinci.h
+++ b/include/linux/platform_data/gpio-davinci.h
@@ -22,6 +22,7 @@
#include <asm-generic/gpio.h>
#define MAX_REGS_BANKS 5
+#define MAX_INT_PER_BANK 32
struct davinci_gpio_platform_data {
u32 ngpio;
@@ -41,7 +42,7 @@ struct davinci_gpio_controller {
spinlock_t lock;
void __iomem *regs[MAX_REGS_BANKS];
int gpio_unbanked;
- unsigned int base_irq;
+ int irqs[MAX_INT_PER_BANK];
unsigned int base;
};
diff --git a/include/linux/platform_data/i2c-hid.h b/include/linux/platform_data/i2c-hid.h
index 1fb088239d12..c628bb5e1061 100644
--- a/include/linux/platform_data/i2c-hid.h
+++ b/include/linux/platform_data/i2c-hid.h
@@ -12,14 +12,13 @@
#ifndef __LINUX_I2C_HID_H
#define __LINUX_I2C_HID_H
+#include <linux/regulator/consumer.h>
#include <linux/types.h>
-struct regulator;
-
/**
 * struct i2c_hid_platform_data - used by hid over i2c implementation.
* @hid_descriptor_address: i2c register where the HID descriptor is stored.
- * @supply: regulator for powering on the device.
+ * @supplies: regulators for powering on the device.
* @post_power_delay_ms: delay after powering on before device is usable.
*
* Note that it is the responsibility of the platform driver (or the acpi 5.0
@@ -35,7 +34,7 @@ struct regulator;
*/
struct i2c_hid_platform_data {
u16 hid_descriptor_address;
- struct regulator *supply;
+ struct regulator_bulk_data supplies[2];
int post_power_delay_ms;
};
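
With a fixed regulator_bulk_data array instead of a single regulator pointer, the consumer can request and switch both supplies through the bulk helpers. A hedged sketch; the "vdd"/"vddl" supply names are assumptions taken from typical usage:

	static int example_i2c_hid_power_on(struct device *dev,
					    struct i2c_hid_platform_data *pdata)
	{
		int ret;

		pdata->supplies[0].supply = "vdd";
		pdata->supplies[1].supply = "vddl";

		ret = devm_regulator_bulk_get(dev, ARRAY_SIZE(pdata->supplies),
					      pdata->supplies);
		if (ret)
			return ret;

		ret = regulator_bulk_enable(ARRAY_SIZE(pdata->supplies),
					    pdata->supplies);
		if (ret)
			return ret;

		if (pdata->post_power_delay_ms)
			msleep(pdata->post_power_delay_ms);
		return 0;
	}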
diff --git a/include/linux/platform_data/i2c-ocores.h b/include/linux/platform_data/i2c-ocores.h
index 01edd96fe1f7..113d6b12f650 100644
--- a/include/linux/platform_data/i2c-ocores.h
+++ b/include/linux/platform_data/i2c-ocores.h
@@ -1,7 +1,7 @@
/*
* i2c-ocores.h - definitions for the i2c-ocores interface
*
- * Peter Korsgaard <jacmet@sunsite.dk>
+ * Peter Korsgaard <peter@korsgaard.com>
*
* This file is licensed under the terms of the GNU General Public License
* version 2. This program is licensed "as is" without any warranty of any
diff --git a/include/linux/platform_data/jz4740/jz4740_nand.h b/include/linux/platform_data/jz4740/jz4740_nand.h
new file mode 100644
index 000000000000..bc571f6d5ced
--- /dev/null
+++ b/include/linux/platform_data/jz4740/jz4740_nand.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright (C) 2009-2010, Lars-Peter Clausen <lars@metafoo.de>
+ * JZ4740 SoC NAND controller driver
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#ifndef __JZ4740_NAND_H__
+#define __JZ4740_NAND_H__
+
+#include <linux/mtd/rawnand.h>
+#include <linux/mtd/partitions.h>
+
+#define JZ_NAND_NUM_BANKS 4
+
+struct jz_nand_platform_data {
+ int num_partitions;
+ struct mtd_partition *partitions;
+
+ unsigned char banks[JZ_NAND_NUM_BANKS];
+
+ void (*ident_callback)(struct platform_device *, struct mtd_info *,
+ struct mtd_partition **, int *num_partitions);
+};
+
+#endif
diff --git a/include/linux/platform_data/media/sii9234.h b/include/linux/platform_data/media/sii9234.h
deleted file mode 100644
index 6a4a809fe9a3..000000000000
--- a/include/linux/platform_data/media/sii9234.h
+++ /dev/null
@@ -1,24 +0,0 @@
-/*
- * Driver header for SII9234 MHL converter chip.
- *
- * Copyright (c) 2011 Samsung Electronics, Co. Ltd
- * Contact: Tomasz Stanislawski <t.stanislaws@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#ifndef SII9234_H
-#define SII9234_H
-
-/**
- * @gpio_n_reset: GPIO driving nRESET pin
- */
-
-struct sii9234_platform_data {
- int gpio_n_reset;
-};
-
-#endif /* SII9234_H */
diff --git a/include/linux/platform_data/mmc-esdhc-imx.h b/include/linux/platform_data/mmc-esdhc-imx.h
index 7daa78a2f342..640dec8b5b0c 100644
--- a/include/linux/platform_data/mmc-esdhc-imx.h
+++ b/include/linux/platform_data/mmc-esdhc-imx.h
@@ -34,7 +34,6 @@ enum cd_types {
* @cd_gpio: gpio for card_detect interrupt
* @wp_type: type of write_protect method (see wp_types enum above)
* @cd_type: type of card_detect method (see cd_types enum above)
- * @support_vsel: indicate it supports 1.8v switching
*/
struct esdhc_platform_data {
@@ -43,7 +42,6 @@ struct esdhc_platform_data {
enum wp_types wp_type;
enum cd_types cd_type;
int max_bus_width;
- bool support_vsel;
unsigned int delay_line;
unsigned int tuning_step; /* The delay cell steps in tuning procedure */
unsigned int tuning_start_tap; /* The start delay cell point in tuning procedure */
diff --git a/include/linux/platform_data/mmp_dma.h b/include/linux/platform_data/mmp_dma.h
index d1397c8ed94e..6397b9c8149a 100644
--- a/include/linux/platform_data/mmp_dma.h
+++ b/include/linux/platform_data/mmp_dma.h
@@ -12,9 +12,13 @@
#ifndef MMP_DMA_H
#define MMP_DMA_H
+struct dma_slave_map;
+
struct mmp_dma_platdata {
int dma_channels;
int nb_requestors;
+ int slave_map_cnt;
+ const struct dma_slave_map *slave_map;
};
#endif /* MMP_DMA_H */
diff --git a/include/linux/platform_data/mtd-davinci-aemif.h b/include/linux/platform_data/mtd-davinci-aemif.h
index 97948ac2bb9b..a403dd51dacc 100644
--- a/include/linux/platform_data/mtd-davinci-aemif.h
+++ b/include/linux/platform_data/mtd-davinci-aemif.h
@@ -33,5 +33,4 @@ struct davinci_aemif_timing {
u8 ta;
};
-int davinci_aemif_setup(struct platform_device *pdev);
#endif
diff --git a/include/linux/platform_data/mtd-orion_nand.h b/include/linux/platform_data/mtd-orion_nand.h
index a7ce77c7c1a8..34828eb85982 100644
--- a/include/linux/platform_data/mtd-orion_nand.h
+++ b/include/linux/platform_data/mtd-orion_nand.h
@@ -12,7 +12,6 @@
*/
struct orion_nand_data {
struct mtd_partition *parts;
- int (*dev_ready)(struct mtd_info *mtd);
u32 nr_parts;
u8 ale; /* address line number connected to ALE */
u8 cle; /* address line number connected to CLE */
diff --git a/include/linux/platform_data/pm33xx.h b/include/linux/platform_data/pm33xx.h
index f9bed2a0af9d..fbf5ed73c7cc 100644
--- a/include/linux/platform_data/pm33xx.h
+++ b/include/linux/platform_data/pm33xx.h
@@ -12,6 +12,29 @@
#include <linux/kbuild.h>
#include <linux/types.h>
+/*
+ * WFI Flags for sleep code control
+ *
+ * These flags allow PM code to exclude certain operations from happening
+ * in the low level ASM code found in sleep33xx.S and sleep43xx.S
+ *
+ * WFI_FLAG_FLUSH_CACHE: Flush the ARM caches and disable caching. Only
+ * needed when MPU will lose context.
+ * WFI_FLAG_SELF_REFRESH: Let EMIF place DDR memory into self-refresh and
+ * disable EMIF.
+ * WFI_FLAG_SAVE_EMIF: Save context of all EMIF registers and restore in
+ * resume path. Only needed if PER domain loses context
+ * and must also have WFI_FLAG_SELF_REFRESH set.
+ * WFI_FLAG_WAKE_M3: Disable MPU clock or clockdomain to cause wkup_m3 to
+ * execute when WFI instruction executes.
+ * WFI_FLAG_RTC_ONLY: Configure the RTC to enter RTC+DDR mode.
+ */
+#define WFI_FLAG_FLUSH_CACHE BIT(0)
+#define WFI_FLAG_SELF_REFRESH BIT(1)
+#define WFI_FLAG_SAVE_EMIF BIT(2)
+#define WFI_FLAG_WAKE_M3 BIT(3)
+#define WFI_FLAG_RTC_ONLY BIT(4)
+
#ifndef __ASSEMBLER__
struct am33xx_pm_sram_addr {
void (*do_wfi)(void);
@@ -19,12 +42,15 @@ struct am33xx_pm_sram_addr {
unsigned long *resume_offset;
unsigned long *emif_sram_table;
unsigned long *ro_sram_data;
+ unsigned long resume_address;
};
struct am33xx_pm_platform_data {
int (*init)(void);
- int (*soc_suspend)(unsigned int state, int (*fn)(unsigned long));
+ int (*soc_suspend)(unsigned int state, int (*fn)(unsigned long),
+ unsigned long args);
struct am33xx_pm_sram_addr *(*get_sram_addrs)(void);
+ void __iomem *(*get_rtc_base_addr)(void);
};
struct am33xx_pm_sram_data {
@@ -36,6 +62,7 @@ struct am33xx_pm_sram_data {
struct am33xx_pm_ro_sram_data {
u32 amx3_pm_sram_data_virt;
u32 amx3_pm_sram_data_phys;
+ void __iomem *rtc_base_virt;
} __packed __aligned(8);
#endif /* __ASSEMBLER__ */
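
The WFI flags are intended to be OR'ed together per sleep state and handed down to the SRAM-resident suspend code. A hedged sketch of composing them for a suspend where both MPU and PER lose context; passing the flags through the new 'args' parameter of soc_suspend() is an assumption for illustration:

	static int example_am33xx_suspend(struct am33xx_pm_platform_data *pm_ops,
					  int (*sram_idle_fn)(unsigned long))
	{
		unsigned long wfi_flags = WFI_FLAG_FLUSH_CACHE |
					  WFI_FLAG_SELF_REFRESH |
					  WFI_FLAG_SAVE_EMIF |
					  WFI_FLAG_WAKE_M3;

		return pm_ops->soc_suspend(PM_SUSPEND_MEM, sram_idle_fn, wfi_flags);
	}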
diff --git a/include/linux/platform_data/sh_ipmmu.h b/include/linux/platform_data/sh_ipmmu.h
deleted file mode 100644
index 39f7405cdac5..000000000000
--- a/include/linux/platform_data/sh_ipmmu.h
+++ /dev/null
@@ -1,18 +0,0 @@
-/* sh_ipmmu.h
- *
- * Copyright (C) 2012 Hideki EIRAKU
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- */
-
-#ifndef __SH_IPMMU_H__
-#define __SH_IPMMU_H__
-
-struct shmobile_ipmmu_platform_data {
- const char * const *dev_names;
- unsigned int num_dev_names;
-};
-
-#endif /* __SH_IPMMU_H__ */
diff --git a/include/linux/platform_data/ti-sysc.h b/include/linux/platform_data/ti-sysc.h
index 990aad477458..2efa3470a451 100644
--- a/include/linux/platform_data/ti-sysc.h
+++ b/include/linux/platform_data/ti-sysc.h
@@ -14,6 +14,7 @@ enum ti_sysc_module_type {
TI_SYSC_OMAP4_SR,
TI_SYSC_OMAP4_MCASP,
TI_SYSC_OMAP4_USB_HOST_FS,
+ TI_SYSC_DRA7_MCAN,
};
struct ti_sysc_cookie {
diff --git a/include/linux/platform_data/txx9/ndfmc.h b/include/linux/platform_data/txx9/ndfmc.h
new file mode 100644
index 000000000000..fc172627d54e
--- /dev/null
+++ b/include/linux/platform_data/txx9/ndfmc.h
@@ -0,0 +1,30 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * (C) Copyright TOSHIBA CORPORATION 2007
+ */
+#ifndef __TXX9_NDFMC_H
+#define __TXX9_NDFMC_H
+
+#define NDFMC_PLAT_FLAG_USE_BSPRT 0x01
+#define NDFMC_PLAT_FLAG_NO_RSTR 0x02
+#define NDFMC_PLAT_FLAG_HOLDADD 0x04
+#define NDFMC_PLAT_FLAG_DUMMYWRITE 0x08
+
+struct txx9ndfmc_platform_data {
+ unsigned int shift;
+ unsigned int gbus_clock;
+ unsigned int hold; /* hold time in nanosecond */
+ unsigned int spw; /* strobe pulse width in nanosecond */
+ unsigned int flags;
+ unsigned char ch_mask; /* available channel bitmask */
+ unsigned char wp_mask; /* write-protect bitmask */
+ unsigned char wide_mask; /* 16bit-nand bitmask */
+};
+
+void txx9_ndfmc_init(unsigned long baseaddr,
+ const struct txx9ndfmc_platform_data *plat_data);
+
+#endif /* __TXX9_NDFMC_H */
diff --git a/include/linux/platform_device.h b/include/linux/platform_device.h
index 3097c943fab9..1a9f38f27f65 100644
--- a/include/linux/platform_device.h
+++ b/include/linux/platform_device.h
@@ -12,13 +12,13 @@
#define _PLATFORM_DEVICE_H_
#include <linux/device.h>
-#include <linux/mod_devicetable.h>
#define PLATFORM_DEVID_NONE (-1)
#define PLATFORM_DEVID_AUTO (-2)
struct mfd_cell;
struct property_entry;
+struct platform_device_id;
struct platform_device {
const char *name;
diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
index 9206a4fef9ac..776c546d581a 100644
--- a/include/linux/pm_domain.h
+++ b/include/linux/pm_domain.h
@@ -234,11 +234,13 @@ struct generic_pm_domain *of_genpd_remove_last(struct device_node *np);
int of_genpd_parse_idle_states(struct device_node *dn,
struct genpd_power_state **states, int *n);
unsigned int of_genpd_opp_to_performance_state(struct device *dev,
- struct device_node *opp_node);
+ struct device_node *np);
int genpd_dev_pm_attach(struct device *dev);
struct device *genpd_dev_pm_attach_by_id(struct device *dev,
unsigned int index);
+struct device *genpd_dev_pm_attach_by_name(struct device *dev,
+ char *name);
#else /* !CONFIG_PM_GENERIC_DOMAINS_OF */
static inline int of_genpd_add_provider_simple(struct device_node *np,
struct generic_pm_domain *genpd)
@@ -274,9 +276,9 @@ static inline int of_genpd_parse_idle_states(struct device_node *dn,
static inline unsigned int
of_genpd_opp_to_performance_state(struct device *dev,
- struct device_node *opp_node)
+ struct device_node *np)
{
- return -ENODEV;
+ return 0;
}
static inline int genpd_dev_pm_attach(struct device *dev)
@@ -290,6 +292,12 @@ static inline struct device *genpd_dev_pm_attach_by_id(struct device *dev,
return NULL;
}
+static inline struct device *genpd_dev_pm_attach_by_name(struct device *dev,
+ char *name)
+{
+ return NULL;
+}
+
static inline
struct generic_pm_domain *of_genpd_remove_last(struct device_node *np)
{
@@ -301,6 +309,8 @@ struct generic_pm_domain *of_genpd_remove_last(struct device_node *np)
int dev_pm_domain_attach(struct device *dev, bool power_on);
struct device *dev_pm_domain_attach_by_id(struct device *dev,
unsigned int index);
+struct device *dev_pm_domain_attach_by_name(struct device *dev,
+ char *name);
void dev_pm_domain_detach(struct device *dev, bool power_off);
void dev_pm_domain_set(struct device *dev, struct dev_pm_domain *pd);
#else
@@ -313,6 +323,11 @@ static inline struct device *dev_pm_domain_attach_by_id(struct device *dev,
{
return NULL;
}
+static inline struct device *dev_pm_domain_attach_by_name(struct device *dev,
+ char *name)
+{
+ return NULL;
+}
static inline void dev_pm_domain_detach(struct device *dev, bool power_off) {}
static inline void dev_pm_domain_set(struct device *dev,
struct dev_pm_domain *pd) {}
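
dev_pm_domain_attach_by_name() and genpd_dev_pm_attach_by_name() mirror the _by_id() variants but resolve the domain through a name rather than an index. A hedged sketch of a consumer driver; the "perf" domain name is illustrative:

	static int example_attach_perf_domain(struct device *dev)
	{
		struct device *pd_dev;

		pd_dev = dev_pm_domain_attach_by_name(dev, "perf");
		if (IS_ERR(pd_dev))
			return PTR_ERR(pd_dev);
		if (!pd_dev)
			return -ENODEV;	/* no matching domain described */

		/* ... use runtime PM against pd_dev, then on teardown: */
		dev_pm_domain_detach(pd_dev, true);
		return 0;
	}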
diff --git a/include/linux/poll.h b/include/linux/poll.h
index fdf86b4cbc71..7e0fdcf905d2 100644
--- a/include/linux/poll.h
+++ b/include/linux/poll.h
@@ -74,18 +74,18 @@ static inline void init_poll_funcptr(poll_table *pt, poll_queue_proc qproc)
pt->_key = ~(__poll_t)0; /* all events enabled */
}
-static inline bool file_has_poll_mask(struct file *file)
+static inline bool file_can_poll(struct file *file)
{
- return file->f_op->get_poll_head && file->f_op->poll_mask;
+ return file->f_op->poll;
}
-static inline bool file_can_poll(struct file *file)
+static inline __poll_t vfs_poll(struct file *file, struct poll_table_struct *pt)
{
- return file->f_op->poll || file_has_poll_mask(file);
+ if (unlikely(!file->f_op->poll))
+ return DEFAULT_POLLMASK;
+ return file->f_op->poll(file, pt);
}
-__poll_t vfs_poll(struct file *file, struct poll_table_struct *pt);
-
struct poll_table_entry {
struct file *filp;
__poll_t key;
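
file_can_poll() and vfs_poll() are now trivial inlines over ->poll. A minimal sketch of a stacked driver forwarding poll to a backing struct file; the private_data layout is illustrative:

	static __poll_t example_poll(struct file *file, struct poll_table_struct *pt)
	{
		struct file *backing = file->private_data;	/* illustrative */

		/*
		 * vfs_poll() already returns DEFAULT_POLLMASK when ->poll is
		 * NULL, so the explicit file_can_poll() check is only useful
		 * if the caller wants to special-case non-pollable files.
		 */
		if (!file_can_poll(backing))
			return DEFAULT_POLLMASK;

		return vfs_poll(backing, pt);
	}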
diff --git a/include/linux/posix-timers.h b/include/linux/posix-timers.h
index c85704fcdbd2..ee7e987ea1b4 100644
--- a/include/linux/posix-timers.h
+++ b/include/linux/posix-timers.h
@@ -95,8 +95,8 @@ struct k_itimer {
clockid_t it_clock;
timer_t it_id;
int it_active;
- int it_overrun;
- int it_overrun_last;
+ s64 it_overrun;
+ s64 it_overrun_last;
int it_requeue_pending;
int it_sigev_notify;
ktime_t it_interval;
diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h
index b21c4bd96b84..f80769175c56 100644
--- a/include/linux/power_supply.h
+++ b/include/linux/power_supply.h
@@ -269,6 +269,7 @@ struct power_supply {
spinlock_t changed_lock;
bool changed;
bool initialized;
+ bool removing;
atomic_t use_cnt;
#ifdef CONFIG_THERMAL
struct thermal_zone_device *tzd;
diff --git a/include/linux/preempt.h b/include/linux/preempt.h
index 5bd3f151da78..c01813c3fbe9 100644
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -150,7 +150,7 @@
*/
#define in_atomic_preempt_off() (preempt_count() != PREEMPT_DISABLE_OFFSET)
-#if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_PREEMPT_TRACER)
+#if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_TRACE_PREEMPT_TOGGLE)
extern void preempt_count_add(int val);
extern void preempt_count_sub(int val);
#define preempt_count_dec_and_test() \
diff --git a/include/linux/printk.h b/include/linux/printk.h
index 6d7e800affd8..cf3eccfe1543 100644
--- a/include/linux/printk.h
+++ b/include/linux/printk.h
@@ -50,15 +50,15 @@ static inline const char *printk_skip_headers(const char *buffer)
/* We show everything that is MORE important than this.. */
#define CONSOLE_LOGLEVEL_SILENT 0 /* Mum's the word */
#define CONSOLE_LOGLEVEL_MIN 1 /* Minimum loglevel we let people use */
-#define CONSOLE_LOGLEVEL_QUIET 4 /* Shhh ..., when booted with "quiet" */
#define CONSOLE_LOGLEVEL_DEBUG 10 /* issue debug messages */
#define CONSOLE_LOGLEVEL_MOTORMOUTH 15 /* You can't shut this one up */
/*
- * Default used to be hard-coded at 7, we're now allowing it to be set from
- * kernel config.
+ * Default used to be hard-coded at 7, quiet used to be hardcoded at 4,
+ * we're now allowing both to be set from kernel config.
*/
#define CONSOLE_LOGLEVEL_DEFAULT CONFIG_CONSOLE_LOGLEVEL_DEFAULT
+#define CONSOLE_LOGLEVEL_QUIET CONFIG_CONSOLE_LOGLEVEL_QUIET
extern int console_printk[];
@@ -148,9 +148,13 @@ void early_printk(const char *s, ...) { }
#ifdef CONFIG_PRINTK_NMI
extern void printk_nmi_enter(void);
extern void printk_nmi_exit(void);
+extern void printk_nmi_direct_enter(void);
+extern void printk_nmi_direct_exit(void);
#else
static inline void printk_nmi_enter(void) { }
static inline void printk_nmi_exit(void) { }
+static inline void printk_nmi_direct_enter(void) { }
+static inline void printk_nmi_direct_exit(void) { }
#endif /* PRINTK_NMI */
#ifdef CONFIG_PRINTK
diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
index 626fc65c4336..d0e1f1522a78 100644
--- a/include/linux/proc_fs.h
+++ b/include/linux/proc_fs.h
@@ -129,7 +129,7 @@ int open_related_ns(struct ns_common *ns,
struct ns_common *(*get_ns)(struct ns_common *ns));
/* get the associated pid namespace for a file in procfs */
-static inline struct pid_namespace *proc_pid_ns(struct inode *inode)
+static inline struct pid_namespace *proc_pid_ns(const struct inode *inode)
{
return inode->i_sb->s_fs_info;
}
diff --git a/include/linux/pti.h b/include/linux/pti.h
index 0174883a935a..1a941efcaa62 100644
--- a/include/linux/pti.h
+++ b/include/linux/pti.h
@@ -6,6 +6,7 @@
#include <asm/pti.h>
#else
static inline void pti_init(void) { }
+static inline void pti_finalize(void) { }
#endif
#endif
diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
index 037bf0ef1ae9..4f36431c380b 100644
--- a/include/linux/ptrace.h
+++ b/include/linux/ptrace.h
@@ -214,8 +214,6 @@ static inline void ptrace_init_task(struct task_struct *child, bool ptrace)
task_set_jobctl_pending(child, JOBCTL_TRAP_STOP);
else
sigaddset(&child->pending.signal, SIGSTOP);
-
- set_tsk_thread_flag(child, TIF_SIGPENDING);
}
else
child->ptracer_cred = NULL;
diff --git a/include/linux/pxa2xx_ssp.h b/include/linux/pxa2xx_ssp.h
index 8461b18e4608..13b4244d44c1 100644
--- a/include/linux/pxa2xx_ssp.h
+++ b/include/linux/pxa2xx_ssp.h
@@ -171,6 +171,14 @@
#define SSACD_SCDB (1 << 3) /* SSPSYSCLK Divider Bypass */
#define SSACD_ACPS(x) ((x) << 4) /* Audio clock PLL select */
#define SSACD_ACDS(x) ((x) << 0) /* Audio clock divider select */
+#define SSACD_ACDS_1 (0)
+#define SSACD_ACDS_2 (1)
+#define SSACD_ACDS_4 (2)
+#define SSACD_ACDS_8 (3)
+#define SSACD_ACDS_16 (4)
+#define SSACD_ACDS_32 (5)
+#define SSACD_SCDB_4X (0)
+#define SSACD_SCDB_1X (1)
#define SSACD_SCDX8 (1 << 7) /* SYSCLK division ratio select */
/* LPSS SSP */
@@ -212,8 +220,6 @@ struct ssp_device {
int type;
int use_count;
int irq;
- int drcmr_rx;
- int drcmr_tx;
struct device_node *of_node;
};
diff --git a/include/linux/qcom_scm.h b/include/linux/qcom_scm.h
index b401b962afff..5d65521260b3 100644
--- a/include/linux/qcom_scm.h
+++ b/include/linux/qcom_scm.h
@@ -87,6 +87,10 @@ static inline int qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr,
static inline int
qcom_scm_pas_auth_and_reset(u32 peripheral) { return -ENODEV; }
static inline int qcom_scm_pas_shutdown(u32 peripheral) { return -ENODEV; }
+static inline int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz,
+ unsigned int *src,
+ struct qcom_scm_vmperm *newvm,
+ int dest_cnt) { return -ENODEV; }
static inline void qcom_scm_cpu_power_down(u32 flags) {}
static inline u32 qcom_scm_get_version(void) { return 0; }
static inline u32
diff --git a/include/linux/qed/qed_eth_if.h b/include/linux/qed/qed_eth_if.h
index 2978fa4add42..a1310482c4ed 100644
--- a/include/linux/qed/qed_eth_if.h
+++ b/include/linux/qed/qed_eth_if.h
@@ -39,6 +39,10 @@
#include <linux/qed/qed_if.h>
#include <linux/qed/qed_iov_if.h>
+/* 64 max queues * (1 rx + 4 tx-cos + 1 xdp) */
+#define QED_MIN_L2_CONS (2 + NUM_PHYS_TCS_4PORT_K2)
+#define QED_MAX_L2_CONS (64 * (QED_MIN_L2_CONS))
+
struct qed_queue_start_common_params {
/* Should always be relative to entity sending this. */
u8 vport_id;
@@ -49,6 +53,8 @@ struct qed_queue_start_common_params {
struct qed_sb_info *p_sb;
u8 sb_idx;
+
+ u8 tc;
};
struct qed_rxq_start_ret_params {
diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h
index b4040023cbfb..8cd34645e892 100644
--- a/include/linux/qed/qed_if.h
+++ b/include/linux/qed/qed_if.h
@@ -759,6 +759,9 @@ struct qed_generic_tlvs {
u8 mac[QED_TLV_MAC_COUNT][ETH_ALEN];
};
+#define QED_I2C_DEV_ADDR_A0 0xA0
+#define QED_I2C_DEV_ADDR_A2 0xA2
+
#define QED_NVM_SIGNATURE 0x12435687
enum qed_nvm_flash_cmd {
@@ -1026,6 +1029,18 @@ struct qed_common_ops {
* @param enabled - true iff WoL should be enabled.
*/
int (*update_wol) (struct qed_dev *cdev, bool enabled);
+
+/**
+ * @brief read_module_eeprom
+ *
+ * @param cdev
+ * @param buf - buffer
+ * @param dev_addr - PHY device memory region
+ * @param offset - offset into eeprom contents to be read
+ * @param len - buffer length, i.e., max bytes to be read
+ */
+ int (*read_module_eeprom)(struct qed_dev *cdev,
+ char *buf, u8 dev_addr, u32 offset, u32 len);
};
#define MASK_FIELD(_name, _value) \
diff --git a/include/linux/random.h b/include/linux/random.h
index 2ddf13b4281e..445a0ea4ff49 100644
--- a/include/linux/random.h
+++ b/include/linux/random.h
@@ -36,9 +36,10 @@ extern void add_interrupt_randomness(int irq, int irq_flags) __latent_entropy;
extern void get_random_bytes(void *buf, int nbytes);
extern int wait_for_random_bytes(void);
+extern bool rng_is_initialized(void);
extern int add_random_ready_callback(struct random_ready_callback *rdy);
extern void del_random_ready_callback(struct random_ready_callback *rdy);
-extern void get_random_bytes_arch(void *buf, int nbytes);
+extern int __must_check get_random_bytes_arch(void *buf, int nbytes);
#ifndef MODULE
extern const struct file_operations random_fops, urandom_fops;
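
rng_is_initialized() reports whether the CRNG has been seeded, and get_random_bytes_arch() now returns how many bytes the architectural RNG actually supplied. A minimal sketch of a caller that waits for a seeded pool before deriving long-lived material:

	static int example_make_key(u8 *key, int len)
	{
		if (!rng_is_initialized()) {
			int ret = wait_for_random_bytes();	/* may return -ERESTARTSYS */

			if (ret)
				return ret;
		}

		get_random_bytes(key, len);
		return 0;
	}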
diff --git a/include/linux/rculist.h b/include/linux/rculist.h
index 36df6ccbc874..4786c2235b98 100644
--- a/include/linux/rculist.h
+++ b/include/linux/rculist.h
@@ -396,7 +396,16 @@ static inline void list_splice_tail_init_rcu(struct list_head *list,
* @member: the name of the list_head within the struct.
*
* Continue to iterate over list of given type, continuing after
- * the current position.
+ * the current position which must have been in the list when the RCU read
+ * lock was taken.
+ * This would typically require either that you obtained the node from a
+ * previous walk of the list in the same RCU read-side critical section, or
+ * that you held some sort of non-RCU reference (such as a reference count)
+ * to keep the node alive *and* in the list.
+ *
+ * This iterator is similar to list_for_each_entry_from_rcu() except
+ * this starts after the given position and that one starts at the given
+ * position.
*/
#define list_for_each_entry_continue_rcu(pos, head, member) \
for (pos = list_entry_rcu(pos->member.next, typeof(*pos), member); \
@@ -411,6 +420,14 @@ static inline void list_splice_tail_init_rcu(struct list_head *list,
*
* Iterate over the tail of a list starting from a given position,
* which must have been in the list when the RCU read lock was taken.
+ * This would typically require either that you obtained the node from a
+ * previous walk of the list in the same RCU read-side critical section, or
+ * that you held some sort of non-RCU reference (such as a reference count)
+ * to keep the node alive *and* in the list.
+ *
+ * This iterator is similar to list_for_each_entry_continue_rcu() except
+ * this starts from the given position and that one starts from the position
+ * after the given position.
*/
#define list_for_each_entry_from_rcu(pos, head, member) \
for (; &(pos)->member != (head); \
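
The expanded comments make the contract explicit: both iterators need the starting node to still be on the list, list_for_each_entry_from_rcu() revisits that node, and list_for_each_entry_continue_rcu() starts after it. A hedged sketch, assuming an illustrative 'struct item' list protected by RCU:

	struct item {
		struct list_head node;
		int payload;
	};

	/* 'pos' must still be on the list, e.g. pinned by a reference count. */
	static void example_resume_walk(struct list_head *head, struct item *pos)
	{
		rcu_read_lock();
		/* Re-processes 'pos' itself, then everything after it. */
		list_for_each_entry_from_rcu(pos, head, node)
			pr_debug("payload %d\n", pos->payload);
		rcu_read_unlock();
	}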
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 65163aa0bb04..75e5b393cf44 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -64,7 +64,6 @@ void rcu_barrier_tasks(void);
void __rcu_read_lock(void);
void __rcu_read_unlock(void);
-void rcu_read_unlock_special(struct task_struct *t);
void synchronize_rcu(void);
/*
@@ -159,11 +158,11 @@ static inline void rcu_init_nohz(void) { }
} while (0)
/*
- * Note a voluntary context switch for RCU-tasks benefit. This is a
- * macro rather than an inline function to avoid #include hell.
+ * Note a quasi-voluntary context switch for RCU-tasks's benefit.
+ * This is a macro rather than an inline function to avoid #include hell.
*/
#ifdef CONFIG_TASKS_RCU
-#define rcu_note_voluntary_context_switch_lite(t) \
+#define rcu_tasks_qs(t) \
do { \
if (READ_ONCE((t)->rcu_tasks_holdout)) \
WRITE_ONCE((t)->rcu_tasks_holdout, false); \
@@ -171,14 +170,14 @@ static inline void rcu_init_nohz(void) { }
#define rcu_note_voluntary_context_switch(t) \
do { \
rcu_all_qs(); \
- rcu_note_voluntary_context_switch_lite(t); \
+ rcu_tasks_qs(t); \
} while (0)
void call_rcu_tasks(struct rcu_head *head, rcu_callback_t func);
void synchronize_rcu_tasks(void);
void exit_tasks_rcu_start(void);
void exit_tasks_rcu_finish(void);
#else /* #ifdef CONFIG_TASKS_RCU */
-#define rcu_note_voluntary_context_switch_lite(t) do { } while (0)
+#define rcu_tasks_qs(t) do { } while (0)
#define rcu_note_voluntary_context_switch(t) rcu_all_qs()
#define call_rcu_tasks call_rcu_sched
#define synchronize_rcu_tasks synchronize_sched
@@ -195,8 +194,8 @@ static inline void exit_tasks_rcu_finish(void) { }
*/
#define cond_resched_tasks_rcu_qs() \
do { \
- if (!cond_resched()) \
- rcu_note_voluntary_context_switch_lite(current); \
+ rcu_tasks_qs(current); \
+ cond_resched(); \
} while (0)
/*
@@ -567,8 +566,8 @@ static inline void rcu_preempt_sleep_check(void) { }
* This is simply an identity function, but it documents where a pointer
* is handed off from RCU to some other synchronization mechanism, for
* example, reference counting or locking. In C11, it would map to
- * kill_dependency(). It could be used as follows:
- * ``
+ * kill_dependency(). It could be used as follows::
+ *
* rcu_read_lock();
* p = rcu_dereference(gp);
* long_lived = is_long_lived(p);
@@ -579,7 +578,6 @@ static inline void rcu_preempt_sleep_check(void) { }
* p = rcu_pointer_handoff(p);
* }
* rcu_read_unlock();
- *``
*/
#define rcu_pointer_handoff(p) (p)
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
index 7b3c82e8a625..8d9a0ea8f0b5 100644
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -93,7 +93,7 @@ static inline void kfree_call_rcu(struct rcu_head *head,
#define rcu_note_context_switch(preempt) \
do { \
rcu_sched_qs(); \
- rcu_note_voluntary_context_switch_lite(current); \
+ rcu_tasks_qs(current); \
} while (0)
static inline int rcu_needs_cpu(u64 basemono, u64 *nextevt)
diff --git a/include/linux/reciprocal_div.h b/include/linux/reciprocal_div.h
index e031e9f2f9d8..585ce89c0f33 100644
--- a/include/linux/reciprocal_div.h
+++ b/include/linux/reciprocal_div.h
@@ -25,6 +25,9 @@ struct reciprocal_value {
u8 sh1, sh2;
};
+/* "reciprocal_value" and "reciprocal_divide" together implement the basic
+ * version of the algorithm described in Figure 4.1 of the paper.
+ */
struct reciprocal_value reciprocal_value(u32 d);
static inline u32 reciprocal_divide(u32 a, struct reciprocal_value R)
@@ -33,4 +36,69 @@ static inline u32 reciprocal_divide(u32 a, struct reciprocal_value R)
return (t + ((a - t) >> R.sh1)) >> R.sh2;
}
+struct reciprocal_value_adv {
+ u32 m;
+ u8 sh, exp;
+ bool is_wide_m;
+};
+
+/* "reciprocal_value_adv" implements the advanced version of the algorithm
+ * described in Figure 4.2 of the paper except when "divisor > (1U << 31)" whose
+ * ceil(log2(d)) result will be 32 which then requires u128 divide on host. The
+ * exception case could be easily handled before calling "reciprocal_value_adv".
+ *
+ * The advanced version requires more complex calculation to get the reciprocal
+ * multiplier and other control variables, but then could reduce the required
+ * emulation operations.
+ *
+ * It makes no sense to use this advanced version for host divide emulation,
+ * those extra complexities for calculating multiplier etc could completely
+ * waive our saving on emulation operations.
+ *
+ * However, it makes sense to use it for JIT divide code generation for which
+ * we are willing to trade performance of JITed code with that of host. As shown
+ * by the following pseudo code, the required emulation operations could go down
+ * from 6 (the basic version) to 3 or 4.
+ *
+ * To use the result of "reciprocal_value_adv", suppose we want to calculate
+ * n/d, the pseudo C code will be:
+ *
+ * struct reciprocal_value_adv rvalue;
+ * u8 pre_shift, exp;
+ *
+ * // handle exception case.
+ * if (d >= (1U << 31)) {
+ * result = n >= d;
+ * return;
+ * }
+ *
+ * rvalue = reciprocal_value_adv(d, 32)
+ * exp = rvalue.exp;
+ * if (rvalue.is_wide_m && !(d & 1)) {
+ * // floor(log2(d & (2^32 -d)))
+ * pre_shift = fls(d & -d) - 1;
+ * rvalue = reciprocal_value_adv(d >> pre_shift, 32 - pre_shift);
+ * } else {
+ * pre_shift = 0;
+ * }
+ *
+ * // code generation starts.
+ * if (imm == 1U << exp) {
+ * result = n >> exp;
+ * } else if (rvalue.is_wide_m) {
+ * // pre_shift must be zero when reached here.
+ * t = (n * rvalue.m) >> 32;
+ * result = n - t;
+ * result >>= 1;
+ * result += t;
+ * result >>= rvalue.sh - 1;
+ * } else {
+ * if (pre_shift)
+ * result = n >> pre_shift;
+ * result = ((u64)result * rvalue.m) >> 32;
+ * result >>= rvalue.sh;
+ * }
+ */
+struct reciprocal_value_adv reciprocal_value_adv(u32 d, u8 prec);
+
#endif /* _LINUX_RECIPROCAL_DIV_H */
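
For the basic version the usage pattern is: compute reciprocal_value() once per divisor, then replace each division with reciprocal_divide(). A minimal sketch (divisor and values are arbitrary):

	static void example_reciprocal(void)
	{
		struct reciprocal_value rv = reciprocal_value(1000);	/* d = 1000 */
		u32 i;

		for (i = 0; i < 10; i++) {
			u32 q = reciprocal_divide(i * 12345, rv);	/* == (i * 12345) / 1000 */

			pr_debug("q = %u\n", q);
		}
	}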
diff --git a/include/linux/refcount.h b/include/linux/refcount.h
index 4193c41e383a..e28cce21bad6 100644
--- a/include/linux/refcount.h
+++ b/include/linux/refcount.h
@@ -3,9 +3,10 @@
#define _LINUX_REFCOUNT_H
#include <linux/atomic.h>
-#include <linux/mutex.h>
-#include <linux/spinlock.h>
-#include <linux/kernel.h>
+#include <linux/compiler.h>
+#include <linux/spinlock_types.h>
+
+struct mutex;
/**
* struct refcount_t - variant of atomic_t specialized for reference counts
@@ -42,17 +43,30 @@ static inline unsigned int refcount_read(const refcount_t *r)
return atomic_read(&r->refs);
}
+extern __must_check bool refcount_add_not_zero_checked(unsigned int i, refcount_t *r);
+extern void refcount_add_checked(unsigned int i, refcount_t *r);
+
+extern __must_check bool refcount_inc_not_zero_checked(refcount_t *r);
+extern void refcount_inc_checked(refcount_t *r);
+
+extern __must_check bool refcount_sub_and_test_checked(unsigned int i, refcount_t *r);
+
+extern __must_check bool refcount_dec_and_test_checked(refcount_t *r);
+extern void refcount_dec_checked(refcount_t *r);
+
#ifdef CONFIG_REFCOUNT_FULL
-extern __must_check bool refcount_add_not_zero(unsigned int i, refcount_t *r);
-extern void refcount_add(unsigned int i, refcount_t *r);
-extern __must_check bool refcount_inc_not_zero(refcount_t *r);
-extern void refcount_inc(refcount_t *r);
+#define refcount_add_not_zero refcount_add_not_zero_checked
+#define refcount_add refcount_add_checked
+
+#define refcount_inc_not_zero refcount_inc_not_zero_checked
+#define refcount_inc refcount_inc_checked
-extern __must_check bool refcount_sub_and_test(unsigned int i, refcount_t *r);
+#define refcount_sub_and_test refcount_sub_and_test_checked
+
+#define refcount_dec_and_test refcount_dec_and_test_checked
+#define refcount_dec refcount_dec_checked
-extern __must_check bool refcount_dec_and_test(refcount_t *r);
-extern void refcount_dec(refcount_t *r);
#else
# ifdef CONFIG_ARCH_HAS_REFCOUNT
# include <asm/refcount.h>
@@ -98,5 +112,7 @@ extern __must_check bool refcount_dec_if_one(refcount_t *r);
extern __must_check bool refcount_dec_not_one(refcount_t *r);
extern __must_check bool refcount_dec_and_mutex_lock(refcount_t *r, struct mutex *lock);
extern __must_check bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock);
-
+extern __must_check bool refcount_dec_and_lock_irqsave(refcount_t *r,
+ spinlock_t *lock,
+ unsigned long *flags);
#endif /* _LINUX_REFCOUNT_H */
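
refcount_dec_and_lock_irqsave() rounds out the _and_lock helpers for release paths that need an irqsave spinlock. A hedged sketch of a put routine; the object and list layout are illustrative:

	struct example_obj {
		refcount_t ref;
		struct list_head node;
	};

	static void example_put(struct example_obj *obj, spinlock_t *list_lock)
	{
		unsigned long flags;

		if (!refcount_dec_and_lock_irqsave(&obj->ref, list_lock, &flags))
			return;

		/* Last reference: unlink under the lock, then free. */
		list_del(&obj->node);
		spin_unlock_irqrestore(list_lock, flags);
		kfree(obj);
	}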
diff --git a/include/linux/regmap.h b/include/linux/regmap.h
index 4f38068ffb71..379505a53722 100644
--- a/include/linux/regmap.h
+++ b/include/linux/regmap.h
@@ -268,6 +268,13 @@ typedef void (*regmap_unlock)(void *);
* field is NULL but precious_table (see below) is not, the
* check is performed on such table (a register is precious if
* it belongs to one of the ranges specified by precious_table).
+ * @readable_noinc_reg: Optional callback returning true if the register
+ * supports multiple read operations without incrementing
+ * the register number. If this field is NULL but
+ * rd_noinc_table (see below) is not, the check is
+ * performed on such table (a register is no increment
+ * readable if it belongs to one of the ranges specified
+ * by rd_noinc_table).
* @disable_locking: This regmap is either protected by external means or
 * is guaranteed not to be accessed from multiple threads.
* Don't use any locking mechanisms.
@@ -295,6 +302,7 @@ typedef void (*regmap_unlock)(void *);
* @rd_table: As above, for read access.
* @volatile_table: As above, for volatile registers.
* @precious_table: As above, for precious registers.
+ * @rd_noinc_table: As above, for no increment readable registers.
* @reg_defaults: Power on reset values for registers (for use with
* register cache support).
* @num_reg_defaults: Number of elements in reg_defaults.
@@ -344,6 +352,7 @@ struct regmap_config {
bool (*readable_reg)(struct device *dev, unsigned int reg);
bool (*volatile_reg)(struct device *dev, unsigned int reg);
bool (*precious_reg)(struct device *dev, unsigned int reg);
+ bool (*readable_noinc_reg)(struct device *dev, unsigned int reg);
bool disable_locking;
regmap_lock lock;
@@ -360,6 +369,7 @@ struct regmap_config {
const struct regmap_access_table *rd_table;
const struct regmap_access_table *volatile_table;
const struct regmap_access_table *precious_table;
+ const struct regmap_access_table *rd_noinc_table;
const struct reg_default *reg_defaults;
unsigned int num_reg_defaults;
enum regcache_type cache_type;
@@ -514,6 +524,10 @@ struct regmap *__regmap_init_i2c(struct i2c_client *i2c,
const struct regmap_config *config,
struct lock_class_key *lock_key,
const char *lock_name);
+struct regmap *__regmap_init_sccb(struct i2c_client *i2c,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name);
struct regmap *__regmap_init_slimbus(struct slim_device *slimbus,
const struct regmap_config *config,
struct lock_class_key *lock_key,
@@ -558,6 +572,10 @@ struct regmap *__devm_regmap_init_i2c(struct i2c_client *i2c,
const struct regmap_config *config,
struct lock_class_key *lock_key,
const char *lock_name);
+struct regmap *__devm_regmap_init_sccb(struct i2c_client *i2c,
+ const struct regmap_config *config,
+ struct lock_class_key *lock_key,
+ const char *lock_name);
struct regmap *__devm_regmap_init_spi(struct spi_device *dev,
const struct regmap_config *config,
struct lock_class_key *lock_key,
@@ -646,6 +664,19 @@ int regmap_attach_dev(struct device *dev, struct regmap *map,
i2c, config)
/**
+ * regmap_init_sccb() - Initialise register map
+ *
+ * @i2c: Device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer to
+ * a struct regmap.
+ */
+#define regmap_init_sccb(i2c, config) \
+ __regmap_lockdep_wrapper(__regmap_init_sccb, #config, \
+ i2c, config)
+
+/**
* regmap_init_slimbus() - Initialise register map
*
* @slimbus: Device that will be interacted with
@@ -798,6 +829,20 @@ bool regmap_ac97_default_volatile(struct device *dev, unsigned int reg);
i2c, config)
/**
+ * devm_regmap_init_sccb() - Initialise managed register map
+ *
+ * @i2c: Device that will be interacted with
+ * @config: Configuration for register map
+ *
+ * The return value will be an ERR_PTR() on error or a valid pointer
+ * to a struct regmap. The regmap will be automatically freed by the
+ * device management code.
+ */
+#define devm_regmap_init_sccb(i2c, config) \
+ __regmap_lockdep_wrapper(__devm_regmap_init_sccb, #config, \
+ i2c, config)
+
+/**
* devm_regmap_init_spi() - Initialise register map
*
* @dev: Device that will be interacted with
@@ -946,6 +991,8 @@ int regmap_raw_write_async(struct regmap *map, unsigned int reg,
int regmap_read(struct regmap *map, unsigned int reg, unsigned int *val);
int regmap_raw_read(struct regmap *map, unsigned int reg,
void *val, size_t val_len);
+int regmap_noinc_read(struct regmap *map, unsigned int reg,
+ void *val, size_t val_len);
int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val,
size_t val_count);
int regmap_update_bits_base(struct regmap *map, unsigned int reg,
@@ -1196,6 +1243,13 @@ static inline int regmap_raw_read(struct regmap *map, unsigned int reg,
return -EINVAL;
}
+static inline int regmap_noinc_read(struct regmap *map, unsigned int reg,
+ void *val, size_t val_len)
+{
+ WARN_ONCE(1, "regmap API is disabled");
+ return -EINVAL;
+}
+
static inline int regmap_bulk_read(struct regmap *map, unsigned int reg,
void *val, size_t val_count)
{
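
readable_noinc_reg/rd_noinc_table describe registers that return a data stream without the address auto-incrementing (FIFO-style), and regmap_noinc_read() performs such a read. A hedged sketch; the register address and transfer size are illustrative:

	#define EXAMPLE_REG_FIFO	0x20	/* illustrative FIFO register */

	static bool example_readable_noinc_reg(struct device *dev, unsigned int reg)
	{
		return reg == EXAMPLE_REG_FIFO;
	}

	static const struct regmap_config example_regmap_config = {
		.reg_bits		= 8,
		.val_bits		= 8,
		.readable_noinc_reg	= example_readable_noinc_reg,
	};

	/* later, drain 16 bytes from the FIFO without address increment: */
	/* err = regmap_noinc_read(map, EXAMPLE_REG_FIFO, buf, 16); */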
diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h
index fc2dc8df476f..0fd8fbb74763 100644
--- a/include/linux/regulator/driver.h
+++ b/include/linux/regulator/driver.h
@@ -46,7 +46,7 @@ enum regulator_status {
/**
* struct regulator_linear_range - specify linear voltage ranges
*
- * Specify a range of voltages for regulator_map_linar_range() and
+ * Specify a range of voltages for regulator_map_linear_range() and
* regulator_list_linear_range().
*
* @min_uV: Lowest voltage in range
@@ -220,7 +220,7 @@ struct regulator_ops {
/* set regulator suspend operating mode (defined in consumer.h) */
int (*set_suspend_mode) (struct regulator_dev *, unsigned int mode);
- int (*resume_early)(struct regulator_dev *rdev);
+ int (*resume)(struct regulator_dev *rdev);
int (*set_pull_down) (struct regulator_dev *);
};
diff --git a/include/linux/regulator/pfuze100.h b/include/linux/regulator/pfuze100.h
index e0ccf46f66cf..cb5aecd40f07 100644
--- a/include/linux/regulator/pfuze100.h
+++ b/include/linux/regulator/pfuze100.h
@@ -64,6 +64,17 @@
#define PFUZE3000_VLDO3 11
#define PFUZE3000_VLDO4 12
+#define PFUZE3001_SW1 0
+#define PFUZE3001_SW2 1
+#define PFUZE3001_SW3 2
+#define PFUZE3001_VSNVS 3
+#define PFUZE3001_VLDO1 4
+#define PFUZE3001_VLDO2 5
+#define PFUZE3001_VCCSD 6
+#define PFUZE3001_V33 7
+#define PFUZE3001_VLDO3 8
+#define PFUZE3001_VLDO4 9
+
struct regulator_init_data;
struct pfuze_regulator_platform_data {
diff --git a/include/linux/remoteproc.h b/include/linux/remoteproc.h
index dfdaede9139e..e3c5d856b6da 100644
--- a/include/linux/remoteproc.h
+++ b/include/linux/remoteproc.h
@@ -477,15 +477,19 @@ struct rproc {
/**
* struct rproc_subdev - subdevice tied to a remoteproc
* @node: list node related to the rproc subdevs list
- * @probe: probe function, called as the rproc is started
- * @remove: remove function, called as the rproc is being stopped, the @crashed
- * parameter indicates if this originates from the a recovery
+ * @prepare: prepare function, called before the rproc is started
+ * @start: start function, called after the rproc has been started
+ * @stop: stop function, called before the rproc is stopped; the @crashed
+ * parameter indicates if this originates from a recovery
+ * @unprepare: unprepare function, called after the rproc has been stopped
*/
struct rproc_subdev {
struct list_head node;
- int (*probe)(struct rproc_subdev *subdev);
- void (*remove)(struct rproc_subdev *subdev, bool crashed);
+ int (*prepare)(struct rproc_subdev *subdev);
+ int (*start)(struct rproc_subdev *subdev);
+ void (*stop)(struct rproc_subdev *subdev, bool crashed);
+ void (*unprepare)(struct rproc_subdev *subdev);
};
/* we currently support only two vrings per rvdev */
@@ -566,10 +570,7 @@ static inline struct rproc *vdev_to_rproc(struct virtio_device *vdev)
return rvdev->rproc;
}
-void rproc_add_subdev(struct rproc *rproc,
- struct rproc_subdev *subdev,
- int (*probe)(struct rproc_subdev *subdev),
- void (*remove)(struct rproc_subdev *subdev, bool crashed));
+void rproc_add_subdev(struct rproc *rproc, struct rproc_subdev *subdev);
void rproc_remove_subdev(struct rproc *rproc, struct rproc_subdev *subdev);
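
Subdevices now get a four-phase prepare/start/stop/unprepare life cycle, and rproc_add_subdev() simply registers a subdev whose callbacks the caller has filled in. A hedged sketch; the container structure is illustrative:

	struct example_subdev {
		struct rproc_subdev subdev;
		/* driver-private state */
	};

	static int example_start(struct rproc_subdev *subdev)
	{
		/* the remote processor is now running */
		return 0;
	}

	static void example_stop(struct rproc_subdev *subdev, bool crashed)
	{
		/* quiesce; 'crashed' distinguishes recovery from a clean stop */
	}

	static void example_register(struct rproc *rproc, struct example_subdev *es)
	{
		es->subdev.start = example_start;
		es->subdev.stop = example_stop;
		/* .prepare/.unprepare are optional and left NULL here */
		rproc_add_subdev(rproc, &es->subdev);
	}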
diff --git a/include/linux/rfkill.h b/include/linux/rfkill.h
index e6a0031d1b1f..8ad2487a86d5 100644
--- a/include/linux/rfkill.h
+++ b/include/linux/rfkill.h
@@ -66,7 +66,7 @@ struct rfkill_ops {
#if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE)
/**
- * rfkill_alloc - allocate rfkill structure
+ * rfkill_alloc - Allocate rfkill structure
* @name: name of the struct -- the string is not copied internally
* @parent: device that has rf switch on it
* @type: type of the switch (RFKILL_TYPE_*)
@@ -112,7 +112,7 @@ void rfkill_pause_polling(struct rfkill *rfkill);
/**
* rfkill_resume_polling(struct rfkill *rfkill)
*
- * Pause polling -- say transmitter is off for other reasons.
+ * Resume polling
* NOTE: not necessary for suspend/resume -- in that case the
* core stops polling anyway
*/
@@ -130,7 +130,7 @@ void rfkill_resume_polling(struct rfkill *rfkill);
void rfkill_unregister(struct rfkill *rfkill);
/**
- * rfkill_destroy - free rfkill structure
+ * rfkill_destroy - Free rfkill structure
* @rfkill: rfkill structure to be destroyed
*
* Destroys the rfkill structure.
@@ -140,7 +140,7 @@ void rfkill_destroy(struct rfkill *rfkill);
/**
* rfkill_set_hw_state - Set the internal rfkill hardware block state
* @rfkill: pointer to the rfkill class to modify.
- * @state: the current hardware block state to set
+ * @blocked: the current hardware block state to set
*
* rfkill drivers that get events when the hard-blocked state changes
* use this function to notify the rfkill core (and through that also
@@ -161,7 +161,7 @@ bool rfkill_set_hw_state(struct rfkill *rfkill, bool blocked);
/**
* rfkill_set_sw_state - Set the internal rfkill software block state
* @rfkill: pointer to the rfkill class to modify.
- * @state: the current software block state to set
+ * @blocked: the current software block state to set
*
* rfkill drivers that get events when the soft-blocked state changes
* (yes, some platforms directly act on input but allow changing again)
@@ -183,7 +183,7 @@ bool rfkill_set_sw_state(struct rfkill *rfkill, bool blocked);
/**
* rfkill_init_sw_state - Initialize persistent software block state
* @rfkill: pointer to the rfkill class to modify.
- * @state: the current software block state to set
+ * @blocked: the current software block state to set
*
* rfkill drivers that preserve their software block state over power off
* use this function to notify the rfkill core (and through that also
@@ -208,17 +208,17 @@ void rfkill_init_sw_state(struct rfkill *rfkill, bool blocked);
void rfkill_set_states(struct rfkill *rfkill, bool sw, bool hw);
/**
- * rfkill_blocked - query rfkill block
+ * rfkill_blocked - Query rfkill block state
*
* @rfkill: rfkill struct to query
*/
bool rfkill_blocked(struct rfkill *rfkill);
/**
- * rfkill_find_type - Helpper for finding rfkill type by name
+ * rfkill_find_type - Helper for finding rfkill type by name
* @name: the name of the type
*
- * Returns enum rfkill_type that conrresponds the name.
+ * Returns enum rfkill_type that corresponds to the name.
*/
enum rfkill_type rfkill_find_type(const char *name);
@@ -296,7 +296,7 @@ static inline enum rfkill_type rfkill_find_type(const char *name)
const char *rfkill_get_led_trigger_name(struct rfkill *rfkill);
/**
- * rfkill_set_led_trigger_name -- set the LED trigger name
+ * rfkill_set_led_trigger_name - Set the LED trigger name
* @rfkill: rfkill struct
* @name: LED trigger name
*
diff --git a/include/linux/rhashtable-types.h b/include/linux/rhashtable-types.h
new file mode 100644
index 000000000000..763d613ce2c2
--- /dev/null
+++ b/include/linux/rhashtable-types.h
@@ -0,0 +1,137 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Resizable, Scalable, Concurrent Hash Table
+ *
+ * Simple structures that might be needed in include
+ * files.
+ */
+
+#ifndef _LINUX_RHASHTABLE_TYPES_H
+#define _LINUX_RHASHTABLE_TYPES_H
+
+#include <linux/atomic.h>
+#include <linux/compiler.h>
+#include <linux/mutex.h>
+#include <linux/workqueue.h>
+
+struct rhash_head {
+ struct rhash_head __rcu *next;
+};
+
+struct rhlist_head {
+ struct rhash_head rhead;
+ struct rhlist_head __rcu *next;
+};
+
+struct bucket_table;
+
+/**
+ * struct rhashtable_compare_arg - Key for the function rhashtable_compare
+ * @ht: Hash table
+ * @key: Key to compare against
+ */
+struct rhashtable_compare_arg {
+ struct rhashtable *ht;
+ const void *key;
+};
+
+typedef u32 (*rht_hashfn_t)(const void *data, u32 len, u32 seed);
+typedef u32 (*rht_obj_hashfn_t)(const void *data, u32 len, u32 seed);
+typedef int (*rht_obj_cmpfn_t)(struct rhashtable_compare_arg *arg,
+ const void *obj);
+
+/**
+ * struct rhashtable_params - Hash table construction parameters
+ * @nelem_hint: Hint on number of elements, should be 75% of desired size
+ * @key_len: Length of key
+ * @key_offset: Offset of key in struct to be hashed
+ * @head_offset: Offset of rhash_head in struct to be hashed
+ * @max_size: Maximum size while expanding
+ * @min_size: Minimum size while shrinking
+ * @locks_mul: Number of bucket locks to allocate per cpu (default: 32)
+ * @automatic_shrinking: Enable automatic shrinking of tables
+ * @hashfn: Hash function (default: jhash2 if !(key_len % 4), or jhash)
+ * @obj_hashfn: Function to hash object
+ * @obj_cmpfn: Function to compare key with object
+ */
+struct rhashtable_params {
+ u16 nelem_hint;
+ u16 key_len;
+ u16 key_offset;
+ u16 head_offset;
+ unsigned int max_size;
+ u16 min_size;
+ bool automatic_shrinking;
+ u8 locks_mul;
+ rht_hashfn_t hashfn;
+ rht_obj_hashfn_t obj_hashfn;
+ rht_obj_cmpfn_t obj_cmpfn;
+};
+
+/**
+ * struct rhashtable - Hash table handle
+ * @tbl: Bucket table
+ * @key_len: Key length for hashfn
+ * @max_elems: Maximum number of elements in table
+ * @p: Configuration parameters
+ * @rhlist: True if this is an rhltable
+ * @run_work: Deferred worker to expand/shrink asynchronously
+ * @mutex: Mutex to protect current/future table swapping
+ * @lock: Spin lock to protect walker list
+ * @nelems: Number of elements in table
+ */
+struct rhashtable {
+ struct bucket_table __rcu *tbl;
+ unsigned int key_len;
+ unsigned int max_elems;
+ struct rhashtable_params p;
+ bool rhlist;
+ struct work_struct run_work;
+ struct mutex mutex;
+ spinlock_t lock;
+ atomic_t nelems;
+};
+
+/**
+ * struct rhltable - Hash table with duplicate objects in a list
+ * @ht: Underlying rhtable
+ */
+struct rhltable {
+ struct rhashtable ht;
+};
+
+/**
+ * struct rhashtable_walker - Hash table walker
+ * @list: List entry on list of walkers
+ * @tbl: The table that we were walking over
+ */
+struct rhashtable_walker {
+ struct list_head list;
+ struct bucket_table *tbl;
+};
+
+/**
+ * struct rhashtable_iter - Hash table iterator
+ * @ht: Table to iterate through
+ * @p: Current pointer
+ * @list: Current hash list pointer
+ * @walker: Associated rhashtable walker
+ * @slot: Current slot
+ * @skip: Number of entries to skip in slot
+ */
+struct rhashtable_iter {
+ struct rhashtable *ht;
+ struct rhash_head *p;
+ struct rhlist_head *list;
+ struct rhashtable_walker walker;
+ unsigned int slot;
+ unsigned int skip;
+ bool end_of_table;
+};
+
+int rhashtable_init(struct rhashtable *ht,
+ const struct rhashtable_params *params);
+int rhltable_init(struct rhltable *hlt,
+ const struct rhashtable_params *params);
+
+#endif /* _LINUX_RHASHTABLE_TYPES_H */
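
Splitting the type definitions into rhashtable-types.h lets headers embed a struct rhashtable without pulling in the inline lookup/insert machinery; code that actually operates on the table still includes rhashtable.h. A minimal sketch of a table keyed by a u32 id; the object layout is illustrative:

	struct example_entry {
		u32 id;				/* key */
		struct rhash_head node;
	};

	static const struct rhashtable_params example_params = {
		.key_len	= sizeof(u32),
		.key_offset	= offsetof(struct example_entry, id),
		.head_offset	= offsetof(struct example_entry, node),
		.automatic_shrinking = true,
	};

	static int example_table_create(struct rhashtable *ht)
	{
		return rhashtable_init(ht, &example_params);
	}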
diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h
index 4e1f535c2034..eb7111039247 100644
--- a/include/linux/rhashtable.h
+++ b/include/linux/rhashtable.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
* Resizable, Scalable, Concurrent Hash Table
*
@@ -17,37 +18,18 @@
#ifndef _LINUX_RHASHTABLE_H
#define _LINUX_RHASHTABLE_H
-#include <linux/atomic.h>
-#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/jhash.h>
#include <linux/list_nulls.h>
#include <linux/workqueue.h>
-#include <linux/mutex.h>
#include <linux/rculist.h>
+#include <linux/rhashtable-types.h>
/*
 * The end of the chain is marked with a special nulls marker which has
- * the following format:
- *
- * +-------+-----------------------------------------------------+-+
- * | Base | Hash |1|
- * +-------+-----------------------------------------------------+-+
- *
- * Base (4 bits) : Reserved to distinguish between multiple tables.
- * Specified via &struct rhashtable_params.nulls_base.
- * Hash (27 bits): Full hash (unmasked) of first element added to bucket
- * 1 (1 bit) : Nulls marker (always set)
- *
- * The remaining bits of the next pointer remain unused for now.
+ * the least significant bit set.
*/
-#define RHT_BASE_BITS 4
-#define RHT_HASH_BITS 27
-#define RHT_BASE_SHIFT RHT_HASH_BITS
-
-/* Base bits plus 1 bit for nulls marker */
-#define RHT_HASH_RESERVED_SPACE (RHT_BASE_BITS + 1)
/* Maximum chain length before rehash
*
@@ -64,15 +46,6 @@
*/
#define RHT_ELASTICITY 16u
-struct rhash_head {
- struct rhash_head __rcu *next;
-};
-
-struct rhlist_head {
- struct rhash_head rhead;
- struct rhlist_head __rcu *next;
-};
-
/**
* struct bucket_table - Table of hash buckets
* @size: Number of hash buckets
@@ -102,132 +75,14 @@ struct bucket_table {
struct rhash_head __rcu *buckets[] ____cacheline_aligned_in_smp;
};
-/**
- * struct rhashtable_compare_arg - Key for the function rhashtable_compare
- * @ht: Hash table
- * @key: Key to compare against
- */
-struct rhashtable_compare_arg {
- struct rhashtable *ht;
- const void *key;
-};
-
-typedef u32 (*rht_hashfn_t)(const void *data, u32 len, u32 seed);
-typedef u32 (*rht_obj_hashfn_t)(const void *data, u32 len, u32 seed);
-typedef int (*rht_obj_cmpfn_t)(struct rhashtable_compare_arg *arg,
- const void *obj);
-
-struct rhashtable;
-
-/**
- * struct rhashtable_params - Hash table construction parameters
- * @nelem_hint: Hint on number of elements, should be 75% of desired size
- * @key_len: Length of key
- * @key_offset: Offset of key in struct to be hashed
- * @head_offset: Offset of rhash_head in struct to be hashed
- * @max_size: Maximum size while expanding
- * @min_size: Minimum size while shrinking
- * @locks_mul: Number of bucket locks to allocate per cpu (default: 32)
- * @automatic_shrinking: Enable automatic shrinking of tables
- * @nulls_base: Base value to generate nulls marker
- * @hashfn: Hash function (default: jhash2 if !(key_len % 4), or jhash)
- * @obj_hashfn: Function to hash object
- * @obj_cmpfn: Function to compare key with object
- */
-struct rhashtable_params {
- u16 nelem_hint;
- u16 key_len;
- u16 key_offset;
- u16 head_offset;
- unsigned int max_size;
- u16 min_size;
- bool automatic_shrinking;
- u8 locks_mul;
- u32 nulls_base;
- rht_hashfn_t hashfn;
- rht_obj_hashfn_t obj_hashfn;
- rht_obj_cmpfn_t obj_cmpfn;
-};
-
-/**
- * struct rhashtable - Hash table handle
- * @tbl: Bucket table
- * @key_len: Key length for hashfn
- * @max_elems: Maximum number of elements in table
- * @p: Configuration parameters
- * @rhlist: True if this is an rhltable
- * @run_work: Deferred worker to expand/shrink asynchronously
- * @mutex: Mutex to protect current/future table swapping
- * @lock: Spin lock to protect walker list
- * @nelems: Number of elements in table
- */
-struct rhashtable {
- struct bucket_table __rcu *tbl;
- unsigned int key_len;
- unsigned int max_elems;
- struct rhashtable_params p;
- bool rhlist;
- struct work_struct run_work;
- struct mutex mutex;
- spinlock_t lock;
- atomic_t nelems;
-};
-
-/**
- * struct rhltable - Hash table with duplicate objects in a list
- * @ht: Underlying rhtable
- */
-struct rhltable {
- struct rhashtable ht;
-};
-
-/**
- * struct rhashtable_walker - Hash table walker
- * @list: List entry on list of walkers
- * @tbl: The table that we were walking over
- */
-struct rhashtable_walker {
- struct list_head list;
- struct bucket_table *tbl;
-};
-
-/**
- * struct rhashtable_iter - Hash table iterator
- * @ht: Table to iterate through
- * @p: Current pointer
- * @list: Current hash list pointer
- * @walker: Associated rhashtable walker
- * @slot: Current slot
- * @skip: Number of entries to skip in slot
- */
-struct rhashtable_iter {
- struct rhashtable *ht;
- struct rhash_head *p;
- struct rhlist_head *list;
- struct rhashtable_walker walker;
- unsigned int slot;
- unsigned int skip;
- bool end_of_table;
-};
-
-static inline unsigned long rht_marker(const struct rhashtable *ht, u32 hash)
-{
- return NULLS_MARKER(ht->p.nulls_base + hash);
-}
-
-#define INIT_RHT_NULLS_HEAD(ptr, ht, hash) \
- ((ptr) = (typeof(ptr)) rht_marker(ht, hash))
+#define INIT_RHT_NULLS_HEAD(ptr) \
+ ((ptr) = (typeof(ptr)) NULLS_MARKER(0))
static inline bool rht_is_a_nulls(const struct rhash_head *ptr)
{
return ((unsigned long) ptr & 1);
}
-static inline unsigned long rht_get_nulls_value(const struct rhash_head *ptr)
-{
- return ((unsigned long) ptr) >> 1;
-}
-
static inline void *rht_obj(const struct rhashtable *ht,
const struct rhash_head *he)
{
@@ -237,7 +92,7 @@ static inline void *rht_obj(const struct rhashtable *ht,
static inline unsigned int rht_bucket_index(const struct bucket_table *tbl,
unsigned int hash)
{
- return (hash >> RHT_HASH_RESERVED_SPACE) & (tbl->size - 1);
+ return hash & (tbl->size - 1);
}
static inline unsigned int rht_key_get_hash(struct rhashtable *ht,
@@ -376,11 +231,6 @@ static inline int lockdep_rht_bucket_is_held(const struct bucket_table *tbl,
}
#endif /* CONFIG_PROVE_LOCKING */
-int rhashtable_init(struct rhashtable *ht,
- const struct rhashtable_params *params);
-int rhltable_init(struct rhltable *hlt,
- const struct rhashtable_params *params);
-
void *rhashtable_insert_slow(struct rhashtable *ht, const void *key,
struct rhash_head *obj);
@@ -745,7 +595,7 @@ static inline void *__rhashtable_insert_fast(
lock = rht_bucket_lock(tbl, hash);
spin_lock_bh(lock);
- if (unlikely(rht_dereference_bucket(tbl->future_tbl, tbl, hash))) {
+ if (unlikely(rcu_access_pointer(tbl->future_tbl))) {
slow_path:
spin_unlock_bh(lock);
rcu_read_unlock();
diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h
index b72ebdff0b77..0940fda59872 100644
--- a/include/linux/ring_buffer.h
+++ b/include/linux/ring_buffer.h
@@ -164,7 +164,8 @@ void ring_buffer_record_disable(struct ring_buffer *buffer);
void ring_buffer_record_enable(struct ring_buffer *buffer);
void ring_buffer_record_off(struct ring_buffer *buffer);
void ring_buffer_record_on(struct ring_buffer *buffer);
-int ring_buffer_record_is_on(struct ring_buffer *buffer);
+bool ring_buffer_record_is_on(struct ring_buffer *buffer);
+bool ring_buffer_record_is_set_on(struct ring_buffer *buffer);
void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu);
void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu);
diff --git a/include/linux/rmi.h b/include/linux/rmi.h
index 64125443f8a6..5ef5c7c412a7 100644
--- a/include/linux/rmi.h
+++ b/include/linux/rmi.h
@@ -354,6 +354,8 @@ struct rmi_driver_data {
struct mutex irq_mutex;
struct input_dev *input;
+ struct irq_domain *irqdomain;
+
u8 pdt_props;
u8 num_rx_electrodes;
diff --git a/include/linux/rtc.h b/include/linux/rtc.h
index 6268208760e9..6aedc30003e7 100644
--- a/include/linux/rtc.h
+++ b/include/linux/rtc.h
@@ -87,16 +87,11 @@ struct rtc_class_ops {
int (*set_offset)(struct device *, long offset);
};
-typedef struct rtc_task {
- void (*func)(void *private_data);
- void *private_data;
-} rtc_task_t;
-
-
struct rtc_timer {
- struct rtc_task task;
struct timerqueue_node node;
ktime_t period;
+ void (*func)(void *private_data);
+ void *private_data;
int enabled;
};
@@ -121,8 +116,6 @@ struct rtc_device {
wait_queue_head_t irq_queue;
struct fasync_struct *async_queue;
- struct rtc_task *irq_task;
- spinlock_t irq_task_lock;
int irq_freq;
int max_user_freq;
@@ -204,14 +197,8 @@ extern void rtc_update_irq(struct rtc_device *rtc,
extern struct rtc_device *rtc_class_open(const char *name);
extern void rtc_class_close(struct rtc_device *rtc);
-extern int rtc_irq_register(struct rtc_device *rtc,
- struct rtc_task *task);
-extern void rtc_irq_unregister(struct rtc_device *rtc,
- struct rtc_task *task);
-extern int rtc_irq_set_state(struct rtc_device *rtc,
- struct rtc_task *task, int enabled);
-extern int rtc_irq_set_freq(struct rtc_device *rtc,
- struct rtc_task *task, int freq);
+extern int rtc_irq_set_state(struct rtc_device *rtc, int enabled);
+extern int rtc_irq_set_freq(struct rtc_device *rtc, int freq);
extern int rtc_update_irq_enable(struct rtc_device *rtc, unsigned int enabled);
extern int rtc_alarm_irq_enable(struct rtc_device *rtc, unsigned int enabled);
extern int rtc_dev_update_irq_enable_emul(struct rtc_device *rtc,
diff --git a/include/linux/rtmutex.h b/include/linux/rtmutex.h
index 1b92a28dd672..6fd615a0eea9 100644
--- a/include/linux/rtmutex.h
+++ b/include/linux/rtmutex.h
@@ -106,7 +106,14 @@ static inline int rt_mutex_is_locked(struct rt_mutex *lock)
extern void __rt_mutex_init(struct rt_mutex *lock, const char *name, struct lock_class_key *key);
extern void rt_mutex_destroy(struct rt_mutex *lock);
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+extern void rt_mutex_lock_nested(struct rt_mutex *lock, unsigned int subclass);
+#define rt_mutex_lock(lock) rt_mutex_lock_nested(lock, 0)
+#else
extern void rt_mutex_lock(struct rt_mutex *lock);
+#define rt_mutex_lock_nested(lock, subclass) rt_mutex_lock(lock)
+#endif
+
extern int rt_mutex_lock_interruptible(struct rt_mutex *lock);
extern int rt_mutex_timed_lock(struct rt_mutex *lock,
struct hrtimer_sleeper *timeout);
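
With CONFIG_DEBUG_LOCK_ALLOC enabled, rt_mutex_lock() becomes a wrapper around the new rt_mutex_lock_nested(), mirroring mutex_lock_nested(); without lockdep the subclass argument is simply dropped. A small sketch of the intended call pattern — the object type and the nesting scenario are invented for illustration, and SINGLE_DEPTH_NESTING comes from <linux/lockdep.h>:

struct demo_obj {
	struct rt_mutex lock;
	/* ... */
};

/* hypothetical objects whose locks share one lock class */
static struct demo_obj *obj_a, *obj_b;

static void demo_lock_pair(void)
{
	rt_mutex_lock(&obj_a->lock);
	/* obj_b->lock is in the same lock class, so annotate the subclass
	 * to keep lockdep from flagging this as recursive locking. */
	rt_mutex_lock_nested(&obj_b->lock, SINGLE_DEPTH_NESTING);
}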
diff --git a/include/linux/sbitmap.h b/include/linux/sbitmap.h
index e6539536dea9..804a50983ec5 100644
--- a/include/linux/sbitmap.h
+++ b/include/linux/sbitmap.h
@@ -23,6 +23,8 @@
#include <linux/kernel.h>
#include <linux/slab.h>
+struct seq_file;
+
/**
* struct sbitmap_word - Word in a &struct sbitmap.
*/
diff --git a/include/linux/scatterlist.h b/include/linux/scatterlist.h
index 51f52020ad5f..093aa57120b0 100644
--- a/include/linux/scatterlist.h
+++ b/include/linux/scatterlist.h
@@ -9,9 +9,6 @@
#include <asm/io.h>
struct scatterlist {
-#ifdef CONFIG_DEBUG_SG
- unsigned long sg_magic;
-#endif
unsigned long page_link;
unsigned int offset;
unsigned int length;
@@ -64,7 +61,6 @@ struct sg_table {
*
*/
-#define SG_MAGIC 0x87654321
#define SG_CHAIN 0x01UL
#define SG_END 0x02UL
@@ -98,7 +94,6 @@ static inline void sg_assign_page(struct scatterlist *sg, struct page *page)
*/
BUG_ON((unsigned long) page & (SG_CHAIN | SG_END));
#ifdef CONFIG_DEBUG_SG
- BUG_ON(sg->sg_magic != SG_MAGIC);
BUG_ON(sg_is_chain(sg));
#endif
sg->page_link = page_link | (unsigned long) page;
@@ -129,7 +124,6 @@ static inline void sg_set_page(struct scatterlist *sg, struct page *page,
static inline struct page *sg_page(struct scatterlist *sg)
{
#ifdef CONFIG_DEBUG_SG
- BUG_ON(sg->sg_magic != SG_MAGIC);
BUG_ON(sg_is_chain(sg));
#endif
return (struct page *)((sg)->page_link & ~(SG_CHAIN | SG_END));
@@ -195,9 +189,6 @@ static inline void sg_chain(struct scatterlist *prv, unsigned int prv_nents,
**/
static inline void sg_mark_end(struct scatterlist *sg)
{
-#ifdef CONFIG_DEBUG_SG
- BUG_ON(sg->sg_magic != SG_MAGIC);
-#endif
/*
* Set termination bit, clear potential chain bit
*/
@@ -215,9 +206,6 @@ static inline void sg_mark_end(struct scatterlist *sg)
**/
static inline void sg_unmark_end(struct scatterlist *sg)
{
-#ifdef CONFIG_DEBUG_SG
- BUG_ON(sg->sg_magic != SG_MAGIC);
-#endif
sg->page_link &= ~SG_END;
}
@@ -260,12 +248,6 @@ static inline void *sg_virt(struct scatterlist *sg)
static inline void sg_init_marker(struct scatterlist *sgl,
unsigned int nents)
{
-#ifdef CONFIG_DEBUG_SG
- unsigned int i;
-
- for (i = 0; i < nents; i++)
- sgl[i].sg_magic = SG_MAGIC;
-#endif
sg_mark_end(&sgl[nents - 1]);
}
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 87bf02d93a27..977cb57d7bc9 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -118,7 +118,7 @@ struct task_group;
* the comment with set_special_state().
*/
#define is_special_task_state(state) \
- ((state) & (__TASK_STOPPED | __TASK_TRACED | TASK_DEAD))
+ ((state) & (__TASK_STOPPED | __TASK_TRACED | TASK_PARKED | TASK_DEAD))
#define __set_current_state(state_value) \
do { \
@@ -167,8 +167,8 @@ struct task_group;
* need_sleep = false;
* wake_up_state(p, TASK_UNINTERRUPTIBLE);
*
- * Where wake_up_state() (and all other wakeup primitives) imply enough
- * barriers to order the store of the variable against wakeup.
+ * where wake_up_state() executes a full memory barrier before accessing the
+ * task state.
*
* Wakeup will do: if (@state & p->state) p->state = TASK_RUNNING, that is,
* once it observes the TASK_UNINTERRUPTIBLE store the waking CPU can issue a
@@ -722,8 +722,8 @@ struct task_struct {
unsigned restore_sigmask:1;
#endif
#ifdef CONFIG_MEMCG
- unsigned memcg_may_oom:1;
-#ifndef CONFIG_SLOB
+ unsigned in_user_fault:1;
+#ifdef CONFIG_MEMCG_KMEM
unsigned memcg_kmem_skip_account:1;
#endif
#endif
@@ -734,6 +734,10 @@ struct task_struct {
/* disallow userland-initiated cgroup migration */
unsigned no_cgroup_migration:1;
#endif
+#ifdef CONFIG_BLK_CGROUP
+ /* to be used once the psi infrastructure lands upstream. */
+ unsigned use_memdelay:1;
+#endif
unsigned long atomic_flags; /* Flags requiring atomic access. */
@@ -775,7 +779,8 @@ struct task_struct {
struct list_head ptrace_entry;
/* PID/PID hash table linkage. */
- struct pid_link pids[PIDTYPE_MAX];
+ struct pid *thread_pid;
+ struct hlist_node pid_links[PIDTYPE_MAX];
struct list_head thread_group;
struct list_head thread_node;
@@ -849,6 +854,7 @@ struct task_struct {
#endif
#ifdef CONFIG_DETECT_HUNG_TASK
unsigned long last_switch_count;
+ unsigned long last_switch_time;
#endif
/* Filesystem information: */
struct fs_struct *fs;
@@ -1017,7 +1023,6 @@ struct task_struct {
u64 last_sum_exec_runtime;
struct callback_head numa_work;
- struct list_head numa_entry;
struct numa_group *numa_group;
/*
@@ -1149,6 +1154,13 @@ struct task_struct {
/* Number of pages to reclaim on returning to userland: */
unsigned int memcg_nr_pages_over_high;
+
+ /* Used by memcontrol for targeted memcg charge: */
+ struct mem_cgroup *active_memcg;
+#endif
+
+#ifdef CONFIG_BLK_CGROUP
+ struct request_queue *throttle_queue;
#endif
#ifdef CONFIG_UPROBES
@@ -1199,27 +1211,7 @@ struct task_struct {
static inline struct pid *task_pid(struct task_struct *task)
{
- return task->pids[PIDTYPE_PID].pid;
-}
-
-static inline struct pid *task_tgid(struct task_struct *task)
-{
- return task->group_leader->pids[PIDTYPE_PID].pid;
-}
-
-/*
- * Without tasklist or RCU lock it is not safe to dereference
- * the result of task_pgrp/task_session even if task == current,
- * we can race with another thread doing sys_setsid/sys_setpgid.
- */
-static inline struct pid *task_pgrp(struct task_struct *task)
-{
- return task->group_leader->pids[PIDTYPE_PGID].pid;
-}
-
-static inline struct pid *task_session(struct task_struct *task)
-{
- return task->group_leader->pids[PIDTYPE_SID].pid;
+ return task->thread_pid;
}
/*
@@ -1268,7 +1260,7 @@ static inline pid_t task_tgid_nr(struct task_struct *tsk)
*/
static inline int pid_alive(const struct task_struct *p)
{
- return p->pids[PIDTYPE_PID].pid != NULL;
+ return p->thread_pid != NULL;
}
static inline pid_t task_pgrp_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
@@ -1294,12 +1286,12 @@ static inline pid_t task_session_vnr(struct task_struct *tsk)
static inline pid_t task_tgid_nr_ns(struct task_struct *tsk, struct pid_namespace *ns)
{
- return __task_pid_nr_ns(tsk, __PIDTYPE_TGID, ns);
+ return __task_pid_nr_ns(tsk, PIDTYPE_TGID, ns);
}
static inline pid_t task_tgid_vnr(struct task_struct *tsk)
{
- return __task_pid_nr_ns(tsk, __PIDTYPE_TGID, NULL);
+ return __task_pid_nr_ns(tsk, PIDTYPE_TGID, NULL);
}
static inline pid_t task_ppid_nr_ns(const struct task_struct *tsk, struct pid_namespace *ns)
@@ -1799,20 +1791,22 @@ static inline void rseq_set_notify_resume(struct task_struct *t)
set_tsk_thread_flag(t, TIF_NOTIFY_RESUME);
}
-void __rseq_handle_notify_resume(struct pt_regs *regs);
+void __rseq_handle_notify_resume(struct ksignal *sig, struct pt_regs *regs);
-static inline void rseq_handle_notify_resume(struct pt_regs *regs)
+static inline void rseq_handle_notify_resume(struct ksignal *ksig,
+ struct pt_regs *regs)
{
if (current->rseq)
- __rseq_handle_notify_resume(regs);
+ __rseq_handle_notify_resume(ksig, regs);
}
-static inline void rseq_signal_deliver(struct pt_regs *regs)
+static inline void rseq_signal_deliver(struct ksignal *ksig,
+ struct pt_regs *regs)
{
preempt_disable();
__set_bit(RSEQ_EVENT_SIGNAL_BIT, &current->rseq_event_mask);
preempt_enable();
- rseq_handle_notify_resume(regs);
+ rseq_handle_notify_resume(ksig, regs);
}
/* rseq_preempt() requires preemption to be disabled. */
@@ -1831,9 +1825,7 @@ static inline void rseq_migrate(struct task_struct *t)
/*
* If parent process has a registered restartable sequences area, the
- * child inherits. Only applies when forking a process, not a thread. In
- * case a parent fork() in the middle of a restartable sequence, set the
- * resume notifier to force the child to retry.
+ * child inherits. Only applies when forking a process, not a thread.
*/
static inline void rseq_fork(struct task_struct *t, unsigned long clone_flags)
{
@@ -1847,7 +1839,6 @@ static inline void rseq_fork(struct task_struct *t, unsigned long clone_flags)
t->rseq_len = current->rseq_len;
t->rseq_sig = current->rseq_sig;
t->rseq_event_mask = current->rseq_event_mask;
- rseq_preempt(t);
}
}
@@ -1864,10 +1855,12 @@ static inline void rseq_execve(struct task_struct *t)
static inline void rseq_set_notify_resume(struct task_struct *t)
{
}
-static inline void rseq_handle_notify_resume(struct pt_regs *regs)
+static inline void rseq_handle_notify_resume(struct ksignal *ksig,
+ struct pt_regs *regs)
{
}
-static inline void rseq_signal_deliver(struct pt_regs *regs)
+static inline void rseq_signal_deliver(struct ksignal *ksig,
+ struct pt_regs *regs)
{
}
static inline void rseq_preempt(struct task_struct *t)
diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h
index 44d356f5e47c..aebb370a0006 100644
--- a/include/linux/sched/mm.h
+++ b/include/linux/sched/mm.h
@@ -248,6 +248,43 @@ static inline void memalloc_noreclaim_restore(unsigned int flags)
current->flags = (current->flags & ~PF_MEMALLOC) | flags;
}
+#ifdef CONFIG_MEMCG
+/**
+ * memalloc_use_memcg - Starts the remote memcg charging scope.
+ * @memcg: memcg to charge.
+ *
+ * This function marks the beginning of the remote memcg charging scope. All the
+ * __GFP_ACCOUNT allocations till the end of the scope will be charged to the
+ * given memcg.
+ *
+ * NOTE: This function is not nesting safe.
+ */
+static inline void memalloc_use_memcg(struct mem_cgroup *memcg)
+{
+ WARN_ON_ONCE(current->active_memcg);
+ current->active_memcg = memcg;
+}
+
+/**
+ * memalloc_unuse_memcg - Ends the remote memcg charging scope.
+ *
+ * This function marks the end of the remote memcg charging scope started by
+ * memalloc_use_memcg().
+ */
+static inline void memalloc_unuse_memcg(void)
+{
+ current->active_memcg = NULL;
+}
+#else
+static inline void memalloc_use_memcg(struct mem_cgroup *memcg)
+{
+}
+
+static inline void memalloc_unuse_memcg(void)
+{
+}
+#endif
+
#ifdef CONFIG_MEMBARRIER
enum {
MEMBARRIER_STATE_PRIVATE_EXPEDITED_READY = (1U << 0),
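
memalloc_use_memcg()/memalloc_unuse_memcg() bracket a region in which __GFP_ACCOUNT allocations made by the current task are charged to the given memcg instead of the task's own. A minimal sketch of the scope — the caller, the buffer size, and the idea of charging a "remote" cgroup are illustrative only:

static void *demo_alloc_charged(struct mem_cgroup *memcg, size_t size)
{
	void *buf;

	memalloc_use_memcg(memcg);	/* not nesting safe, per the comment above */
	buf = kmalloc(size, GFP_KERNEL | __GFP_ACCOUNT);
	memalloc_unuse_memcg();

	return buf;
}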
diff --git a/include/linux/sched/signal.h b/include/linux/sched/signal.h
index 113d1ad1ced7..1be35729c2c5 100644
--- a/include/linux/sched/signal.h
+++ b/include/linux/sched/signal.h
@@ -69,6 +69,11 @@ struct thread_group_cputimer {
bool checking_timer;
};
+struct multiprocess_signals {
+ sigset_t signal;
+ struct hlist_node node;
+};
+
/*
* NOTE! "signal_struct" does not have its own
* locking, because a shared signal_struct always
@@ -90,6 +95,9 @@ struct signal_struct {
/* shared signal handling: */
struct sigpending shared_pending;
+ /* For collecting multiprocess signals during fork */
+ struct hlist_head multiprocess;
+
/* thread group exit support */
int group_exit_code;
/* overloaded:
@@ -146,7 +154,8 @@ struct signal_struct {
#endif
- struct pid *leader_pid;
+ /* PID/PID hash table linkage. */
+ struct pid *pids[PIDTYPE_MAX];
#ifdef CONFIG_NO_HZ_FULL
atomic_t tick_dep_mask;
@@ -314,7 +323,7 @@ int force_sig_pkuerr(void __user *addr, u32 pkey);
int force_sig_ptrace_errno_trap(int errno, void __user *addr);
extern int send_sig_info(int, struct siginfo *, struct task_struct *);
-extern int force_sigsegv(int, struct task_struct *);
+extern void force_sigsegv(int sig, struct task_struct *p);
extern int force_sig_info(int, struct siginfo *, struct task_struct *);
extern int __kill_pgrp_info(int sig, struct siginfo *info, struct pid *pgrp);
extern int kill_pid_info(int sig, struct siginfo *info, struct pid *pid);
@@ -329,7 +338,7 @@ extern int send_sig(int, struct task_struct *, int);
extern int zap_other_threads(struct task_struct *p);
extern struct sigqueue *sigqueue_alloc(void);
extern void sigqueue_free(struct sigqueue *);
-extern int send_sigqueue(struct sigqueue *, struct task_struct *, int group);
+extern int send_sigqueue(struct sigqueue *, struct pid *, enum pid_type);
extern int do_sigaction(int, struct k_sigaction *, struct k_sigaction *);
static inline int restart_syscall(void)
@@ -371,6 +380,7 @@ static inline int signal_pending_state(long state, struct task_struct *p)
*/
extern void recalc_sigpending_and_wake(struct task_struct *t);
extern void recalc_sigpending(void);
+extern void calculate_sigpending(void);
extern void signal_wake_up_state(struct task_struct *t, unsigned int state);
@@ -383,6 +393,8 @@ static inline void ptrace_signal_wake_up(struct task_struct *t, bool resume)
signal_wake_up_state(t, resume ? __TASK_TRACED : 0);
}
+void task_join_group_stop(struct task_struct *task);
+
#ifdef TIF_RESTORE_SIGMASK
/*
* Legacy restore_sigmask accessors. These are inefficient on
@@ -556,6 +568,37 @@ extern bool current_is_single_threaded(void);
typedef int (*proc_visitor)(struct task_struct *p, void *data);
void walk_process_tree(struct task_struct *top, proc_visitor, void *);
+static inline
+struct pid *task_pid_type(struct task_struct *task, enum pid_type type)
+{
+ struct pid *pid;
+ if (type == PIDTYPE_PID)
+ pid = task_pid(task);
+ else
+ pid = task->signal->pids[type];
+ return pid;
+}
+
+static inline struct pid *task_tgid(struct task_struct *task)
+{
+ return task->signal->pids[PIDTYPE_TGID];
+}
+
+/*
+ * Without tasklist or RCU lock it is not safe to dereference
+ * the result of task_pgrp/task_session even if task == current,
+ * we can race with another thread doing sys_setsid/sys_setpgid.
+ */
+static inline struct pid *task_pgrp(struct task_struct *task)
+{
+ return task->signal->pids[PIDTYPE_PGID];
+}
+
+static inline struct pid *task_session(struct task_struct *task)
+{
+ return task->signal->pids[PIDTYPE_SID];
+}
+
static inline int get_nr_threads(struct task_struct *tsk)
{
return tsk->signal->nr_threads;
@@ -574,7 +617,7 @@ static inline bool thread_group_leader(struct task_struct *p)
*/
static inline bool has_group_leader_pid(struct task_struct *p)
{
- return task_pid(p) == p->signal->leader_pid;
+ return task_pid(p) == task_tgid(p);
}
static inline
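
With the per-task pids[] array replaced by thread_pid plus signal->pids[], task_pid_type() becomes the single accessor that picks the right struct pid for a given pid_type. A short sketch of how a signal-delivery style caller might use it — the wrapper function itself is hypothetical:

static struct pid *demo_pid_for_delivery(struct task_struct *tsk, bool to_group)
{
	enum pid_type type = to_group ? PIDTYPE_TGID : PIDTYPE_PID;

	/* PIDTYPE_PID resolves via tsk->thread_pid; all other types come
	 * from the shared tsk->signal->pids[] array. */
	return task_pid_type(tsk, type);
}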
diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h
index 1c1a1512ec55..a9c32daeb9d8 100644
--- a/include/linux/sched/sysctl.h
+++ b/include/linux/sched/sysctl.h
@@ -10,6 +10,7 @@ struct ctl_table;
extern int sysctl_hung_task_check_count;
extern unsigned int sysctl_hung_task_panic;
extern unsigned long sysctl_hung_task_timeout_secs;
+extern unsigned long sysctl_hung_task_check_interval_secs;
extern int sysctl_hung_task_warnings;
extern int proc_dohung_task_timeout_secs(struct ctl_table *table, int write,
void __user *buffer,
@@ -40,7 +41,6 @@ extern unsigned int sysctl_numa_balancing_scan_size;
#ifdef CONFIG_SCHED_DEBUG
extern __read_mostly unsigned int sysctl_sched_migration_cost;
extern __read_mostly unsigned int sysctl_sched_nr_migrate;
-extern __read_mostly unsigned int sysctl_sched_time_avg;
int sched_proc_update_handler(struct ctl_table *table, int write,
void __user *buffer, size_t *length,
diff --git a/include/linux/sched/task.h b/include/linux/sched/task.h
index 5be31eb7b266..108ede99e533 100644
--- a/include/linux/sched/task.h
+++ b/include/linux/sched/task.h
@@ -75,7 +75,7 @@ extern long _do_fork(unsigned long, unsigned long, unsigned long, int __user *,
extern long do_fork(unsigned long, unsigned long, unsigned long, int __user *, int __user *);
struct task_struct *fork_idle(int);
extern pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags);
-extern long kernel_wait4(pid_t, int *, int, struct rusage *);
+extern long kernel_wait4(pid_t, int __user *, int, struct rusage *);
extern void free_task(struct task_struct *tsk);
diff --git a/include/linux/sched/user.h b/include/linux/sched/user.h
index 96fe289c4c6e..39ad98c09c58 100644
--- a/include/linux/sched/user.h
+++ b/include/linux/sched/user.h
@@ -4,6 +4,7 @@
#include <linux/uidgid.h>
#include <linux/atomic.h>
+#include <linux/refcount.h>
#include <linux/ratelimit.h>
struct key;
@@ -12,7 +13,7 @@ struct key;
* Some day this will be a full-fledged user tracking system..
*/
struct user_struct {
- atomic_t __count; /* reference count */
+ refcount_t __count; /* reference count */
atomic_t processes; /* How many processes does this user have? */
atomic_t sigpending; /* How many pending signals does this user have? */
#ifdef CONFIG_FANOTIFY
@@ -59,7 +60,7 @@ extern struct user_struct root_user;
extern struct user_struct * alloc_uid(kuid_t);
static inline struct user_struct *get_uid(struct user_struct *u)
{
- atomic_inc(&u->__count);
+ refcount_inc(&u->__count);
return u;
}
extern void free_uid(struct user_struct *);
diff --git a/include/linux/sched_clock.h b/include/linux/sched_clock.h
index 411b52e424e1..abe28d5cb3f4 100644
--- a/include/linux/sched_clock.h
+++ b/include/linux/sched_clock.h
@@ -9,17 +9,16 @@
#define LINUX_SCHED_CLOCK
#ifdef CONFIG_GENERIC_SCHED_CLOCK
-extern void sched_clock_postinit(void);
+extern void generic_sched_clock_init(void);
extern void sched_clock_register(u64 (*read)(void), int bits,
unsigned long rate);
#else
-static inline void sched_clock_postinit(void) { }
+static inline void generic_sched_clock_init(void) { }
static inline void sched_clock_register(u64 (*read)(void), int bits,
unsigned long rate)
{
- ;
}
#endif
diff --git a/include/linux/sctp.h b/include/linux/sctp.h
index b36c76635f18..83d94341e003 100644
--- a/include/linux/sctp.h
+++ b/include/linux/sctp.h
@@ -801,4 +801,11 @@ struct sctp_strreset_resptsn {
__be32 receivers_next_tsn;
};
+enum {
+ SCTP_DSCP_SET_MASK = 0x1,
+ SCTP_DSCP_VAL_MASK = 0xfc,
+ SCTP_FLOWLABEL_SET_MASK = 0x100000,
+ SCTP_FLOWLABEL_VAL_MASK = 0xfffff
+};
+
#endif /* __LINUX_SCTP_H__ */
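
The new masks encode a "value was set" flag together with the value itself: bit 0 flags a configured DSCP whose six value bits live in 0xfc, while bit 20 flags a configured IPv6 flow label held in the low 20 bits. A small decoding sketch — the helper name and the combined field are illustrative:

static u8 demo_dscp_from_field(u32 field)
{
	/* SCTP_DSCP_SET_MASK says whether a DSCP was configured at all;
	 * SCTP_DSCP_VAL_MASK extracts the value (already in TOS position). */
	if (!(field & SCTP_DSCP_SET_MASK))
		return 0;

	return field & SCTP_DSCP_VAL_MASK;
}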
diff --git a/include/linux/security.h b/include/linux/security.h
index 63030c85ee19..75f4156c84d7 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -159,6 +159,27 @@ extern int mmap_min_addr_handler(struct ctl_table *table, int write,
typedef int (*initxattrs) (struct inode *inode,
const struct xattr *xattr_array, void *fs_data);
+
+/* Keep the kernel_load_data_id enum in sync with kernel_read_file_id */
+#define __data_id_enumify(ENUM, dummy) LOADING_ ## ENUM,
+#define __data_id_stringify(dummy, str) #str,
+
+enum kernel_load_data_id {
+ __kernel_read_file_id(__data_id_enumify)
+};
+
+static const char * const kernel_load_data_str[] = {
+ __kernel_read_file_id(__data_id_stringify)
+};
+
+static inline const char *kernel_load_data_id_str(enum kernel_load_data_id id)
+{
+ if ((unsigned)id >= LOADING_MAX_ID)
+ return kernel_load_data_str[LOADING_UNKNOWN];
+
+ return kernel_load_data_str[id];
+}
+
#ifdef CONFIG_SECURITY
struct security_mnt_opts {
@@ -309,7 +330,7 @@ void security_file_set_fowner(struct file *file);
int security_file_send_sigiotask(struct task_struct *tsk,
struct fown_struct *fown, int sig);
int security_file_receive(struct file *file);
-int security_file_open(struct file *file, const struct cred *cred);
+int security_file_open(struct file *file);
int security_task_alloc(struct task_struct *task, unsigned long clone_flags);
void security_task_free(struct task_struct *task);
int security_cred_alloc_blank(struct cred *cred, gfp_t gfp);
@@ -320,6 +341,7 @@ void security_cred_getsecid(const struct cred *c, u32 *secid);
int security_kernel_act_as(struct cred *new, u32 secid);
int security_kernel_create_files_as(struct cred *new, struct inode *inode);
int security_kernel_module_request(char *kmod_name);
+int security_kernel_load_data(enum kernel_load_data_id id);
int security_kernel_read_file(struct file *file, enum kernel_read_file_id id);
int security_kernel_post_read_file(struct file *file, char *buf, loff_t size,
enum kernel_read_file_id id);
@@ -858,8 +880,7 @@ static inline int security_file_receive(struct file *file)
return 0;
}
-static inline int security_file_open(struct file *file,
- const struct cred *cred)
+static inline int security_file_open(struct file *file)
{
return 0;
}
@@ -909,6 +930,11 @@ static inline int security_kernel_module_request(char *kmod_name)
return 0;
}
+static inline int security_kernel_load_data(enum kernel_load_data_id id)
+{
+ return 0;
+}
+
static inline int security_kernel_read_file(struct file *file,
enum kernel_read_file_id id)
{
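
security_kernel_load_data() gives LSMs a hook for loads that have no file to inspect, and kernel_load_data_id_str() turns the enum back into a printable name. A hedged sketch of a caller — the surrounding function is invented; LOADING_FIRMWARE is one of the ids generated from the shared __kernel_read_file_id() list:

static int demo_load_blob(void)
{
	int ret;

	ret = security_kernel_load_data(LOADING_FIRMWARE);
	if (ret) {
		pr_err("loading %s denied by LSM\n",
		       kernel_load_data_id_str(LOADING_FIRMWARE));
		return ret;
	}

	/* ... copy the blob from userspace and consume it ... */
	return 0;
}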
diff --git a/include/linux/selection.h b/include/linux/selection.h
index 5b278ce99d8d..a8f5b97b216f 100644
--- a/include/linux/selection.h
+++ b/include/linux/selection.h
@@ -32,6 +32,7 @@ extern unsigned char default_blu[];
extern unsigned short *screen_pos(struct vc_data *vc, int w_offset, int viewed);
extern u16 screen_glyph(struct vc_data *vc, int offset);
+extern u32 screen_glyph_unicode(struct vc_data *vc, int offset);
extern void complement_pos(struct vc_data *vc, int offset);
extern void invert_screen(struct vc_data *vc, int offset, int count, int shift);
@@ -42,4 +43,9 @@ extern u16 vcs_scr_readw(struct vc_data *vc, const u16 *org);
extern void vcs_scr_writew(struct vc_data *vc, u16 val, u16 *org);
extern void vcs_scr_updated(struct vc_data *vc);
+extern int vc_uniscr_check(struct vc_data *vc);
+extern void vc_uniscr_copy_line(struct vc_data *vc, void *dest, int viewed,
+ unsigned int row, unsigned int col,
+ unsigned int nr);
+
#endif
diff --git a/include/linux/serial_8250.h b/include/linux/serial_8250.h
index 76b9db71e489..18e21427bce4 100644
--- a/include/linux/serial_8250.h
+++ b/include/linux/serial_8250.h
@@ -160,6 +160,9 @@ extern void serial8250_do_shutdown(struct uart_port *port);
extern void serial8250_do_pm(struct uart_port *port, unsigned int state,
unsigned int oldstate);
extern void serial8250_do_set_mctrl(struct uart_port *port, unsigned int mctrl);
+extern void serial8250_do_set_divisor(struct uart_port *port, unsigned int baud,
+ unsigned int quot,
+ unsigned int quot_frac);
extern int fsl8250_handle_irq(struct uart_port *port);
int serial8250_handle_irq(struct uart_port *port, unsigned int iir);
unsigned char serial8250_rx_chars(struct uart_8250_port *up, unsigned char lsr);
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
index 06ea4eeb09ab..406edae44ca3 100644
--- a/include/linux/serial_core.h
+++ b/include/linux/serial_core.h
@@ -127,6 +127,13 @@ struct uart_port {
struct ktermios *);
unsigned int (*get_mctrl)(struct uart_port *);
void (*set_mctrl)(struct uart_port *, unsigned int);
+ unsigned int (*get_divisor)(struct uart_port *,
+ unsigned int baud,
+ unsigned int *frac);
+ void (*set_divisor)(struct uart_port *,
+ unsigned int baud,
+ unsigned int quot,
+ unsigned int quot_frac);
int (*startup)(struct uart_port *port);
void (*shutdown)(struct uart_port *port);
void (*throttle)(struct uart_port *port);
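
The new get_divisor()/set_divisor() callbacks in struct uart_port let a driver override just the baud-divisor calculation while reusing the generic termios handling; serial8250_do_set_divisor() stays available as the default to chain to. A rough sketch of a driver-side override — the board quirk and the function itself are hypothetical:

static void demo_set_divisor(struct uart_port *port, unsigned int baud,
			     unsigned int quot, unsigned int quot_frac)
{
	/* Apply a board-specific fractional divider here first, then fall
	 * back to the generic 8250 DLL/DLM programming. */
	serial8250_do_set_divisor(port, baud, quot, quot_frac);
}

A driver would install it with port->set_divisor = demo_set_divisor before registering the port.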
diff --git a/include/linux/sfp.h b/include/linux/sfp.h
index ebce9e24906a..d37518e89db2 100644
--- a/include/linux/sfp.h
+++ b/include/linux/sfp.h
@@ -231,6 +231,50 @@ struct sfp_eeprom_id {
struct sfp_eeprom_ext ext;
} __packed;
+struct sfp_diag {
+ __be16 temp_high_alarm;
+ __be16 temp_low_alarm;
+ __be16 temp_high_warn;
+ __be16 temp_low_warn;
+ __be16 volt_high_alarm;
+ __be16 volt_low_alarm;
+ __be16 volt_high_warn;
+ __be16 volt_low_warn;
+ __be16 bias_high_alarm;
+ __be16 bias_low_alarm;
+ __be16 bias_high_warn;
+ __be16 bias_low_warn;
+ __be16 txpwr_high_alarm;
+ __be16 txpwr_low_alarm;
+ __be16 txpwr_high_warn;
+ __be16 txpwr_low_warn;
+ __be16 rxpwr_high_alarm;
+ __be16 rxpwr_low_alarm;
+ __be16 rxpwr_high_warn;
+ __be16 rxpwr_low_warn;
+ __be16 laser_temp_high_alarm;
+ __be16 laser_temp_low_alarm;
+ __be16 laser_temp_high_warn;
+ __be16 laser_temp_low_warn;
+ __be16 tec_cur_high_alarm;
+ __be16 tec_cur_low_alarm;
+ __be16 tec_cur_high_warn;
+ __be16 tec_cur_low_warn;
+ __be32 cal_rxpwr4;
+ __be32 cal_rxpwr3;
+ __be32 cal_rxpwr2;
+ __be32 cal_rxpwr1;
+ __be32 cal_rxpwr0;
+ __be16 cal_txi_slope;
+ __be16 cal_txi_offset;
+ __be16 cal_txpwr_slope;
+ __be16 cal_txpwr_offset;
+ __be16 cal_t_slope;
+ __be16 cal_t_offset;
+ __be16 cal_v_slope;
+ __be16 cal_v_offset;
+} __packed;
+
/* SFP EEPROM registers */
enum {
SFP_PHYS_ID = 0x00,
@@ -384,7 +428,33 @@ enum {
SFP_TEC_CUR = 0x6c,
SFP_STATUS = 0x6e,
- SFP_ALARM = 0x70,
+ SFP_ALARM0 = 0x70,
+ SFP_ALARM0_TEMP_HIGH = BIT(7),
+ SFP_ALARM0_TEMP_LOW = BIT(6),
+ SFP_ALARM0_VCC_HIGH = BIT(5),
+ SFP_ALARM0_VCC_LOW = BIT(4),
+ SFP_ALARM0_TX_BIAS_HIGH = BIT(3),
+ SFP_ALARM0_TX_BIAS_LOW = BIT(2),
+ SFP_ALARM0_TXPWR_HIGH = BIT(1),
+ SFP_ALARM0_TXPWR_LOW = BIT(0),
+
+ SFP_ALARM1 = 0x71,
+ SFP_ALARM1_RXPWR_HIGH = BIT(7),
+ SFP_ALARM1_RXPWR_LOW = BIT(6),
+
+ SFP_WARN0 = 0x74,
+ SFP_WARN0_TEMP_HIGH = BIT(7),
+ SFP_WARN0_TEMP_LOW = BIT(6),
+ SFP_WARN0_VCC_HIGH = BIT(5),
+ SFP_WARN0_VCC_LOW = BIT(4),
+ SFP_WARN0_TX_BIAS_HIGH = BIT(3),
+ SFP_WARN0_TX_BIAS_LOW = BIT(2),
+ SFP_WARN0_TXPWR_HIGH = BIT(1),
+ SFP_WARN0_TXPWR_LOW = BIT(0),
+
+ SFP_WARN1 = 0x75,
+ SFP_WARN1_RXPWR_HIGH = BIT(7),
+ SFP_WARN1_RXPWR_LOW = BIT(6),
SFP_EXT_STATUS = 0x76,
SFP_VSL = 0x78,
diff --git a/include/linux/shrinker.h b/include/linux/shrinker.h
index 6794490f25b2..9443cafd1969 100644
--- a/include/linux/shrinker.h
+++ b/include/linux/shrinker.h
@@ -12,6 +12,9 @@
struct shrink_control {
gfp_t gfp_mask;
+ /* current node being shrunk (for NUMA aware shrinkers) */
+ int nid;
+
/*
* How many objects scan_objects should scan and try to reclaim.
* This is reset before every call, so it is safe for callees
@@ -26,20 +29,20 @@ struct shrink_control {
*/
unsigned long nr_scanned;
- /* current node being shrunk (for NUMA aware shrinkers) */
- int nid;
-
/* current memcg being shrunk (for memcg aware shrinkers) */
struct mem_cgroup *memcg;
};
#define SHRINK_STOP (~0UL)
+#define SHRINK_EMPTY (~0UL - 1)
/*
* A callback you can register to apply pressure to ageable caches.
*
* @count_objects should return the number of freeable items in the cache. If
- * there are no objects to free or the number of freeable items cannot be
- * determined, it should return 0. No deadlock checks should be done during the
+ * there are no objects to free, it should return SHRINK_EMPTY, while 0 is
+ * returned in cases where the number of freeable items cannot be determined
+ * or the shrinker should skip this cache for now (e.g., when their number
+ * is below the shrinkable limit). No deadlock checks should be done during the
* count callback - the shrinker relies on aggregating scan counts that couldn't
* be executed due to potential deadlocks to be run at a later call when the
* deadlock condition is no longer pending.
@@ -60,12 +63,16 @@ struct shrinker {
unsigned long (*scan_objects)(struct shrinker *,
struct shrink_control *sc);
- int seeks; /* seeks to recreate an obj */
long batch; /* reclaim batch size, 0 = default */
- unsigned long flags;
+ int seeks; /* seeks to recreate an obj */
+ unsigned flags;
/* These are for internal use */
struct list_head list;
+#ifdef CONFIG_MEMCG_KMEM
+ /* ID in shrinker_idr */
+ int id;
+#endif
/* objs pending delete, per node */
atomic_long_t *nr_deferred;
};
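
SHRINK_EMPTY lets a memcg-aware shrinker distinguish "this cache is truly empty" from the old 0 return, which now means "cannot tell, or skip this round"; the core uses the distinction to clear per-memcg shrinker bits. A sketch of a count_objects callback using it — the counter is a made-up stand-in for a real cache's bookkeeping:

static atomic_long_t demo_cache_items;

static unsigned long demo_count_objects(struct shrinker *sh,
					struct shrink_control *sc)
{
	unsigned long nr = atomic_long_read(&demo_cache_items);

	if (!nr)
		return SHRINK_EMPTY;	/* nothing cached at all */

	return nr;			/* estimate of freeable objects */
}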
diff --git a/include/linux/signal.h b/include/linux/signal.h
index 3c5200137b24..3d4cd5db30a9 100644
--- a/include/linux/signal.h
+++ b/include/linux/signal.h
@@ -254,18 +254,20 @@ static inline int valid_signal(unsigned long sig)
struct timespec;
struct pt_regs;
+enum pid_type;
extern int next_signal(struct sigpending *pending, sigset_t *mask);
extern int do_send_sig_info(int sig, struct siginfo *info,
- struct task_struct *p, bool group);
-extern int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p);
+ struct task_struct *p, enum pid_type type);
+extern int group_send_sig_info(int sig, struct siginfo *info,
+ struct task_struct *p, enum pid_type type);
extern int __group_send_sig_info(int, struct siginfo *, struct task_struct *);
extern int sigprocmask(int, sigset_t *, sigset_t *);
extern void set_current_blocked(sigset_t *);
extern void __set_current_blocked(const sigset_t *);
extern int show_unhandled_signals;
-extern int get_signal(struct ksignal *ksig);
+extern bool get_signal(struct ksignal *ksig);
extern void signal_setup_done(int failed, struct ksignal *ksig, int stepping);
extern void exit_signals(struct task_struct *tsk);
extern void kernel_sigaction(int, __sighandler_t);
@@ -287,7 +289,7 @@ static inline void disallow_signal(int sig)
extern struct kmem_cache *sighand_cachep;
-int unhandled_signal(struct task_struct *tsk, int sig);
+extern bool unhandled_signal(struct task_struct *tsk, int sig);
/*
* In POSIX a signal is sent either to a specific thread (Linux task)
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index c86885954994..17a13e4785fc 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -630,6 +630,7 @@ typedef unsigned char *sk_buff_data_t;
* @hash: the packet hash
* @queue_mapping: Queue mapping for multiqueue devices
* @xmit_more: More SKBs are pending for this queue
+ * @pfmemalloc: skbuff was allocated from PFMEMALLOC reserves
* @ndisc_nodetype: router type (from link layer)
* @ooo_okay: allow the mapping of a socket to a queue to be changed
* @l4_hash: indicate hash is a canonical 4-tuple hash over transport
@@ -640,6 +641,7 @@ typedef unsigned char *sk_buff_data_t;
* @no_fcs: Request NIC to treat last 4 bytes as Ethernet FCS
* @csum_not_inet: use CRC32c to resolve CHECKSUM_PARTIAL
* @dst_pending_confirm: need to confirm neighbour
+ * @decrypted: Decrypted SKB
* @napi_id: id of the NAPI struct this skb came from
* @secmark: security marking
* @mark: Generic packet mark
@@ -674,12 +676,16 @@ struct sk_buff {
* UDP receive path is one user.
*/
unsigned long dev_scratch;
- int ip_defrag_offset;
};
};
- struct rb_node rbnode; /* used in netem & tcp stack */
+ struct rb_node rbnode; /* used in netem, ip4 defrag, and tcp stack */
+ struct list_head list;
+ };
+
+ union {
+ struct sock *sk;
+ int ip_defrag_offset;
};
- struct sock *sk;
union {
ktime_t tstamp;
@@ -735,7 +741,7 @@ struct sk_buff {
peeked:1,
head_frag:1,
xmit_more:1,
- __unused:1; /* one bit hole */
+ pfmemalloc:1;
/* fields enclosed in headers_start/headers_end are copied
* using a single memcpy() in __copy_skb_header()
@@ -754,31 +760,30 @@ struct sk_buff {
__u8 __pkt_type_offset[0];
__u8 pkt_type:3;
- __u8 pfmemalloc:1;
__u8 ignore_df:1;
-
__u8 nf_trace:1;
__u8 ip_summed:2;
__u8 ooo_okay:1;
+
__u8 l4_hash:1;
__u8 sw_hash:1;
__u8 wifi_acked_valid:1;
__u8 wifi_acked:1;
-
__u8 no_fcs:1;
/* Indicates the inner headers are valid in the skbuff. */
__u8 encapsulation:1;
__u8 encap_hdr_csum:1;
__u8 csum_valid:1;
+
__u8 csum_complete_sw:1;
__u8 csum_level:2;
__u8 csum_not_inet:1;
-
__u8 dst_pending_confirm:1;
#ifdef CONFIG_IPV6_NDISC_NODETYPE
__u8 ndisc_nodetype:2;
#endif
__u8 ipvs_property:1;
+
__u8 inner_protocol_type:1;
__u8 remcsum_offload:1;
#ifdef CONFIG_NET_SWITCHDEV
@@ -791,6 +796,9 @@ struct sk_buff {
__u8 tc_redirected:1;
__u8 tc_from_ingress:1;
#endif
+#ifdef CONFIG_TLS_DEVICE
+ __u8 decrypted:1;
+#endif
#ifdef CONFIG_NET_SCHED
__u16 tc_index; /* traffic control index */
@@ -1030,6 +1038,7 @@ static inline struct sk_buff *alloc_skb_fclone(unsigned int size,
}
struct sk_buff *skb_morph(struct sk_buff *dst, struct sk_buff *src);
+void skb_headers_offset_update(struct sk_buff *skb, int off);
int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask);
struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t priority);
void skb_copy_header(struct sk_buff *new, const struct sk_buff *old);
@@ -2354,7 +2363,7 @@ static inline void skb_probe_transport_header(struct sk_buff *skb,
if (skb_transport_header_was_set(skb))
return;
- if (skb_flow_dissect_flow_keys_basic(skb, &keys, 0, 0, 0, 0, 0))
+ if (skb_flow_dissect_flow_keys_basic(skb, &keys, NULL, 0, 0, 0, 0))
skb_set_transport_header(skb, keys.control.thoff);
else
skb_set_transport_header(skb, offset_hint);
@@ -2580,7 +2589,7 @@ static inline void __skb_queue_purge(struct sk_buff_head *list)
kfree_skb(skb);
}
-void skb_rbtree_purge(struct rb_root *root);
+unsigned int skb_rbtree_purge(struct rb_root *root);
void *netdev_alloc_frag(unsigned int fragsz);
@@ -3252,7 +3261,8 @@ struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags,
int *peeked, int *off, int *err);
struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags, int noblock,
int *err);
-__poll_t datagram_poll_mask(struct socket *sock, __poll_t events);
+__poll_t datagram_poll(struct file *file, struct socket *sock,
+ struct poll_table_struct *wait);
int skb_copy_datagram_iter(const struct sk_buff *from, int offset,
struct iov_iter *to, int size);
static inline int skb_copy_datagram_msg(const struct sk_buff *from, int offset,
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 14e3fe4bd6a1..ed9cbddeb4a6 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -97,7 +97,7 @@
# define SLAB_FAILSLAB 0
#endif
/* Account to memcg */
-#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)
+#ifdef CONFIG_MEMCG_KMEM
# define SLAB_ACCOUNT ((slab_flags_t __force)0x04000000U)
#else
# define SLAB_ACCOUNT 0
diff --git a/include/linux/slimbus.h b/include/linux/slimbus.h
index c36cf121d2cd..12c9719b2a55 100644
--- a/include/linux/slimbus.h
+++ b/include/linux/slimbus.h
@@ -14,16 +14,16 @@ extern struct bus_type slimbus_bus;
/**
* struct slim_eaddr - Enumeration address for a SLIMbus device
- * @manf_id: Manufacturer Id for the device
- * @prod_code: Product code
- * @dev_index: Device index
* @instance: Instance value
+ * @dev_index: Device index
+ * @prod_code: Product code
+ * @manf_id: Manufacturer Id for the device
*/
struct slim_eaddr {
- u16 manf_id;
- u16 prod_code;
- u8 dev_index;
u8 instance;
+ u8 dev_index;
+ u16 prod_code;
+ u16 manf_id;
} __packed;
/**
@@ -48,6 +48,8 @@ struct slim_controller;
* @ctrl: slim controller instance.
* @laddr: 1-byte Logical address of this device.
* @is_laddr_valid: indicates if the laddr is valid or not
+ * @stream_list: List of streams on this device
+ * @stream_list_lock: lock to protect the stream list
*
* This is the client/device handle returned when a SLIMbus
* device is registered with a controller.
@@ -60,6 +62,8 @@ struct slim_device {
enum slim_device_status status;
u8 laddr;
bool is_laddr_valid;
+ struct list_head stream_list;
+ spinlock_t stream_list_lock;
};
#define to_slim_device(d) container_of(d, struct slim_device, dev)
@@ -108,6 +112,36 @@ struct slim_val_inf {
struct completion *comp;
};
+#define SLIM_DEVICE_MAX_CHANNELS 256
+/* A SLIMbus device may have from 0 to 31 ports (inclusive) */
+#define SLIM_DEVICE_MAX_PORTS 32
+
+/**
+ * struct slim_stream_config - SLIMbus stream configuration
+ * Configuring a stream is done at hw_params or prepare call
+ * from audio drivers where they have all the required information
+ * regarding rate, number of channels and so on.
+ * There is a 1:1 mapping of channel and ports.
+ *
+ * @rate: data rate
+ * @bps: bits per data sample
+ * @ch_count: number of channels
+ * @chs: pointer to list of channel numbers
+ * @port_mask: port mask of ports to use for this stream
+ * @direction: direction of the stream, SNDRV_PCM_STREAM_PLAYBACK
+ * or SNDRV_PCM_STREAM_CAPTURE.
+ */
+struct slim_stream_config {
+ unsigned int rate;
+ unsigned int bps;
+ /* MAX 256 channels */
+ unsigned int ch_count;
+ unsigned int *chs;
+ /* Max 32 ports per device */
+ unsigned long port_mask;
+ int direction;
+};
+
/*
* use a macro to avoid include chaining to get THIS_MODULE
*/
@@ -138,6 +172,8 @@ static inline void slim_set_devicedata(struct slim_device *dev, void *data)
dev_set_drvdata(&dev->dev, data);
}
+struct slim_device *of_slim_get_device(struct slim_controller *ctrl,
+ struct device_node *np);
struct slim_device *slim_get_device(struct slim_controller *ctrl,
struct slim_eaddr *e_addr);
int slim_get_logical_addr(struct slim_device *sbdev);
@@ -161,4 +197,16 @@ int slim_readb(struct slim_device *sdev, u32 addr);
int slim_writeb(struct slim_device *sdev, u32 addr, u8 value);
int slim_read(struct slim_device *sdev, u32 addr, size_t count, u8 *val);
int slim_write(struct slim_device *sdev, u32 addr, size_t count, u8 *val);
+
+/* SLIMbus Stream apis */
+struct slim_stream_runtime;
+struct slim_stream_runtime *slim_stream_allocate(struct slim_device *dev,
+ const char *sname);
+int slim_stream_prepare(struct slim_stream_runtime *stream,
+ struct slim_stream_config *c);
+int slim_stream_enable(struct slim_stream_runtime *stream);
+int slim_stream_disable(struct slim_stream_runtime *stream);
+int slim_stream_unprepare(struct slim_stream_runtime *stream);
+int slim_stream_free(struct slim_stream_runtime *stream);
+
#endif /* _LINUX_SLIMBUS_H */
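
The stream API follows an allocate → prepare → enable → disable → unprepare → free life cycle, with struct slim_stream_config filled in from hw_params-time information. A hedged sketch of a codec driver's playback start path — the device, rate, channel list and port mask are illustrative, and error handling is elided:

static struct slim_stream_runtime *demo_start_playback(struct slim_device *sdev)
{
	static unsigned int chans[2] = { 1, 2 };
	struct slim_stream_config cfg = {
		.rate		= 48000,
		.bps		= 16,
		.ch_count	= 2,
		.chs		= chans,
		.port_mask	= 0x3,		/* ports 0 and 1 */
		.direction	= SNDRV_PCM_STREAM_PLAYBACK,
	};
	struct slim_stream_runtime *rt;

	rt = slim_stream_allocate(sdev, "demo-playback");
	/* error checks elided for brevity */
	slim_stream_prepare(rt, &cfg);
	slim_stream_enable(rt);
	return rt;
}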
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 09fa2c6f0e68..3a1a1dbc6f49 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -155,8 +155,12 @@ struct kmem_cache {
#ifdef CONFIG_SYSFS
#define SLAB_SUPPORTS_SYSFS
+void sysfs_slab_unlink(struct kmem_cache *);
void sysfs_slab_release(struct kmem_cache *);
#else
+static inline void sysfs_slab_unlink(struct kmem_cache *s)
+{
+}
static inline void sysfs_slab_release(struct kmem_cache *s)
{
}
diff --git a/include/linux/smpboot.h b/include/linux/smpboot.h
index c174844cf663..d0884b525001 100644
--- a/include/linux/smpboot.h
+++ b/include/linux/smpboot.h
@@ -25,8 +25,6 @@ struct smpboot_thread_data;
* parked (cpu offline)
* @unpark: Optional unpark function, called when the thread is
* unparked (cpu online)
- * @cpumask: Internal state. To update which threads are unparked,
- * call smpboot_update_cpumask_percpu_thread().
* @selfparking: Thread is not parked by the park function.
* @thread_comm: The base name of the thread
*/
@@ -40,23 +38,12 @@ struct smp_hotplug_thread {
void (*cleanup)(unsigned int cpu, bool online);
void (*park)(unsigned int cpu);
void (*unpark)(unsigned int cpu);
- cpumask_var_t cpumask;
bool selfparking;
const char *thread_comm;
};
-int smpboot_register_percpu_thread_cpumask(struct smp_hotplug_thread *plug_thread,
- const struct cpumask *cpumask);
-
-static inline int
-smpboot_register_percpu_thread(struct smp_hotplug_thread *plug_thread)
-{
- return smpboot_register_percpu_thread_cpumask(plug_thread,
- cpu_possible_mask);
-}
+int smpboot_register_percpu_thread(struct smp_hotplug_thread *plug_thread);
void smpboot_unregister_percpu_thread(struct smp_hotplug_thread *plug_thread);
-void smpboot_update_cpumask_percpu_thread(struct smp_hotplug_thread *plug_thread,
- const struct cpumask *);
#endif
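
With the cpumask variant removed, registering a per-CPU hotplug thread is reduced to filling in struct smp_hotplug_thread and calling smpboot_register_percpu_thread(); threads are created for every possible CPU and parked/unparked as CPUs go offline/online. A minimal sketch — the thread body and its "work pending" check are placeholders:

static DEFINE_PER_CPU(struct task_struct *, demo_task);

static int demo_should_run(unsigned int cpu)
{
	return 0;	/* placeholder: report whether per-CPU work is pending */
}

static void demo_thread_fn(unsigned int cpu)
{
	/* runs on @cpu whenever demo_should_run() returns true */
}

static struct smp_hotplug_thread demo_threads = {
	.store			= &demo_task,
	.thread_should_run	= demo_should_run,
	.thread_fn		= demo_thread_fn,
	.thread_comm		= "demo/%u",
};

static int __init demo_smpboot_init(void)
{
	return smpboot_register_percpu_thread(&demo_threads);
}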
diff --git a/include/linux/soc/qcom/llcc-qcom.h b/include/linux/soc/qcom/llcc-qcom.h
new file mode 100644
index 000000000000..7e3b9c605ab2
--- /dev/null
+++ b/include/linux/soc/qcom/llcc-qcom.h
@@ -0,0 +1,180 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ */
+
+#include <linux/platform_device.h>
+#ifndef __LLCC_QCOM__
+#define __LLCC_QCOM__
+
+#define LLCC_CPUSS 1
+#define LLCC_VIDSC0 2
+#define LLCC_VIDSC1 3
+#define LLCC_ROTATOR 4
+#define LLCC_VOICE 5
+#define LLCC_AUDIO 6
+#define LLCC_MDMHPGRW 7
+#define LLCC_MDM 8
+#define LLCC_CMPT 10
+#define LLCC_GPUHTW 11
+#define LLCC_GPU 12
+#define LLCC_MMUHWT 13
+#define LLCC_CMPTDMA 15
+#define LLCC_DISP 16
+#define LLCC_VIDFW 17
+#define LLCC_MDMHPFX 20
+#define LLCC_MDMPNG 21
+#define LLCC_AUDHW 22
+
+/**
+ * llcc_slice_desc - Cache slice descriptor
+ * @slice_id: llcc slice id
+ * @slice_size: Size allocated for the llcc slice
+ */
+struct llcc_slice_desc {
+ u32 slice_id;
+ size_t slice_size;
+};
+
+/**
+ * llcc_slice_config - Data associated with the llcc slice
+ * @usecase_id: usecase id for which the llcc slice is used
+ * @slice_id: llcc slice id assigned to each slice
+ * @max_cap: maximum capacity of the llcc slice
+ * @priority: priority of the llcc slice
+ * @fixed_size: when set, the llcc slice cannot grow beyond its size
+ * @bonus_ways: bonus ways associated with llcc slice
+ * @res_ways: reserved ways associated with llcc slice
+ * @cache_mode: mode of the llcc slice
+ * @probe_target_ways: Probe only reserved and bonus ways on a cache miss
+ * @dis_cap_alloc: Disable capacity based allocation
+ * @retain_on_pc: Retain through power collapse
+ * @activate_on_init: activate the slice on init
+ */
+struct llcc_slice_config {
+ u32 usecase_id;
+ u32 slice_id;
+ u32 max_cap;
+ u32 priority;
+ bool fixed_size;
+ u32 bonus_ways;
+ u32 res_ways;
+ u32 cache_mode;
+ u32 probe_target_ways;
+ bool dis_cap_alloc;
+ bool retain_on_pc;
+ bool activate_on_init;
+};
+
+/**
+ * llcc_drv_data - Data associated with the llcc driver
+ * @regmap: regmap associated with the llcc device
+ * @cfg: pointer to the data structure for slice configuration
+ * @lock: mutex associated with each slice
+ * @cfg_size: size of the config data table
+ * @max_slices: max slices as read from device tree
+ * @bcast_off: Offset of the broadcast bank
+ * @num_banks: Number of llcc banks
+ * @bitmap: Bit map to track the active slice ids
+ * @offsets: Pointer to the bank offsets array
+ */
+struct llcc_drv_data {
+ struct regmap *regmap;
+ const struct llcc_slice_config *cfg;
+ struct mutex lock;
+ u32 cfg_size;
+ u32 max_slices;
+ u32 bcast_off;
+ u32 num_banks;
+ unsigned long *bitmap;
+ u32 *offsets;
+};
+
+#if IS_ENABLED(CONFIG_QCOM_LLCC)
+/**
+ * llcc_slice_getd - get llcc slice descriptor
+ * @uid: usecase_id of the client
+ */
+struct llcc_slice_desc *llcc_slice_getd(u32 uid);
+
+/**
+ * llcc_slice_putd - put llcc slice descriptor
+ * @desc: Pointer to llcc slice descriptor
+ */
+void llcc_slice_putd(struct llcc_slice_desc *desc);
+
+/**
+ * llcc_get_slice_id - get slice id
+ * @desc: Pointer to llcc slice descriptor
+ */
+int llcc_get_slice_id(struct llcc_slice_desc *desc);
+
+/**
+ * llcc_get_slice_size - llcc slice size
+ * @desc: Pointer to llcc slice descriptor
+ */
+size_t llcc_get_slice_size(struct llcc_slice_desc *desc);
+
+/**
+ * llcc_slice_activate - Activate the llcc slice
+ * @desc: Pointer to llcc slice descriptor
+ */
+int llcc_slice_activate(struct llcc_slice_desc *desc);
+
+/**
+ * llcc_slice_deactivate - Deactivate the llcc slice
+ * @desc: Pointer to llcc slice descriptor
+ */
+int llcc_slice_deactivate(struct llcc_slice_desc *desc);
+
+/**
+ * qcom_llcc_probe - program the sct table
+ * @pdev: platform device pointer
+ * @table: soc sct table
+ * @sz: Size of the config table
+ */
+int qcom_llcc_probe(struct platform_device *pdev,
+ const struct llcc_slice_config *table, u32 sz);
+#else
+static inline struct llcc_slice_desc *llcc_slice_getd(u32 uid)
+{
+ return NULL;
+}
+
+static inline void llcc_slice_putd(struct llcc_slice_desc *desc)
+{
+
+};
+
+static inline int llcc_get_slice_id(struct llcc_slice_desc *desc)
+{
+ return -EINVAL;
+}
+
+static inline size_t llcc_get_slice_size(struct llcc_slice_desc *desc)
+{
+ return 0;
+}
+static inline int llcc_slice_activate(struct llcc_slice_desc *desc)
+{
+ return -EINVAL;
+}
+
+static inline int llcc_slice_deactivate(struct llcc_slice_desc *desc)
+{
+ return -EINVAL;
+}
+static inline int qcom_llcc_probe(struct platform_device *pdev,
+ const struct llcc_slice_config *table, u32 sz)
+{
+ return -ENODEV;
+}
+
+static inline int qcom_llcc_remove(struct platform_device *pdev)
+{
+ return -ENODEV;
+}
+#endif
+
+#endif
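
A client obtains a slice descriptor by use-case ID, activates it, and can query the slice id and size to program its own system-cache registers; when CONFIG_QCOM_LLCC is disabled the stubs above make the same calls fail gracefully. A rough sketch of the client side — the GPU use case and the error handling are illustrative:

static int demo_llcc_attach(void)
{
	struct llcc_slice_desc *slice;
	int ret;

	slice = llcc_slice_getd(LLCC_GPU);
	if (IS_ERR_OR_NULL(slice))
		return slice ? PTR_ERR(slice) : -ENODEV;

	ret = llcc_slice_activate(slice);
	if (ret) {
		llcc_slice_putd(slice);
		return ret;
	}

	pr_debug("llcc slice %d, %zu bytes\n",
		 llcc_get_slice_id(slice), llcc_get_slice_size(slice));
	return 0;
}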
diff --git a/include/linux/soc/qcom/mdt_loader.h b/include/linux/soc/qcom/mdt_loader.h
index 5b98bbdabc25..944b06aefb0f 100644
--- a/include/linux/soc/qcom/mdt_loader.h
+++ b/include/linux/soc/qcom/mdt_loader.h
@@ -17,4 +17,8 @@ int qcom_mdt_load(struct device *dev, const struct firmware *fw,
phys_addr_t mem_phys, size_t mem_size,
phys_addr_t *reloc_base);
+int qcom_mdt_load_no_init(struct device *dev, const struct firmware *fw,
+ const char *fw_name, int pas_id, void *mem_region,
+ phys_addr_t mem_phys, size_t mem_size,
+ phys_addr_t *reloc_base);
#endif
diff --git a/include/linux/soc/renesas/rcar-sysc.h b/include/linux/soc/renesas/rcar-sysc.h
index 8a6086d2e9c3..00fae6fd234d 100644
--- a/include/linux/soc/renesas/rcar-sysc.h
+++ b/include/linux/soc/renesas/rcar-sysc.h
@@ -2,16 +2,7 @@
#ifndef __LINUX_SOC_RENESAS_RCAR_SYSC_H__
#define __LINUX_SOC_RENESAS_RCAR_SYSC_H__
-#include <linux/types.h>
-
-struct rcar_sysc_ch {
- u16 chan_offs;
- u8 chan_bit;
- u8 isr_bit;
-};
-
-int rcar_sysc_power_down(const struct rcar_sysc_ch *sysc_ch);
-int rcar_sysc_power_up(const struct rcar_sysc_ch *sysc_ch);
-void rcar_sysc_init(phys_addr_t base, u32 syscier);
+int rcar_sysc_power_down_cpu(unsigned int cpu);
+int rcar_sysc_power_up_cpu(unsigned int cpu);
#endif /* __LINUX_SOC_RENESAS_RCAR_SYSC_H__ */
diff --git a/include/linux/soc/samsung/exynos-regs-pmu.h b/include/linux/soc/samsung/exynos-regs-pmu.h
index 66dcb9ec273a..5addaf5ccbce 100644
--- a/include/linux/soc/samsung/exynos-regs-pmu.h
+++ b/include/linux/soc/samsung/exynos-regs-pmu.h
@@ -42,7 +42,9 @@
#define EXYNOS_SWRESET 0x0400
#define S5P_WAKEUP_STAT 0x0600
-#define S5P_EINT_WAKEUP_MASK 0x0604
+/* Value for EXYNOS_EINT_WAKEUP_MASK disabling all external wakeup interrupts */
+#define EXYNOS_EINT_WAKEUP_MASK_DISABLED 0xffffffff
+#define EXYNOS_EINT_WAKEUP_MASK 0x0604
#define S5P_WAKEUP_MASK 0x0608
#define S5P_WAKEUP_MASK2 0x0614
@@ -180,6 +182,9 @@
#define S5P_CORE_WAKEUP_FROM_LOCAL_CFG (0x3 << 8)
#define S5P_CORE_AUTOWAKEUP_EN (1 << 31)
+/* Only for S5Pv210 */
+#define S5PV210_EINT_WAKEUP_MASK 0xC004
+
/* Only for EXYNOS4210 */
#define S5P_CMU_CLKSTOP_LCD1_LOWPWR 0x1154
#define S5P_CMU_RESET_LCD1_LOWPWR 0x1174
@@ -641,6 +646,7 @@
| EXYNOS5420_KFC_USE_STANDBY_WFI3)
/* For EXYNOS5433 */
+#define EXYNOS5433_EINT_WAKEUP_MASK (0x060C)
#define EXYNOS5433_USBHOST30_PHY_CONTROL (0x0728)
#define EXYNOS5433_PAD_RETENTION_AUD_OPTION (0x3028)
#define EXYNOS5433_PAD_RETENTION_MMC2_OPTION (0x30C8)
diff --git a/include/linux/spi/adi_spi3.h b/include/linux/spi/adi_spi3.h
deleted file mode 100644
index c84123aa1d06..000000000000
--- a/include/linux/spi/adi_spi3.h
+++ /dev/null
@@ -1,254 +0,0 @@
-/*
- * Analog Devices SPI3 controller driver
- *
- * Copyright (c) 2014 Analog Devices Inc.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef _ADI_SPI3_H_
-#define _ADI_SPI3_H_
-
-#include <linux/types.h>
-
-/* SPI_CONTROL */
-#define SPI_CTL_EN 0x00000001 /* Enable */
-#define SPI_CTL_MSTR 0x00000002 /* Master/Slave */
-#define SPI_CTL_PSSE 0x00000004 /* controls modf error in master mode */
-#define SPI_CTL_ODM 0x00000008 /* Open Drain Mode */
-#define SPI_CTL_CPHA 0x00000010 /* Clock Phase */
-#define SPI_CTL_CPOL 0x00000020 /* Clock Polarity */
-#define SPI_CTL_ASSEL 0x00000040 /* Slave Select Pin Control */
-#define SPI_CTL_SELST 0x00000080 /* Slave Select Polarity in-between transfers */
-#define SPI_CTL_EMISO 0x00000100 /* Enable MISO */
-#define SPI_CTL_SIZE 0x00000600 /* Word Transfer Size */
-#define SPI_CTL_SIZE08 0x00000000 /* SIZE: 8 bits */
-#define SPI_CTL_SIZE16 0x00000200 /* SIZE: 16 bits */
-#define SPI_CTL_SIZE32 0x00000400 /* SIZE: 32 bits */
-#define SPI_CTL_LSBF 0x00001000 /* LSB First */
-#define SPI_CTL_FCEN 0x00002000 /* Flow-Control Enable */
-#define SPI_CTL_FCCH 0x00004000 /* Flow-Control Channel Selection */
-#define SPI_CTL_FCPL 0x00008000 /* Flow-Control Polarity */
-#define SPI_CTL_FCWM 0x00030000 /* Flow-Control Water-Mark */
-#define SPI_CTL_FIFO0 0x00000000 /* FCWM: TFIFO empty or RFIFO Full */
-#define SPI_CTL_FIFO1 0x00010000 /* FCWM: TFIFO 75% or more empty or RFIFO 75% or more full */
-#define SPI_CTL_FIFO2 0x00020000 /* FCWM: TFIFO 50% or more empty or RFIFO 50% or more full */
-#define SPI_CTL_FMODE 0x00040000 /* Fast-mode Enable */
-#define SPI_CTL_MIOM 0x00300000 /* Multiple I/O Mode */
-#define SPI_CTL_MIO_DIS 0x00000000 /* MIOM: Disable */
-#define SPI_CTL_MIO_DUAL 0x00100000 /* MIOM: Enable DIOM (Dual I/O Mode) */
-#define SPI_CTL_MIO_QUAD 0x00200000 /* MIOM: Enable QUAD (Quad SPI Mode) */
-#define SPI_CTL_SOSI 0x00400000 /* Start on MOSI */
-/* SPI_RX_CONTROL */
-#define SPI_RXCTL_REN 0x00000001 /* Receive Channel Enable */
-#define SPI_RXCTL_RTI 0x00000004 /* Receive Transfer Initiate */
-#define SPI_RXCTL_RWCEN 0x00000008 /* Receive Word Counter Enable */
-#define SPI_RXCTL_RDR 0x00000070 /* Receive Data Request */
-#define SPI_RXCTL_RDR_DIS 0x00000000 /* RDR: Disabled */
-#define SPI_RXCTL_RDR_NE 0x00000010 /* RDR: RFIFO not empty */
-#define SPI_RXCTL_RDR_25 0x00000020 /* RDR: RFIFO 25% full */
-#define SPI_RXCTL_RDR_50 0x00000030 /* RDR: RFIFO 50% full */
-#define SPI_RXCTL_RDR_75 0x00000040 /* RDR: RFIFO 75% full */
-#define SPI_RXCTL_RDR_FULL 0x00000050 /* RDR: RFIFO full */
-#define SPI_RXCTL_RDO 0x00000100 /* Receive Data Over-Run */
-#define SPI_RXCTL_RRWM 0x00003000 /* FIFO Regular Water-Mark */
-#define SPI_RXCTL_RWM_0 0x00000000 /* RRWM: RFIFO Empty */
-#define SPI_RXCTL_RWM_25 0x00001000 /* RRWM: RFIFO 25% full */
-#define SPI_RXCTL_RWM_50 0x00002000 /* RRWM: RFIFO 50% full */
-#define SPI_RXCTL_RWM_75 0x00003000 /* RRWM: RFIFO 75% full */
-#define SPI_RXCTL_RUWM 0x00070000 /* FIFO Urgent Water-Mark */
-#define SPI_RXCTL_UWM_DIS 0x00000000 /* RUWM: Disabled */
-#define SPI_RXCTL_UWM_25 0x00010000 /* RUWM: RFIFO 25% full */
-#define SPI_RXCTL_UWM_50 0x00020000 /* RUWM: RFIFO 50% full */
-#define SPI_RXCTL_UWM_75 0x00030000 /* RUWM: RFIFO 75% full */
-#define SPI_RXCTL_UWM_FULL 0x00040000 /* RUWM: RFIFO full */
-/* SPI_TX_CONTROL */
-#define SPI_TXCTL_TEN 0x00000001 /* Transmit Channel Enable */
-#define SPI_TXCTL_TTI 0x00000004 /* Transmit Transfer Initiate */
-#define SPI_TXCTL_TWCEN 0x00000008 /* Transmit Word Counter Enable */
-#define SPI_TXCTL_TDR 0x00000070 /* Transmit Data Request */
-#define SPI_TXCTL_TDR_DIS 0x00000000 /* TDR: Disabled */
-#define SPI_TXCTL_TDR_NF 0x00000010 /* TDR: TFIFO not full */
-#define SPI_TXCTL_TDR_25 0x00000020 /* TDR: TFIFO 25% empty */
-#define SPI_TXCTL_TDR_50 0x00000030 /* TDR: TFIFO 50% empty */
-#define SPI_TXCTL_TDR_75 0x00000040 /* TDR: TFIFO 75% empty */
-#define SPI_TXCTL_TDR_EMPTY 0x00000050 /* TDR: TFIFO empty */
-#define SPI_TXCTL_TDU 0x00000100 /* Transmit Data Under-Run */
-#define SPI_TXCTL_TRWM 0x00003000 /* FIFO Regular Water-Mark */
-#define SPI_TXCTL_RWM_FULL 0x00000000 /* TRWM: TFIFO full */
-#define SPI_TXCTL_RWM_25 0x00001000 /* TRWM: TFIFO 25% empty */
-#define SPI_TXCTL_RWM_50 0x00002000 /* TRWM: TFIFO 50% empty */
-#define SPI_TXCTL_RWM_75 0x00003000 /* TRWM: TFIFO 75% empty */
-#define SPI_TXCTL_TUWM 0x00070000 /* FIFO Urgent Water-Mark */
-#define SPI_TXCTL_UWM_DIS 0x00000000 /* TUWM: Disabled */
-#define SPI_TXCTL_UWM_25 0x00010000 /* TUWM: TFIFO 25% empty */
-#define SPI_TXCTL_UWM_50 0x00020000 /* TUWM: TFIFO 50% empty */
-#define SPI_TXCTL_UWM_75 0x00030000 /* TUWM: TFIFO 75% empty */
-#define SPI_TXCTL_UWM_EMPTY 0x00040000 /* TUWM: TFIFO empty */
-/* SPI_CLOCK */
-#define SPI_CLK_BAUD 0x0000FFFF /* Baud Rate */
-/* SPI_DELAY */
-#define SPI_DLY_STOP 0x000000FF /* Transfer delay time in multiples of SCK period */
-#define SPI_DLY_LEADX 0x00000100 /* Extended (1 SCK) LEAD Control */
-#define SPI_DLY_LAGX 0x00000200 /* Extended (1 SCK) LAG control */
-/* SPI_SSEL */
-#define SPI_SLVSEL_SSE1 0x00000002 /* SPISSEL1 Enable */
-#define SPI_SLVSEL_SSE2 0x00000004 /* SPISSEL2 Enable */
-#define SPI_SLVSEL_SSE3 0x00000008 /* SPISSEL3 Enable */
-#define SPI_SLVSEL_SSE4 0x00000010 /* SPISSEL4 Enable */
-#define SPI_SLVSEL_SSE5 0x00000020 /* SPISSEL5 Enable */
-#define SPI_SLVSEL_SSE6 0x00000040 /* SPISSEL6 Enable */
-#define SPI_SLVSEL_SSE7 0x00000080 /* SPISSEL7 Enable */
-#define SPI_SLVSEL_SSEL1 0x00000200 /* SPISSEL1 Value */
-#define SPI_SLVSEL_SSEL2 0x00000400 /* SPISSEL2 Value */
-#define SPI_SLVSEL_SSEL3 0x00000800 /* SPISSEL3 Value */
-#define SPI_SLVSEL_SSEL4 0x00001000 /* SPISSEL4 Value */
-#define SPI_SLVSEL_SSEL5 0x00002000 /* SPISSEL5 Value */
-#define SPI_SLVSEL_SSEL6 0x00004000 /* SPISSEL6 Value */
-#define SPI_SLVSEL_SSEL7 0x00008000 /* SPISSEL7 Value */
-/* SPI_RWC */
-#define SPI_RWC_VALUE 0x0000FFFF /* Received Word-Count */
-/* SPI_RWCR */
-#define SPI_RWCR_VALUE 0x0000FFFF /* Received Word-Count Reload */
-/* SPI_TWC */
-#define SPI_TWC_VALUE 0x0000FFFF /* Transmitted Word-Count */
-/* SPI_TWCR */
-#define SPI_TWCR_VALUE 0x0000FFFF /* Transmitted Word-Count Reload */
-/* SPI_IMASK */
-#define SPI_IMSK_RUWM 0x00000002 /* Receive Urgent Water-Mark Interrupt Mask */
-#define SPI_IMSK_TUWM 0x00000004 /* Transmit Urgent Water-Mark Interrupt Mask */
-#define SPI_IMSK_ROM 0x00000010 /* Receive Over-Run Error Interrupt Mask */
-#define SPI_IMSK_TUM 0x00000020 /* Transmit Under-Run Error Interrupt Mask */
-#define SPI_IMSK_TCM 0x00000040 /* Transmit Collision Error Interrupt Mask */
-#define SPI_IMSK_MFM 0x00000080 /* Mode Fault Error Interrupt Mask */
-#define SPI_IMSK_RSM 0x00000100 /* Receive Start Interrupt Mask */
-#define SPI_IMSK_TSM 0x00000200 /* Transmit Start Interrupt Mask */
-#define SPI_IMSK_RFM 0x00000400 /* Receive Finish Interrupt Mask */
-#define SPI_IMSK_TFM 0x00000800 /* Transmit Finish Interrupt Mask */
-/* SPI_IMASKCL */
-#define SPI_IMSK_CLR_RUW 0x00000002 /* Receive Urgent Water-Mark Interrupt Mask */
-#define SPI_IMSK_CLR_TUWM 0x00000004 /* Transmit Urgent Water-Mark Interrupt Mask */
-#define SPI_IMSK_CLR_ROM 0x00000010 /* Receive Over-Run Error Interrupt Mask */
-#define SPI_IMSK_CLR_TUM 0x00000020 /* Transmit Under-Run Error Interrupt Mask */
-#define SPI_IMSK_CLR_TCM 0x00000040 /* Transmit Collision Error Interrupt Mask */
-#define SPI_IMSK_CLR_MFM 0x00000080 /* Mode Fault Error Interrupt Mask */
-#define SPI_IMSK_CLR_RSM 0x00000100 /* Receive Start Interrupt Mask */
-#define SPI_IMSK_CLR_TSM 0x00000200 /* Transmit Start Interrupt Mask */
-#define SPI_IMSK_CLR_RFM 0x00000400 /* Receive Finish Interrupt Mask */
-#define SPI_IMSK_CLR_TFM 0x00000800 /* Transmit Finish Interrupt Mask */
-/* SPI_IMASKST */
-#define SPI_IMSK_SET_RUWM 0x00000002 /* Receive Urgent Water-Mark Interrupt Mask */
-#define SPI_IMSK_SET_TUWM 0x00000004 /* Transmit Urgent Water-Mark Interrupt Mask */
-#define SPI_IMSK_SET_ROM 0x00000010 /* Receive Over-Run Error Interrupt Mask */
-#define SPI_IMSK_SET_TUM 0x00000020 /* Transmit Under-Run Error Interrupt Mask */
-#define SPI_IMSK_SET_TCM 0x00000040 /* Transmit Collision Error Interrupt Mask */
-#define SPI_IMSK_SET_MFM 0x00000080 /* Mode Fault Error Interrupt Mask */
-#define SPI_IMSK_SET_RSM 0x00000100 /* Receive Start Interrupt Mask */
-#define SPI_IMSK_SET_TSM 0x00000200 /* Transmit Start Interrupt Mask */
-#define SPI_IMSK_SET_RFM 0x00000400 /* Receive Finish Interrupt Mask */
-#define SPI_IMSK_SET_TFM 0x00000800 /* Transmit Finish Interrupt Mask */
-/* SPI_STATUS */
-#define SPI_STAT_SPIF 0x00000001 /* SPI Finished */
-#define SPI_STAT_RUWM 0x00000002 /* Receive Urgent Water-Mark Breached */
-#define SPI_STAT_TUWM 0x00000004 /* Transmit Urgent Water-Mark Breached */
-#define SPI_STAT_ROE 0x00000010 /* Receive Over-Run Error Indication */
-#define SPI_STAT_TUE 0x00000020 /* Transmit Under-Run Error Indication */
-#define SPI_STAT_TCE 0x00000040 /* Transmit Collision Error Indication */
-#define SPI_STAT_MODF 0x00000080 /* Mode Fault Error Indication */
-#define SPI_STAT_RS 0x00000100 /* Receive Start Indication */
-#define SPI_STAT_TS 0x00000200 /* Transmit Start Indication */
-#define SPI_STAT_RF 0x00000400 /* Receive Finish Indication */
-#define SPI_STAT_TF 0x00000800 /* Transmit Finish Indication */
-#define SPI_STAT_RFS 0x00007000 /* SPI_RFIFO status */
-#define SPI_STAT_RFIFO_EMPTY 0x00000000 /* RFS: RFIFO Empty */
-#define SPI_STAT_RFIFO_25 0x00001000 /* RFS: RFIFO 25% Full */
-#define SPI_STAT_RFIFO_50 0x00002000 /* RFS: RFIFO 50% Full */
-#define SPI_STAT_RFIFO_75 0x00003000 /* RFS: RFIFO 75% Full */
-#define SPI_STAT_RFIFO_FULL 0x00004000 /* RFS: RFIFO Full */
-#define SPI_STAT_TFS 0x00070000 /* SPI_TFIFO status */
-#define SPI_STAT_TFIFO_FULL 0x00000000 /* TFS: TFIFO full */
-#define SPI_STAT_TFIFO_25 0x00010000 /* TFS: TFIFO 25% empty */
-#define SPI_STAT_TFIFO_50 0x00020000 /* TFS: TFIFO 50% empty */
-#define SPI_STAT_TFIFO_75 0x00030000 /* TFS: TFIFO 75% empty */
-#define SPI_STAT_TFIFO_EMPTY 0x00040000 /* TFS: TFIFO empty */
-#define SPI_STAT_FCS 0x00100000 /* Flow-Control Stall Indication */
-#define SPI_STAT_RFE 0x00400000 /* SPI_RFIFO Empty */
-#define SPI_STAT_TFF 0x00800000 /* SPI_TFIFO Full */
-/* SPI_ILAT */
-#define SPI_ILAT_RUWMI 0x00000002 /* Receive Urgent Water Mark Interrupt */
-#define SPI_ILAT_TUWMI 0x00000004 /* Transmit Urgent Water Mark Interrupt */
-#define SPI_ILAT_ROI 0x00000010 /* Receive Over-Run Error Indication */
-#define SPI_ILAT_TUI 0x00000020 /* Transmit Under-Run Error Indication */
-#define SPI_ILAT_TCI 0x00000040 /* Transmit Collision Error Indication */
-#define SPI_ILAT_MFI 0x00000080 /* Mode Fault Error Indication */
-#define SPI_ILAT_RSI 0x00000100 /* Receive Start Indication */
-#define SPI_ILAT_TSI 0x00000200 /* Transmit Start Indication */
-#define SPI_ILAT_RFI 0x00000400 /* Receive Finish Indication */
-#define SPI_ILAT_TFI 0x00000800 /* Transmit Finish Indication */
-/* SPI_ILATCL */
-#define SPI_ILAT_CLR_RUWMI 0x00000002 /* Receive Urgent Water Mark Interrupt */
-#define SPI_ILAT_CLR_TUWMI 0x00000004 /* Transmit Urgent Water Mark Interrupt */
-#define SPI_ILAT_CLR_ROI 0x00000010 /* Receive Over-Run Error Indication */
-#define SPI_ILAT_CLR_TUI 0x00000020 /* Transmit Under-Run Error Indication */
-#define SPI_ILAT_CLR_TCI 0x00000040 /* Transmit Collision Error Indication */
-#define SPI_ILAT_CLR_MFI 0x00000080 /* Mode Fault Error Indication */
-#define SPI_ILAT_CLR_RSI 0x00000100 /* Receive Start Indication */
-#define SPI_ILAT_CLR_TSI 0x00000200 /* Transmit Start Indication */
-#define SPI_ILAT_CLR_RFI 0x00000400 /* Receive Finish Indication */
-#define SPI_ILAT_CLR_TFI 0x00000800 /* Transmit Finish Indication */
-
-/*
- * adi spi3 registers layout
- */
-struct adi_spi_regs {
- u32 revid;
- u32 control;
- u32 rx_control;
- u32 tx_control;
- u32 clock;
- u32 delay;
- u32 ssel;
- u32 rwc;
- u32 rwcr;
- u32 twc;
- u32 twcr;
- u32 reserved0;
- u32 emask;
- u32 emaskcl;
- u32 emaskst;
- u32 reserved1;
- u32 status;
- u32 elat;
- u32 elatcl;
- u32 reserved2;
- u32 rfifo;
- u32 reserved3;
- u32 tfifo;
-};
-
-#define MAX_CTRL_CS 8 /* cs in spi controller */
-
-/* device.platform_data for SSP controller devices */
-struct adi_spi3_master {
- u16 num_chipselect;
- u16 pin_req[7];
-};
-
-/* spi_board_info.controller_data for SPI slave devices,
- * copied to spi_device.platform_data ... mostly for dma tuning
- */
-struct adi_spi3_chip {
- u32 control;
- u16 cs_chg_udelay; /* Some devices require 16-bit delays */
- u32 tx_dummy_val; /* tx value for rx only transfer */
- bool enable_dma;
-};
-
-#endif /* _ADI_SPI3_H_ */
diff --git a/include/linux/spi/spi-mem.h b/include/linux/spi/spi-mem.h
index bb4bd15ae1f6..b2bd4b4127c4 100644
--- a/include/linux/spi/spi-mem.h
+++ b/include/linux/spi/spi-mem.h
@@ -3,7 +3,9 @@
* Copyright (C) 2018 Exceet Electronics GmbH
* Copyright (C) 2018 Bootlin
*
- * Author: Boris Brezillon <boris.brezillon@bootlin.com>
+ * Author:
+ * Peter Pan <peterpandong@micron.com>
+ * Boris Brezillon <boris.brezillon@bootlin.com>
*/
#ifndef __LINUX_SPI_MEM_H
@@ -122,7 +124,8 @@ struct spi_mem_op {
/**
* struct spi_mem - describes a SPI memory device
* @spi: the underlying SPI device
- * @drvpriv: spi_mem_drviver private data
+ * @drvpriv: spi_mem_driver private data
+ * @name: name of the SPI memory device
*
 * Extra information that describes the SPI memory device and may be needed by
* the controller to properly handle this device should be placed here.
@@ -133,6 +136,7 @@ struct spi_mem_op {
struct spi_mem {
struct spi_device *spi;
void *drvpriv;
+ const char *name;
};
/**
@@ -165,6 +169,13 @@ static inline void *spi_mem_get_drvdata(struct spi_mem *mem)
* limitations)
* @supports_op: check if an operation is supported by the controller
* @exec_op: execute a SPI memory operation
+ * @get_name: get a custom name for the SPI mem device from the controller.
+ * This might be needed if the controller driver has been ported
+ * to use the SPI mem layer and a custom name is used to keep
+ * mtdparts compatible.
+ * Note that if the implementation of this function allocates memory
+ * dynamically, then it should do so with devm_xxx(), as we don't
+ * have a ->free_name() function.
*
* This interface should be implemented by SPI controllers providing an
* high-level interface to execute SPI memory operation, which is usually the
@@ -176,6 +187,7 @@ struct spi_controller_mem_ops {
const struct spi_mem_op *op);
int (*exec_op)(struct spi_mem *mem,
const struct spi_mem_op *op);
+ const char *(*get_name)(struct spi_mem *mem);
};
/**
@@ -234,6 +246,8 @@ bool spi_mem_supports_op(struct spi_mem *mem,
int spi_mem_exec_op(struct spi_mem *mem,
const struct spi_mem_op *op);
+const char *spi_mem_get_name(struct spi_mem *mem);
+
int spi_mem_driver_register_with_owner(struct spi_mem_driver *drv,
struct module *owner);
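
The new ->get_name() hook and spi_mem_get_name() above let a controller keep legacy
partition (mtdparts) names when it is converted to the spi-mem layer. A rough,
hypothetical sketch of a controller implementation follows; the my_ctlr type, its
dev member and the naming format are invented for illustration, and only the
devm_kasprintf() allocation requirement comes from the kernel-doc above:

        #include <linux/device.h>
        #include <linux/spi/spi.h>
        #include <linux/spi/spi-mem.h>

        struct my_ctlr {
                struct device *dev;     /* hypothetical controller state */
        };

        static const char *my_ctlr_get_name(struct spi_mem *mem)
        {
                struct my_ctlr *ctlr =
                        spi_controller_get_devdata(mem->spi->controller);

                /* Keep the name format the pre-spi-mem driver exposed. */
                return devm_kasprintf(ctlr->dev, GFP_KERNEL, "%s-%u",
                                      dev_name(ctlr->dev),
                                      mem->spi->chip_select);
        }

        static const struct spi_controller_mem_ops my_ctlr_mem_ops = {
                /* .supports_op / .exec_op omitted for brevity */
                .get_name = my_ctlr_get_name,
        };

A memory driver would then call spi_mem_get_name(mem) when registering its MTD
device; presumably the core falls back to a generic device name when no
->get_name() is provided, but that fallback is not visible in this hunk.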
diff --git a/include/linux/spi/spi_bitbang.h b/include/linux/spi/spi_bitbang.h
index 51d8c060e513..b7e021b274dc 100644
--- a/include/linux/spi/spi_bitbang.h
+++ b/include/linux/spi/spi_bitbang.h
@@ -8,7 +8,7 @@ struct spi_bitbang {
struct mutex lock;
u8 busy;
u8 use_dma;
- u8 flags; /* extra spi->mode support */
+ u16 flags; /* extra spi->mode support */
struct spi_master *master;
@@ -30,7 +30,8 @@ struct spi_bitbang {
/* txrx_word[SPI_MODE_*]() just looks like a shift register */
u32 (*txrx_word[4])(struct spi_device *spi,
unsigned nsecs,
- u32 word, u8 bits);
+ u32 word, u8 bits, unsigned flags);
+ int (*set_line_direction)(struct spi_device *spi, bool output);
};
/* you can call these default bitbang->master methods from your custom
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index 1e8a46435838..e089157dcf97 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -114,29 +114,48 @@ do { \
#endif /*arch_spin_is_contended*/
/*
- * This barrier must provide two things:
+ * smp_mb__after_spinlock() provides the equivalent of a full memory barrier
+ * between program-order earlier lock acquisitions and program-order later
+ * memory accesses.
*
- * - it must guarantee a STORE before the spin_lock() is ordered against a
- * LOAD after it, see the comments at its two usage sites.
+ * This guarantees that the following two properties hold:
*
- * - it must ensure the critical section is RCsc.
+ * 1) Given the snippet:
*
- * The latter is important for cases where we observe values written by other
- * CPUs in spin-loops, without barriers, while being subject to scheduling.
+ * { X = 0; Y = 0; }
*
- * CPU0 CPU1 CPU2
+ * CPU0 CPU1
*
- * for (;;) {
- * if (READ_ONCE(X))
- * break;
- * }
- * X=1
- * <sched-out>
- * <sched-in>
- * r = X;
+ * WRITE_ONCE(X, 1); WRITE_ONCE(Y, 1);
+ * spin_lock(S); smp_mb();
+ * smp_mb__after_spinlock(); r1 = READ_ONCE(X);
+ * r0 = READ_ONCE(Y);
+ * spin_unlock(S);
*
- * without transitivity it could be that CPU1 observes X!=0 breaks the loop,
- * we get migrated and CPU2 sees X==0.
+ * it is forbidden that CPU0 does not observe CPU1's store to Y (r0 = 0)
+ * and CPU1 does not observe CPU0's store to X (r1 = 0); see the comments
+ * preceding the call to smp_mb__after_spinlock() in __schedule() and in
+ * try_to_wake_up().
+ *
+ * 2) Given the snippet:
+ *
+ * { X = 0; Y = 0; }
+ *
+ * CPU0 CPU1 CPU2
+ *
+ * spin_lock(S); spin_lock(S); r1 = READ_ONCE(Y);
+ * WRITE_ONCE(X, 1); smp_mb__after_spinlock(); smp_rmb();
+ * spin_unlock(S); r0 = READ_ONCE(X); r2 = READ_ONCE(X);
+ * WRITE_ONCE(Y, 1);
+ * spin_unlock(S);
+ *
+ * it is forbidden that CPU0's critical section executes before CPU1's
+ * critical section (r0 = 1), CPU2 observes CPU1's store to Y (r1 = 1)
+ * and CPU2 does not observe CPU0's store to X (r2 = 0); see the comments
+ * preceding the calls to smp_rmb() in try_to_wake_up() for similar
+ * snippets but "projected" onto two CPUs.
+ *
+ * Property (2) upgrades the lock to an RCsc lock.
*
* Since most load-store architectures implement ACQUIRE with an smp_mb() after
* the LL/SC loop, they need no further barriers. Similarly all our TSO
@@ -427,9 +446,25 @@ extern int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock);
#define atomic_dec_and_lock(atomic, lock) \
__cond_lock(lock, _atomic_dec_and_lock(atomic, lock))
-int alloc_bucket_spinlocks(spinlock_t **locks, unsigned int *lock_mask,
- size_t max_size, unsigned int cpu_mult,
- gfp_t gfp);
+extern int _atomic_dec_and_lock_irqsave(atomic_t *atomic, spinlock_t *lock,
+ unsigned long *flags);
+#define atomic_dec_and_lock_irqsave(atomic, lock, flags) \
+ __cond_lock(lock, _atomic_dec_and_lock_irqsave(atomic, lock, &(flags)))
+
+int __alloc_bucket_spinlocks(spinlock_t **locks, unsigned int *lock_mask,
+ size_t max_size, unsigned int cpu_mult,
+ gfp_t gfp, const char *name,
+ struct lock_class_key *key);
+
+#define alloc_bucket_spinlocks(locks, lock_mask, max_size, cpu_mult, gfp) \
+ ({ \
+ static struct lock_class_key key; \
+ int ret; \
+ \
+ ret = __alloc_bucket_spinlocks(locks, lock_mask, max_size, \
+ cpu_mult, gfp, #locks, &key); \
+ ret; \
+ })
void free_bucket_spinlocks(spinlock_t *locks);
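
For reference, a minimal hypothetical caller of the reworked bucket-lock allocator
wrapped above might look as follows; the table and helper names are invented, and
only alloc_bucket_spinlocks()/free_bucket_spinlocks() come from this header. Note
that the wrapper macro creates one static lock_class_key per call site, which is
what gives lockdep a distinct class for each user:

        #include <linux/spinlock.h>
        #include <linux/types.h>

        static spinlock_t *bucket_locks;
        static unsigned int bucket_lock_mask;

        static int my_table_init(void)
        {
                /* At most 1024 locks, scaled by 4 x num_possible_cpus(). */
                return alloc_bucket_spinlocks(&bucket_locks, &bucket_lock_mask,
                                              1024, 4, GFP_KERNEL);
        }

        static spinlock_t *my_bucket_lock(u32 hash)
        {
                return &bucket_locks[hash & bucket_lock_mask];
        }

        static void my_table_exit(void)
        {
                free_bucket_spinlocks(bucket_locks);
        }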
diff --git a/include/linux/srcu.h b/include/linux/srcu.h
index 91494d7e8e41..67135d4a8a30 100644
--- a/include/linux/srcu.h
+++ b/include/linux/srcu.h
@@ -170,6 +170,11 @@ static inline int srcu_read_lock_held(const struct srcu_struct *sp)
#define srcu_dereference(p, sp) srcu_dereference_check((p), (sp), 0)
/**
+ * srcu_dereference_notrace - no tracing and no lockdep calls from here
+ */
+#define srcu_dereference_notrace(p, sp) srcu_dereference_check((p), (sp), 1)
+
+/**
* srcu_read_lock - register a new reader for an SRCU-protected structure.
* @sp: srcu_struct in which to register the new reader.
*
@@ -195,6 +200,16 @@ static inline int srcu_read_lock(struct srcu_struct *sp) __acquires(sp)
return retval;
}
+/* Used by tracing, cannot be traced and cannot invoke lockdep. */
+static inline notrace int
+srcu_read_lock_notrace(struct srcu_struct *sp) __acquires(sp)
+{
+ int retval;
+
+ retval = __srcu_read_lock(sp);
+ return retval;
+}
+
/**
 * srcu_read_unlock - unregister an old reader from an SRCU-protected structure.
* @sp: srcu_struct in which to unregister the old reader.
@@ -209,6 +224,13 @@ static inline void srcu_read_unlock(struct srcu_struct *sp, int idx)
__srcu_read_unlock(sp, idx);
}
+/* Used by tracing, cannot be traced and cannot call lockdep. */
+static inline notrace void
+srcu_read_unlock_notrace(struct srcu_struct *sp, int idx) __releases(sp)
+{
+ __srcu_read_unlock(sp, idx);
+}
+
/**
* smp_mb__after_srcu_read_unlock - ensure full ordering after srcu_read_unlock
*
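
The _notrace variants added above exist so that tracing code can enter SRCU
read-side critical sections without recursing into the tracer or triggering
lockdep. A hedged sketch of the expected calling pattern; the protected structure
and the slot pointer are illustrative only:

        #include <linux/srcu.h>

        struct my_data {
                int val;
        };

        static int my_read_val(struct srcu_struct *sp, struct my_data __rcu **slot)
        {
                struct my_data *d;
                int idx, val = -1;

                idx = srcu_read_lock_notrace(sp);
                d = srcu_dereference_notrace(*slot, sp);
                if (d)
                        val = d->val;   /* d stays valid until the matching unlock */
                srcu_read_unlock_notrace(sp, idx);

                return val;
        }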
diff --git a/include/linux/ssb/ssb.h b/include/linux/ssb/ssb.h
index 3b43655cabe6..0d5a2691e7e9 100644
--- a/include/linux/ssb/ssb.h
+++ b/include/linux/ssb/ssb.h
@@ -499,11 +499,9 @@ struct ssb_bus {
/* Internal-only stuff follows. Do not touch. */
struct list_head list;
-#ifdef CONFIG_SSB_DEBUG
/* Is the bus already powered up? */
bool powered_up;
int power_warn_count;
-#endif /* DEBUG */
};
enum ssb_quirks {
diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h
index 32feac5bbd75..c43e9a01b892 100644
--- a/include/linux/stmmac.h
+++ b/include/linux/stmmac.h
@@ -190,5 +190,6 @@ struct plat_stmmacenet_data {
bool tso_en;
int mac_port_sel_speed;
bool en_tx_lpi_clockgating;
+ int has_xgmac;
};
#endif
diff --git a/include/linux/sunrpc/auth.h b/include/linux/sunrpc/auth.h
index d9af474a857d..58a6765c1c5e 100644
--- a/include/linux/sunrpc/auth.h
+++ b/include/linux/sunrpc/auth.h
@@ -125,7 +125,8 @@ struct rpc_authops {
struct module *owner;
rpc_authflavor_t au_flavor; /* flavor (RPC_AUTH_*) */
char * au_name;
- struct rpc_auth * (*create)(struct rpc_auth_create_args *, struct rpc_clnt *);
+ struct rpc_auth * (*create)(const struct rpc_auth_create_args *,
+ struct rpc_clnt *);
void (*destroy)(struct rpc_auth *);
int (*hash_cred)(struct auth_cred *, unsigned int);
@@ -174,7 +175,7 @@ struct rpc_cred * rpc_lookup_generic_cred(struct auth_cred *, int, gfp_t);
struct rpc_cred * rpc_lookup_machine_cred(const char *service_name);
int rpcauth_register(const struct rpc_authops *);
int rpcauth_unregister(const struct rpc_authops *);
-struct rpc_auth * rpcauth_create(struct rpc_auth_create_args *,
+struct rpc_auth * rpcauth_create(const struct rpc_auth_create_args *,
struct rpc_clnt *);
void rpcauth_release(struct rpc_auth *);
rpc_authflavor_t rpcauth_get_pseudoflavor(rpc_authflavor_t,
diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
index 9b11b6a0978c..73d5c4a870fa 100644
--- a/include/linux/sunrpc/clnt.h
+++ b/include/linux/sunrpc/clnt.h
@@ -156,6 +156,7 @@ int rpc_switch_client_transport(struct rpc_clnt *,
void rpc_shutdown_client(struct rpc_clnt *);
void rpc_release_client(struct rpc_clnt *);
+void rpc_task_release_transport(struct rpc_task *);
void rpc_task_release_client(struct rpc_task *);
int rpcb_create_local(struct net *);
diff --git a/include/linux/sunrpc/metrics.h b/include/linux/sunrpc/metrics.h
index 9baed7b355b2..1b3751327575 100644
--- a/include/linux/sunrpc/metrics.h
+++ b/include/linux/sunrpc/metrics.h
@@ -82,7 +82,7 @@ void rpc_count_iostats(const struct rpc_task *,
struct rpc_iostats *);
void rpc_count_iostats_metrics(const struct rpc_task *,
struct rpc_iostats *);
-void rpc_print_iostats(struct seq_file *, struct rpc_clnt *);
+void rpc_clnt_show_stats(struct seq_file *, struct rpc_clnt *);
void rpc_free_iostats(struct rpc_iostats *);
#else /* CONFIG_PROC_FS */
@@ -95,7 +95,7 @@ static inline void rpc_count_iostats_metrics(const struct rpc_task *task,
{
}
-static inline void rpc_print_iostats(struct seq_file *seq, struct rpc_clnt *clnt) {}
+static inline void rpc_clnt_show_stats(struct seq_file *seq, struct rpc_clnt *clnt) {}
static inline void rpc_free_iostats(struct rpc_iostats *stats) {}
#endif /* CONFIG_PROC_FS */
diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
index 574368e8a16f..73e130a840ce 100644
--- a/include/linux/sunrpc/svc.h
+++ b/include/linux/sunrpc/svc.h
@@ -496,9 +496,11 @@ void svc_reserve(struct svc_rqst *rqstp, int space);
struct svc_pool * svc_pool_for_cpu(struct svc_serv *serv, int cpu);
char * svc_print_addr(struct svc_rqst *, char *, size_t);
unsigned int svc_fill_write_vector(struct svc_rqst *rqstp,
+ struct page **pages,
struct kvec *first, size_t total);
char *svc_fill_symlink_pathname(struct svc_rqst *rqstp,
- struct kvec *first, size_t total);
+ struct kvec *first, void *p,
+ size_t total);
#define RPC_MAX_ADDRBUFLEN (63U)
diff --git a/include/linux/sunrpc/svcauth.h b/include/linux/sunrpc/svcauth.h
index 7c3656505847..04e404a07882 100644
--- a/include/linux/sunrpc/svcauth.h
+++ b/include/linux/sunrpc/svcauth.h
@@ -31,6 +31,7 @@ struct svc_cred {
/* name of form servicetype@hostname, passed down by
* rpc.svcgssd, or computed from the above: */
char *cr_principal;
+ char *cr_targ_princ;
struct gss_api_mech *cr_gss_mech;
};
@@ -39,6 +40,7 @@ static inline void init_svc_cred(struct svc_cred *cred)
cred->cr_group_info = NULL;
cred->cr_raw_principal = NULL;
cred->cr_principal = NULL;
+ cred->cr_targ_princ = NULL;
cred->cr_gss_mech = NULL;
}
@@ -48,6 +50,7 @@ static inline void free_svc_cred(struct svc_cred *cred)
put_group_info(cred->cr_group_info);
kfree(cred->cr_raw_principal);
kfree(cred->cr_principal);
+ kfree(cred->cr_targ_princ);
gss_mech_put(cred->cr_gss_mech);
init_svc_cred(cred);
}
diff --git a/include/linux/suspend.h b/include/linux/suspend.h
index 440b62f7502e..5a28ac9284f0 100644
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -414,7 +414,7 @@ static inline bool hibernation_available(void) { return false; }
#define PM_RESTORE_PREPARE 0x0005 /* Going to restore a saved image */
#define PM_POST_RESTORE 0x0006 /* Restore failed */
-extern struct mutex pm_mutex;
+extern struct mutex system_transition_mutex;
#ifdef CONFIG_PM_SLEEP
void save_processor_state(void);
diff --git a/include/linux/swait.h b/include/linux/swait.h
index bf8cb0dee23c..73e06e9986d4 100644
--- a/include/linux/swait.h
+++ b/include/linux/swait.h
@@ -16,7 +16,7 @@
* wait-queues, but the semantics are actually completely different, and
* every single user we have ever had has been buggy (or pointless).
*
- * A "swake_up()" only wakes up _one_ waiter, which is not at all what
+ * A "swake_up_one()" only wakes up _one_ waiter, which is not at all what
* "wake_up()" does, and has led to problems. In other cases, it has
* been fine, because there's only ever one waiter (kvm), but in that
 * case the whole "simple" wait-queue is just pointless to begin with,
@@ -38,8 +38,8 @@
* all wakeups are TASK_NORMAL in order to avoid O(n) lookups for the right
* sleeper state.
*
- * - the exclusive mode; because this requires preserving the list order
- * and this is hard.
+ * - the !exclusive mode; because that leads to O(n) wakeups, everything is
+ * exclusive.
*
* - custom wake callback functions; because you cannot give any guarantees
* about random code. This also allows swait to be used in RT, such that
@@ -115,7 +115,7 @@ extern void __init_swait_queue_head(struct swait_queue_head *q, const char *name
* CPU0 - waker CPU1 - waiter
*
* for (;;) {
- * @cond = true; prepare_to_swait(&wq_head, &wait, state);
+ * @cond = true; prepare_to_swait_exclusive(&wq_head, &wait, state);
* smp_mb(); // smp_mb() from set_current_state()
* if (swait_active(wq_head)) if (@cond)
* wake_up(wq_head); break;
@@ -157,20 +157,20 @@ static inline bool swq_has_sleeper(struct swait_queue_head *wq)
return swait_active(wq);
}
-extern void swake_up(struct swait_queue_head *q);
+extern void swake_up_one(struct swait_queue_head *q);
extern void swake_up_all(struct swait_queue_head *q);
extern void swake_up_locked(struct swait_queue_head *q);
-extern void __prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait);
-extern void prepare_to_swait(struct swait_queue_head *q, struct swait_queue *wait, int state);
+extern void prepare_to_swait_exclusive(struct swait_queue_head *q, struct swait_queue *wait, int state);
extern long prepare_to_swait_event(struct swait_queue_head *q, struct swait_queue *wait, int state);
extern void __finish_swait(struct swait_queue_head *q, struct swait_queue *wait);
extern void finish_swait(struct swait_queue_head *q, struct swait_queue *wait);
-/* as per ___wait_event() but for swait, therefore "exclusive == 0" */
+/* as per ___wait_event() but for swait, therefore "exclusive == 1" */
#define ___swait_event(wq, condition, state, ret, cmd) \
({ \
+ __label__ __out; \
struct swait_queue __wait; \
long __ret = ret; \
\
@@ -183,20 +183,20 @@ extern void finish_swait(struct swait_queue_head *q, struct swait_queue *wait);
\
if (___wait_is_interruptible(state) && __int) { \
__ret = __int; \
- break; \
+ goto __out; \
} \
\
cmd; \
} \
finish_swait(&wq, &__wait); \
- __ret; \
+__out: __ret; \
})
#define __swait_event(wq, condition) \
(void)___swait_event(wq, condition, TASK_UNINTERRUPTIBLE, 0, \
schedule())
-#define swait_event(wq, condition) \
+#define swait_event_exclusive(wq, condition) \
do { \
if (condition) \
break; \
@@ -208,7 +208,7 @@ do { \
TASK_UNINTERRUPTIBLE, timeout, \
__ret = schedule_timeout(__ret))
-#define swait_event_timeout(wq, condition, timeout) \
+#define swait_event_timeout_exclusive(wq, condition, timeout) \
({ \
long __ret = timeout; \
if (!___wait_cond_timeout(condition)) \
@@ -220,7 +220,7 @@ do { \
___swait_event(wq, condition, TASK_INTERRUPTIBLE, 0, \
schedule())
-#define swait_event_interruptible(wq, condition) \
+#define swait_event_interruptible_exclusive(wq, condition) \
({ \
int __ret = 0; \
if (!(condition)) \
@@ -233,7 +233,7 @@ do { \
TASK_INTERRUPTIBLE, timeout, \
__ret = schedule_timeout(__ret))
-#define swait_event_interruptible_timeout(wq, condition, timeout) \
+#define swait_event_interruptible_timeout_exclusive(wq, condition, timeout)\
({ \
long __ret = timeout; \
if (!___wait_cond_timeout(condition)) \
@@ -246,7 +246,7 @@ do { \
(void)___swait_event(wq, condition, TASK_IDLE, 0, schedule())
/**
- * swait_event_idle - wait without system load contribution
+ * swait_event_idle_exclusive - wait without system load contribution
* @wq: the waitqueue to wait on
* @condition: a C expression for the event to wait for
*
@@ -257,7 +257,7 @@ do { \
* condition and doesn't want to contribute to system load. Signals are
* ignored.
*/
-#define swait_event_idle(wq, condition) \
+#define swait_event_idle_exclusive(wq, condition) \
do { \
if (condition) \
break; \
@@ -270,7 +270,7 @@ do { \
__ret = schedule_timeout(__ret))
/**
- * swait_event_idle_timeout - wait up to timeout without load contribution
+ * swait_event_idle_timeout_exclusive - wait up to timeout without load contribution
* @wq: the waitqueue to wait on
* @condition: a C expression for the event to wait for
* @timeout: timeout at which we'll give up in jiffies
@@ -288,7 +288,7 @@ do { \
* or the remaining jiffies (at least 1) if the @condition evaluated
* to %true before the @timeout elapsed.
*/
-#define swait_event_idle_timeout(wq, condition, timeout) \
+#define swait_event_idle_timeout_exclusive(wq, condition, timeout) \
({ \
long __ret = timeout; \
if (!___wait_cond_timeout(condition)) \
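
With the renames above every swait user is explicitly exclusive: swake_up_one()
wakes exactly one waiter. A small hypothetical pairing, only to show the renamed
API in use (the flag and queue names are made up):

        #include <linux/swait.h>

        static DECLARE_SWAIT_QUEUE_HEAD(my_wq);
        static bool my_done;

        /* waiter side */
        static int my_wait_for_done(void)
        {
                return swait_event_interruptible_exclusive(my_wq,
                                                           READ_ONCE(my_done));
        }

        /* waker side */
        static void my_signal_done(void)
        {
                WRITE_ONCE(my_done, true);
                swake_up_one(&my_wq);
        }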
diff --git a/include/linux/swap.h b/include/linux/swap.h
index c063443d8638..8e2c11e692ba 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -447,7 +447,7 @@ extern void si_swapinfo(struct sysinfo *);
extern swp_entry_t get_swap_page(struct page *page);
extern void put_swap_page(struct page *page, swp_entry_t entry);
extern swp_entry_t get_swap_page_of_type(int);
-extern int get_swap_pages(int n, bool cluster, swp_entry_t swp_entries[]);
+extern int get_swap_pages(int n, swp_entry_t swp_entries[], int entry_size);
extern int add_swap_count_continuation(swp_entry_t, gfp_t);
extern void swap_shmem_alloc(swp_entry_t);
extern int swap_duplicate(swp_entry_t);
@@ -629,7 +629,6 @@ static inline int mem_cgroup_swappiness(struct mem_cgroup *memcg)
return memcg->swappiness;
}
-
#else
static inline int mem_cgroup_swappiness(struct mem_cgroup *mem)
{
@@ -637,6 +636,16 @@ static inline int mem_cgroup_swappiness(struct mem_cgroup *mem)
}
#endif
+#if defined(CONFIG_SWAP) && defined(CONFIG_MEMCG) && defined(CONFIG_BLK_CGROUP)
+extern void mem_cgroup_throttle_swaprate(struct mem_cgroup *memcg, int node,
+ gfp_t gfp_mask);
+#else
+static inline void mem_cgroup_throttle_swaprate(struct mem_cgroup *memcg,
+ int node, gfp_t gfp_mask)
+{
+}
+#endif
+
#ifdef CONFIG_MEMCG_SWAP
extern void mem_cgroup_swapout(struct page *page, swp_entry_t entry);
extern int mem_cgroup_try_charge_swap(struct page *page, swp_entry_t entry);
diff --git a/include/linux/swapfile.h b/include/linux/swapfile.h
index 06bd7b096167..e06febf62978 100644
--- a/include/linux/swapfile.h
+++ b/include/linux/swapfile.h
@@ -10,5 +10,7 @@ extern spinlock_t swap_lock;
extern struct plist_head swap_active_head;
extern struct swap_info_struct *swap_info[];
extern int try_to_unuse(unsigned int, bool, unsigned long);
+extern unsigned long generic_max_swapfile_size(void);
+extern unsigned long max_swapfile_size(void);
#endif /* _LINUX_SWAPFILE_H */
diff --git a/include/linux/swapops.h b/include/linux/swapops.h
index 1d3877c39a00..22af9d8a84ae 100644
--- a/include/linux/swapops.h
+++ b/include/linux/swapops.h
@@ -4,6 +4,7 @@
#include <linux/radix-tree.h>
#include <linux/bug.h>
+#include <linux/mm_types.h>
/*
* swapcache pages are stored in the swapper_space radix tree. We want to
@@ -134,7 +135,7 @@ static inline struct page *device_private_entry_to_page(swp_entry_t entry)
return pfn_to_page(swp_offset(entry));
}
-int device_private_entry_fault(struct vm_area_struct *vma,
+vm_fault_t device_private_entry_fault(struct vm_area_struct *vma,
unsigned long addr,
swp_entry_t entry,
unsigned int flags,
@@ -169,7 +170,7 @@ static inline struct page *device_private_entry_to_page(swp_entry_t entry)
return NULL;
}
-static inline int device_private_entry_fault(struct vm_area_struct *vma,
+static inline vm_fault_t device_private_entry_fault(struct vm_area_struct *vma,
unsigned long addr,
swp_entry_t entry,
unsigned int flags,
@@ -340,11 +341,6 @@ static inline int is_hwpoison_entry(swp_entry_t entry)
return swp_type(entry) == SWP_HWPOISON;
}
-static inline bool test_set_page_hwpoison(struct page *page)
-{
- return TestSetPageHWPoison(page);
-}
-
static inline void num_poisoned_pages_inc(void)
{
atomic_long_inc(&num_poisoned_pages);
@@ -367,11 +363,6 @@ static inline int is_hwpoison_entry(swp_entry_t swp)
return 0;
}
-static inline bool test_set_page_hwpoison(struct page *page)
-{
- return false;
-}
-
static inline void num_poisoned_pages_inc(void)
{
}
diff --git a/include/linux/switchtec.h b/include/linux/switchtec.h
index ec93e93371fa..ab400af6f0ce 100644
--- a/include/linux/switchtec.h
+++ b/include/linux/switchtec.h
@@ -19,10 +19,6 @@
#include <linux/pci.h>
#include <linux/cdev.h>
-#define MICROSEMI_VENDOR_ID 0x11f8
-#define MICROSEMI_NTB_CLASSCODE 0x068000
-#define MICROSEMI_MGMT_CLASSCODE 0x058000
-
#define SWITCHTEC_MRPC_PAYLOAD_SIZE 1024
#define SWITCHTEC_MAX_PFF_CSR 48
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index 73810808cdf2..2ff814c92f7f 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -11,6 +11,7 @@
#ifndef _LINUX_SYSCALLS_H
#define _LINUX_SYSCALLS_H
+struct __aio_sigset;
struct epoll_event;
struct iattr;
struct inode;
@@ -80,6 +81,7 @@ union bpf_attr;
#include <linux/unistd.h>
#include <linux/quota.h>
#include <linux/key.h>
+#include <linux/personality.h>
#include <trace/syscall.h>
#ifdef CONFIG_ARCH_HAS_SYSCALL_WRAPPER
@@ -231,6 +233,9 @@ static inline int is_syscall_trace_event(struct trace_event_call *tp_event)
*/
#ifndef __SYSCALL_DEFINEx
#define __SYSCALL_DEFINEx(x, name, ...) \
+ __diag_push(); \
+ __diag_ignore(GCC, 8, "-Wattribute-alias", \
+ "Type aliasing is used to sanitize syscall arguments");\
asmlinkage long sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)) \
__attribute__((alias(__stringify(__se_sys##name)))); \
ALLOW_ERROR_INJECTION(sys##name, ERRNO); \
@@ -243,6 +248,7 @@ static inline int is_syscall_trace_event(struct trace_event_call *tp_event)
__PROTECT(x, ret,__MAP(x,__SC_ARGS,__VA_ARGS__)); \
return ret; \
} \
+ __diag_pop(); \
static inline long __do_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__))
#endif /* __SYSCALL_DEFINEx */
@@ -501,9 +507,9 @@ asmlinkage long sys_sync_file_range(int fd, loff_t offset, loff_t nbytes,
/* fs/timerfd.c */
asmlinkage long sys_timerfd_create(int clockid, int flags);
asmlinkage long sys_timerfd_settime(int ufd, int flags,
- const struct itimerspec __user *utmr,
- struct itimerspec __user *otmr);
-asmlinkage long sys_timerfd_gettime(int ufd, struct itimerspec __user *otmr);
+ const struct __kernel_itimerspec __user *utmr,
+ struct __kernel_itimerspec __user *otmr);
+asmlinkage long sys_timerfd_gettime(int ufd, struct __kernel_itimerspec __user *otmr);
/* fs/utimes.c */
asmlinkage long sys_utimensat(int dfd, const char __user *filename,
@@ -568,10 +574,10 @@ asmlinkage long sys_timer_create(clockid_t which_clock,
struct sigevent __user *timer_event_spec,
timer_t __user * created_timer_id);
asmlinkage long sys_timer_gettime(timer_t timer_id,
- struct itimerspec __user *setting);
+ struct __kernel_itimerspec __user *setting);
asmlinkage long sys_timer_getoverrun(timer_t timer_id);
asmlinkage long sys_timer_settime(timer_t timer_id, int flags,
- const struct itimerspec __user *new_setting,
+ const struct __kernel_itimerspec __user *new_setting,
struct itimerspec __user *old_setting);
asmlinkage long sys_timer_delete(timer_t timer_id);
asmlinkage long sys_clock_settime(clockid_t which_clock,
@@ -1277,4 +1283,14 @@ static inline long ksys_truncate(const char __user *pathname, loff_t length)
return do_sys_truncate(pathname, length);
}
+static inline unsigned int ksys_personality(unsigned int personality)
+{
+ unsigned int old = current->personality;
+
+ if (personality != 0xffffffff)
+ set_personality(personality);
+
+ return old;
+}
+
#endif
diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h
index b8bfdc173ec0..987cefa337de 100644
--- a/include/linux/sysfs.h
+++ b/include/linux/sysfs.h
@@ -91,9 +91,9 @@ struct attribute_group {
struct bin_attribute **bin_attrs;
};
-/**
- * Use these macros to make defining attributes easier. See include/linux/device.h
- * for examples..
+/*
+ * Use these macros to make defining attributes easier.
+ * See include/linux/device.h for examples.
*/
#define SYSFS_PREALLOC 010000
@@ -237,6 +237,9 @@ int __must_check sysfs_create_files(struct kobject *kobj,
const struct attribute **attr);
int __must_check sysfs_chmod_file(struct kobject *kobj,
const struct attribute *attr, umode_t mode);
+struct kernfs_node *sysfs_break_active_protection(struct kobject *kobj,
+ const struct attribute *attr);
+void sysfs_unbreak_active_protection(struct kernfs_node *kn);
void sysfs_remove_file_ns(struct kobject *kobj, const struct attribute *attr,
const void *ns);
bool sysfs_remove_file_self(struct kobject *kobj, const struct attribute *attr);
@@ -350,6 +353,17 @@ static inline int sysfs_chmod_file(struct kobject *kobj,
return 0;
}
+static inline struct kernfs_node *
+sysfs_break_active_protection(struct kobject *kobj,
+ const struct attribute *attr)
+{
+ return NULL;
+}
+
+static inline void sysfs_unbreak_active_protection(struct kernfs_node *kn)
+{
+}
+
static inline void sysfs_remove_file_ns(struct kobject *kobj,
const struct attribute *attr,
const void *ns)
diff --git a/include/linux/t10-pi.h b/include/linux/t10-pi.h
index c6aa8a3c42ed..b9626aa7e90c 100644
--- a/include/linux/t10-pi.h
+++ b/include/linux/t10-pi.h
@@ -37,9 +37,33 @@ struct t10_pi_tuple {
#define T10_PI_APP_ESCAPE cpu_to_be16(0xffff)
#define T10_PI_REF_ESCAPE cpu_to_be32(0xffffffff)
+static inline u32 t10_pi_ref_tag(struct request *rq)
+{
+#ifdef CONFIG_BLK_DEV_INTEGRITY
+ return blk_rq_pos(rq) >>
+ (rq->q->integrity.interval_exp - 9) & 0xffffffff;
+#else
+ return -1U;
+#endif
+}
+
extern const struct blk_integrity_profile t10_pi_type1_crc;
extern const struct blk_integrity_profile t10_pi_type1_ip;
extern const struct blk_integrity_profile t10_pi_type3_crc;
extern const struct blk_integrity_profile t10_pi_type3_ip;
+#ifdef CONFIG_BLK_DEV_INTEGRITY
+extern void t10_pi_prepare(struct request *rq, u8 protection_type);
+extern void t10_pi_complete(struct request *rq, u8 protection_type,
+ unsigned int intervals);
+#else
+static inline void t10_pi_complete(struct request *rq, u8 protection_type,
+ unsigned int intervals)
+{
+}
+static inline void t10_pi_prepare(struct request *rq, u8 protection_type)
+{
+}
+#endif
+
#endif
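
The new t10_pi_prepare()/t10_pi_complete() helpers centralize the reference-tag
remapping that protected-I/O drivers need around DMA. A rough, hypothetical sketch
of how a Type 1 protection driver would be expected to bracket a request; apart
from the two helpers, t10_pi_ref_tag() and T10_PI_TYPE1_PROTECTION, all names are
illustrative:

        #include <linux/blkdev.h>
        #include <linux/t10-pi.h>

        static blk_status_t my_queue_rq(struct request *rq)
        {
                /* Remap virtual ref tags to the physical LBA space before DMA. */
                if (blk_integrity_rq(rq))
                        t10_pi_prepare(rq, T10_PI_TYPE1_PROTECTION);

                pr_debug("submitting rq, initial ref tag %u\n", t10_pi_ref_tag(rq));
                /* ... program the hardware command and start the transfer ... */

                return BLK_STS_OK;
        }

        static void my_complete_rq(struct request *rq, unsigned int good_intervals)
        {
                /* Undo the remapping for intervals that completed without error. */
                if (blk_integrity_rq(rq))
                        t10_pi_complete(rq, T10_PI_TYPE1_PROTECTION, good_intervals);
        }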
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index 72705eaf4b84..263e37271afd 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -89,7 +89,7 @@ struct tcp_sack_block {
struct tcp_options_received {
/* PAWS/RTTM data */
- long ts_recent_stamp;/* Time we stored ts_recent (for aging) */
+ int ts_recent_stamp;/* Time we stored ts_recent (for aging) */
u32 ts_recent; /* Time stamp to echo next */
u32 rcv_tsval; /* Time stamp value */
u32 rcv_tsecr; /* Time stamp echo reply */
@@ -181,10 +181,16 @@ struct tcp_sock {
u32 data_segs_out; /* RFC4898 tcpEStatsPerfDataSegsOut
* total number of data segments sent.
*/
+ u64 bytes_sent; /* RFC4898 tcpEStatsPerfHCDataOctetsOut
+ * total number of data bytes sent.
+ */
u64 bytes_acked; /* RFC4898 tcpEStatsAppHCThruOctetsAcked
* sum(delta(snd_una)), or how many bytes
* were acked.
*/
+ u32 dsack_dups; /* RFC4898 tcpEStatsStackDSACKDups
+ * total number of DSACK blocks received
+ */
u32 snd_una; /* First byte we want an ack for */
u32 snd_sml; /* Last byte of the most recently transmitted small packet */
u32 rcv_tstamp; /* timestamp of last received ACK (for keepalives) */
@@ -214,8 +220,7 @@ struct tcp_sock {
#define TCP_RACK_RECOVERY_THRESH 16
u8 reo_wnd_persist:5, /* No. of recovery since last adj */
dsack_seen:1, /* Whether DSACK seen after last adj */
- advanced:1, /* mstamp advanced since last lost marking */
- reord:1; /* reordering detected */
+ advanced:1; /* mstamp advanced since last lost marking */
} rack;
u16 advmss; /* Advertised MSS */
u8 compressed_ack;
@@ -261,6 +266,7 @@ struct tcp_sock {
u8 ecn_flags; /* ECN status bits. */
u8 keepalive_probes; /* num of allowed keep alive probes */
u32 reordering; /* Packet reordering metric. */
+ u32 reord_seen; /* number of data packet reordering events */
u32 snd_up; /* Urgent pointer */
/*
@@ -330,6 +336,9 @@ struct tcp_sock {
* the first SYN. */
u32 undo_marker; /* snd_una upon a new recovery episode. */
int undo_retrans; /* number of undoable retransmissions. */
+ u64 bytes_retrans; /* RFC4898 tcpEStatsPerfOctetsRetrans
+ * Total data bytes retransmitted
+ */
u32 total_retrans; /* Total retransmits for entire connection */
u32 urg_seq; /* Seq of received urgent pointer */
@@ -350,6 +359,7 @@ struct tcp_sock {
#endif
/* Receiver side RTT estimation */
+ u32 rcv_rtt_last_tsecr;
struct {
u32 rtt_us;
u32 seq;
@@ -425,7 +435,7 @@ struct tcp_timewait_sock {
/* The time we sent the last out-of-window ACK: */
u32 tw_last_oow_ack_time;
- long tw_ts_recent_stamp;
+ int tw_ts_recent_stamp;
#ifdef CONFIG_TCP_MD5SIG
struct tcp_md5sig_key *tw_md5_key;
#endif
diff --git a/include/linux/time.h b/include/linux/time.h
index aed74463592d..27d83fd2ae61 100644
--- a/include/linux/time.h
+++ b/include/linux/time.h
@@ -14,9 +14,9 @@ int get_timespec64(struct timespec64 *ts,
int put_timespec64(const struct timespec64 *ts,
struct __kernel_timespec __user *uts);
int get_itimerspec64(struct itimerspec64 *it,
- const struct itimerspec __user *uit);
+ const struct __kernel_itimerspec __user *uit);
int put_itimerspec64(const struct itimerspec64 *it,
- struct itimerspec __user *uit);
+ struct __kernel_itimerspec __user *uit);
extern time64_t mktime64(const unsigned int year, const unsigned int mon,
const unsigned int day, const unsigned int hour,
diff --git a/include/linux/time64.h b/include/linux/time64.h
index 0a7b2f79cec7..05634afba0db 100644
--- a/include/linux/time64.h
+++ b/include/linux/time64.h
@@ -12,6 +12,7 @@ typedef __u64 timeu64_t;
*/
#ifndef CONFIG_64BIT_TIME
#define __kernel_timespec timespec
+#define __kernel_itimerspec itimerspec
#endif
#include <uapi/linux/time.h>
diff --git a/include/linux/timekeeping.h b/include/linux/timekeeping.h
index 86bc2026efce..5d738804e3d6 100644
--- a/include/linux/timekeeping.h
+++ b/include/linux/timekeeping.h
@@ -21,6 +21,21 @@ extern int do_sys_settimeofday64(const struct timespec64 *tv,
const struct timezone *tz);
/*
+ * ktime_get() family: read the current time in a multitude of ways,
+ *
+ * The default time reference is CLOCK_MONOTONIC, starting at
+ * boot time but not counting the time spent in suspend.
+ * For other references, use the functions with "real", "clocktai",
+ * "boottime" and "raw" suffixes.
+ *
+ * To get the time in a different format, use the ones with
+ * "ns", "ts64" and "seconds" suffixes.
+ *
+ * See Documentation/core-api/timekeeping.rst for more details.
+ */
+
+
+/*
* timespec64 based interfaces
*/
extern void ktime_get_raw_ts64(struct timespec64 *ts);
@@ -177,7 +192,7 @@ static inline time64_t ktime_get_clocktai_seconds(void)
extern bool timekeeping_rtc_skipsuspend(void);
extern bool timekeeping_rtc_skipresume(void);
-extern void timekeeping_inject_sleeptime64(struct timespec64 *delta);
+extern void timekeeping_inject_sleeptime64(const struct timespec64 *delta);
/*
* struct system_time_snapshot - simultaneous raw/real time capture with
@@ -243,7 +258,8 @@ extern void ktime_get_snapshot(struct system_time_snapshot *systime_snapshot);
extern int persistent_clock_is_local;
extern void read_persistent_clock64(struct timespec64 *ts);
-extern void read_boot_clock64(struct timespec64 *ts);
+void read_persistent_clock_and_boot_offset(struct timespec64 *wall_clock,
+ struct timespec64 *boot_offset);
extern int update_persistent_clock64(struct timespec64 now);
/*
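
To illustrate the naming scheme the new comment describes, a short sketch using a
few existing accessors from this family (the function and format strings of the
sketch itself are illustrative):

        #include <linux/timekeeping.h>

        static void show_clocks(void)
        {
                ktime_t mono = ktime_get();                /* CLOCK_MONOTONIC, ktime_t */
                u64 mono_ns = ktime_get_ns();              /* same clock, nanoseconds  */
                time64_t wall = ktime_get_real_seconds();  /* CLOCK_REALTIME, seconds  */
                struct timespec64 raw;

                ktime_get_raw_ts64(&raw);                  /* CLOCK_MONOTONIC_RAW, ts64 */

                pr_info("mono=%lld ns=%llu wall=%lld raw=%lld.%09ld\n",
                        ktime_to_ns(mono), mono_ns, wall,
                        (s64)raw.tv_sec, raw.tv_nsec);
        }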
diff --git a/include/linux/torture.h b/include/linux/torture.h
index 66272862070b..61dfd93b6ee4 100644
--- a/include/linux/torture.h
+++ b/include/linux/torture.h
@@ -64,6 +64,8 @@ struct torture_random_state {
long trs_count;
};
#define DEFINE_TORTURE_RANDOM(name) struct torture_random_state name = { 0, 0 }
+#define DEFINE_TORTURE_RANDOM_PERCPU(name) \
+ DEFINE_PER_CPU(struct torture_random_state, name)
unsigned long torture_random(struct torture_random_state *trsp);
/* Task shuffler, which causes CPUs to occasionally go idle. */
@@ -79,7 +81,7 @@ void stutter_wait(const char *title);
int torture_stutter_init(int s);
/* Initialization and cleanup. */
-bool torture_init_begin(char *ttype, bool v);
+bool torture_init_begin(char *ttype, int v);
void torture_init_end(void);
bool torture_cleanup_begin(void);
void torture_cleanup_end(void);
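
DEFINE_TORTURE_RANDOM_PERCPU() simply wraps DEFINE_PER_CPU() so each CPU gets its
own torture_random_state and the generators do not contend. A hypothetical use;
the variable and helper names are illustrative:

        #include <linux/percpu.h>
        #include <linux/torture.h>

        static DEFINE_TORTURE_RANDOM_PERCPU(my_rand);

        static unsigned long my_pick_delay(void)
        {
                struct torture_random_state *trsp = this_cpu_ptr(&my_rand);

                /* Pick a pseudo-random delay in the range [0, 999]. */
                return torture_random(trsp) % 1000;
        }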
diff --git a/include/linux/tpm.h b/include/linux/tpm.h
index 06639fb6ab85..4609b94142d4 100644
--- a/include/linux/tpm.h
+++ b/include/linux/tpm.h
@@ -43,6 +43,8 @@ struct tpm_class_ops {
u8 (*status) (struct tpm_chip *chip);
bool (*update_timeouts)(struct tpm_chip *chip,
unsigned long *timeout_cap);
+ int (*go_idle)(struct tpm_chip *chip);
+ int (*cmd_ready)(struct tpm_chip *chip);
int (*request_locality)(struct tpm_chip *chip, int loc);
int (*relinquish_locality)(struct tpm_chip *chip, int loc);
void (*clk_enable)(struct tpm_chip *chip, bool value);
@@ -61,6 +63,7 @@ extern int tpm_seal_trusted(struct tpm_chip *chip,
extern int tpm_unseal_trusted(struct tpm_chip *chip,
struct trusted_key_payload *payload,
struct trusted_key_options *options);
+extern struct tpm_chip *tpm_default_chip(void);
#else
static inline int tpm_is_tpm2(struct tpm_chip *chip)
{
@@ -96,5 +99,9 @@ static inline int tpm_unseal_trusted(struct tpm_chip *chip,
{
return -ENODEV;
}
+static inline struct tpm_chip *tpm_default_chip(void)
+{
+ return NULL;
+}
#endif
#endif
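
tpm_default_chip() gives callers a way to reach the system's default TPM without
carrying a chip pointer around; as the stub above shows, it returns NULL when TPM
support is disabled. A minimal hedged usage, combined with tpm_is_tpm2() from the
same header (whether the returned chip carries a reference the caller must drop is
not visible in this hunk, so the sketch omits any cleanup):

        #include <linux/tpm.h>

        static int my_check_tpm(void)
        {
                struct tpm_chip *chip = tpm_default_chip();

                if (!chip)
                        return -ENODEV;   /* no TPM, or TPM support disabled */

                pr_info("default TPM is %s\n",
                        tpm_is_tpm2(chip) ? "TPM 2.0" : "TPM 1.x");
                return 0;
        }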
diff --git a/include/linux/tracehook.h b/include/linux/tracehook.h
index 4a8841963c2e..05589a3e37f4 100644
--- a/include/linux/tracehook.h
+++ b/include/linux/tracehook.h
@@ -51,6 +51,7 @@
#include <linux/security.h>
#include <linux/task_work.h>
#include <linux/memcontrol.h>
+#include <linux/blk-cgroup.h>
struct linux_binprm;
/*
@@ -192,6 +193,7 @@ static inline void tracehook_notify_resume(struct pt_regs *regs)
task_work_run();
mem_cgroup_handle_over_high();
+ blkcg_maybe_throttle_current();
}
#endif /* <linux/tracehook.h> */
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
index 19a690b559ca..7f2e16e76ac4 100644
--- a/include/linux/tracepoint.h
+++ b/include/linux/tracepoint.h
@@ -15,6 +15,7 @@
*/
#include <linux/smp.h>
+#include <linux/srcu.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/cpumask.h>
@@ -33,6 +34,8 @@ struct trace_eval_map {
#define TRACEPOINT_DEFAULT_PRIO 10
+extern struct srcu_struct tracepoint_srcu;
+
extern int
tracepoint_probe_register(struct tracepoint *tp, void *probe, void *data);
extern int
@@ -75,10 +78,16 @@ int unregister_tracepoint_module_notifier(struct notifier_block *nb)
* probe unregistration and the end of module exit to make sure there is no
* caller executing a probe when it is freed.
*/
+#ifdef CONFIG_TRACEPOINTS
static inline void tracepoint_synchronize_unregister(void)
{
+ synchronize_srcu(&tracepoint_srcu);
synchronize_sched();
}
+#else
+static inline void tracepoint_synchronize_unregister(void)
+{ }
+#endif
#ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS
extern int syscall_regfunc(void);
@@ -129,18 +138,31 @@ extern void syscall_unregfunc(void);
* as "(void *, void)". The DECLARE_TRACE_NOARGS() will pass in just
* "void *data", where as the DECLARE_TRACE() will pass in "void *data, proto".
*/
-#define __DO_TRACE(tp, proto, args, cond, rcucheck) \
+#define __DO_TRACE(tp, proto, args, cond, rcuidle) \
do { \
struct tracepoint_func *it_func_ptr; \
void *it_func; \
void *__data; \
+ int __maybe_unused idx = 0; \
\
if (!(cond)) \
return; \
- if (rcucheck) \
- rcu_irq_enter_irqson(); \
- rcu_read_lock_sched_notrace(); \
- it_func_ptr = rcu_dereference_sched((tp)->funcs); \
+ \
+ /* srcu can't be used from NMI */ \
+ WARN_ON_ONCE(rcuidle && in_nmi()); \
+ \
+ /* keep srcu and sched-rcu usage consistent */ \
+ preempt_disable_notrace(); \
+ \
+ /* \
+ * For rcuidle callers, use srcu since sched-rcu \
+ * doesn't work from the idle path. \
+ */ \
+ if (rcuidle) \
+ idx = srcu_read_lock_notrace(&tracepoint_srcu); \
+ \
+ it_func_ptr = rcu_dereference_raw((tp)->funcs); \
+ \
if (it_func_ptr) { \
do { \
it_func = (it_func_ptr)->func; \
@@ -148,9 +170,11 @@ extern void syscall_unregfunc(void);
((void(*)(proto))(it_func))(args); \
} while ((++it_func_ptr)->func); \
} \
- rcu_read_unlock_sched_notrace(); \
- if (rcucheck) \
- rcu_irq_exit_irqson(); \
+ \
+ if (rcuidle) \
+ srcu_read_unlock_notrace(&tracepoint_srcu, idx);\
+ \
+ preempt_enable_notrace(); \
} while (0)
#ifndef MODULE
@@ -225,6 +249,19 @@ extern void syscall_unregfunc(void);
return static_key_false(&__tracepoint_##name.key); \
}
+#ifdef CONFIG_HAVE_ARCH_PREL32_RELOCATIONS
+#define __TRACEPOINT_ENTRY(name) \
+ asm(" .section \"__tracepoints_ptrs\", \"a\" \n" \
+ " .balign 4 \n" \
+ " .long __tracepoint_" #name " - . \n" \
+ " .previous \n")
+#else
+#define __TRACEPOINT_ENTRY(name) \
+ static struct tracepoint * const __tracepoint_ptr_##name __used \
+ __attribute__((section("__tracepoints_ptrs"))) = \
+ &__tracepoint_##name
+#endif
+
/*
* We have no guarantee that gcc and the linker won't up-align the tracepoint
* structures, so we create an array of pointers that will be used for iteration
@@ -234,11 +271,9 @@ extern void syscall_unregfunc(void);
static const char __tpstrtab_##name[] \
__attribute__((section("__tracepoints_strings"))) = #name; \
struct tracepoint __tracepoint_##name \
- __attribute__((section("__tracepoints"))) = \
+ __attribute__((section("__tracepoints"), used)) = \
{ __tpstrtab_##name, STATIC_KEY_INIT_FALSE, reg, unreg, NULL };\
- static struct tracepoint * const __tracepoint_ptr_##name __used \
- __attribute__((section("__tracepoints_ptrs"))) = \
- &__tracepoint_##name;
+ __TRACEPOINT_ENTRY(name);
#define DEFINE_TRACE(name) \
DEFINE_TRACE_FN(name, NULL, NULL);
diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h
index 1ef64d4ad887..840894ca3fc0 100644
--- a/include/linux/tty_ldisc.h
+++ b/include/linux/tty_ldisc.h
@@ -119,13 +119,13 @@
#include <linux/fs.h>
#include <linux/wait.h>
-
+#include <linux/atomic.h>
/*
* the semaphore definition
*/
struct ld_semaphore {
- long count;
+ atomic_long_t count;
raw_spinlock_t wait_lock;
unsigned int wait_readers;
struct list_head read_wait;
diff --git a/include/linux/udp.h b/include/linux/udp.h
index ca840345571b..320d49d85484 100644
--- a/include/linux/udp.h
+++ b/include/linux/udp.h
@@ -74,8 +74,8 @@ struct udp_sock {
void (*encap_destroy)(struct sock *sk);
/* GRO functions for UDP socket */
- struct sk_buff ** (*gro_receive)(struct sock *sk,
- struct sk_buff **head,
+ struct sk_buff * (*gro_receive)(struct sock *sk,
+ struct list_head *head,
struct sk_buff *skb);
int (*gro_complete)(struct sock *sk,
struct sk_buff *skb,
diff --git a/include/linux/uio_driver.h b/include/linux/uio_driver.h
index 6c5f2074e14f..6f8b68cd460f 100644
--- a/include/linux/uio_driver.h
+++ b/include/linux/uio_driver.h
@@ -75,7 +75,7 @@ struct uio_device {
struct fasync_struct *async_queue;
wait_queue_head_t wait;
struct uio_info *info;
- spinlock_t info_lock;
+ struct mutex info_lock;
struct kobject *map_dir;
struct kobject *portio_dir;
};
diff --git a/include/linux/uprobes.h b/include/linux/uprobes.h
index 0a294e950df8..bb9d2084af03 100644
--- a/include/linux/uprobes.h
+++ b/include/linux/uprobes.h
@@ -121,7 +121,7 @@ extern bool is_swbp_insn(uprobe_opcode_t *insn);
extern bool is_trap_insn(uprobe_opcode_t *insn);
extern unsigned long uprobe_get_swbp_addr(struct pt_regs *regs);
extern unsigned long uprobe_get_trap_addr(struct pt_regs *regs);
-extern int uprobe_write_opcode(struct mm_struct *mm, unsigned long vaddr, uprobe_opcode_t);
+extern int uprobe_write_opcode(struct arch_uprobe *auprobe, struct mm_struct *mm, unsigned long vaddr, uprobe_opcode_t);
extern int uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer *uc);
extern int uprobe_apply(struct inode *inode, loff_t offset, struct uprobe_consumer *uc, bool);
extern void uprobe_unregister(struct inode *inode, loff_t offset, struct uprobe_consumer *uc);
diff --git a/include/linux/usb/audio-v3.h b/include/linux/usb/audio-v3.h
index a710e28b5215..6b708434b7f9 100644
--- a/include/linux/usb/audio-v3.h
+++ b/include/linux/usb/audio-v3.h
@@ -387,6 +387,12 @@ struct uac3_interrupt_data_msg {
#define UAC3_CONNECTORS 0x0f
#define UAC3_POWER_DOMAIN 0x10
+/* A.20 PROCESSING UNIT PROCESS TYPES */
+#define UAC3_PROCESS_UNDEFINED 0x00
+#define UAC3_PROCESS_UP_DOWNMIX 0x01
+#define UAC3_PROCESS_STEREO_EXTENDER 0x02
+#define UAC3_PROCESS_MULTI_FUNCTION 0x03
+
/* A.22 AUDIO CLASS-SPECIFIC REQUEST CODES */
/* see audio-v2.h for the rest, which is identical to v2 */
#define UAC3_CS_REQ_INTEN 0x04
@@ -406,6 +412,15 @@ struct uac3_interrupt_data_msg {
#define UAC3_TE_OVERFLOW 0x04
#define UAC3_TE_LATENCY 0x05
+/* A.23.10 PROCESSING UNITS CONTROL SELECTORS */
+
+/* Up/Down Mixer */
+#define UAC3_UD_MODE_SELECT 0x01
+
+/* Stereo Extender */
+#define UAC3_EXT_WIDTH_CONTROL 0x01
+
+
/* BADD predefined Unit/Terminal values */
#define UAC3_BADD_IT_ID1 1 /* Input Terminal ID1: bTerminalID = 1 */
#define UAC3_BADD_FU_ID2 2 /* Feature Unit ID2: bUnitID = 2 */
@@ -432,4 +447,8 @@ struct uac3_interrupt_data_msg {
/* BADD sample rate is always fixed to 48kHz */
#define UAC3_BADD_SAMPLING_RATE 48000
+/* BADD power domains recovery times in 50us increments */
+#define UAC3_BADD_PD_RECOVER_D1D0 0x0258 /* 30ms */
+#define UAC3_BADD_PD_RECOVER_D2D0 0x1770 /* 300ms */
+
#endif /* __LINUX_USB_AUDIO_V3_H */
diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h
index 34a6ded6f319..97e2ddec18b1 100644
--- a/include/linux/usb/hcd.h
+++ b/include/linux/usb/hcd.h
@@ -322,6 +322,7 @@ struct hc_driver {
int (*bus_suspend)(struct usb_hcd *);
int (*bus_resume)(struct usb_hcd *);
int (*start_port_reset)(struct usb_hcd *, unsigned port_num);
+ unsigned long (*get_resuming_ports)(struct usb_hcd *);
/* force handover of high-speed port to full-speed companion */
void (*relinquish_port)(struct usb_hcd *, int);
diff --git a/include/linux/usb/pd.h b/include/linux/usb/pd.h
index 09b570feb297..f2162e0fe531 100644
--- a/include/linux/usb/pd.h
+++ b/include/linux/usb/pd.h
@@ -15,6 +15,7 @@
#ifndef __LINUX_USB_PD_H
#define __LINUX_USB_PD_H
+#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/usb/typec.h>
diff --git a/include/linux/usb/tcpm.h b/include/linux/usb/tcpm.h
index b231b9314240..7e7fbfb84e8e 100644
--- a/include/linux/usb/tcpm.h
+++ b/include/linux/usb/tcpm.h
@@ -98,18 +98,10 @@ struct tcpc_config {
#define TCPC_MUX_DP_ENABLED BIT(1) /* DP enabled */
#define TCPC_MUX_POLARITY_INVERTED BIT(2) /* Polarity inverted */
-/* Mux modes, decoded to attributes */
-enum tcpc_mux_mode {
- TYPEC_MUX_NONE = 0, /* Open switch */
- TYPEC_MUX_USB = TCPC_MUX_USB_ENABLED, /* USB only */
- TYPEC_MUX_DP = TCPC_MUX_DP_ENABLED, /* DP only */
- TYPEC_MUX_DOCK = TCPC_MUX_USB_ENABLED | /* Both USB and DP */
- TCPC_MUX_DP_ENABLED,
-};
-
/**
* struct tcpc_dev - Port configuration and callback functions
* @config: Pointer to port configuration
+ * @fwnode: Pointer to port fwnode
* @get_vbus: Called to read current VBUS state
* @get_current_limit:
* Optional; called by the tcpm core when configured as a snk
@@ -138,6 +130,7 @@ enum tcpc_mux_mode {
*/
struct tcpc_dev {
const struct tcpc_config *config;
+ struct fwnode_handle *fwnode;
int (*init)(struct tcpc_dev *dev);
int (*get_vbus)(struct tcpc_dev *dev);
diff --git a/include/linux/usb/typec.h b/include/linux/usb/typec.h
index 672b39bb0adc..7df4ecabc78a 100644
--- a/include/linux/usb/typec.h
+++ b/include/linux/usb/typec.h
@@ -5,21 +5,18 @@
#include <linux/types.h>
-/* XXX: Once we have a header for USB Power Delivery, this belongs there */
-#define ALTMODE_MAX_MODES 6
-
/* USB Type-C Specification releases */
#define USB_TYPEC_REV_1_0 0x100 /* 1.0 */
#define USB_TYPEC_REV_1_1 0x110 /* 1.1 */
#define USB_TYPEC_REV_1_2 0x120 /* 1.2 */
-struct typec_altmode;
struct typec_partner;
struct typec_cable;
struct typec_plug;
struct typec_port;
struct fwnode_handle;
+struct device;
enum typec_port_type {
TYPEC_PORT_SRC,
@@ -93,39 +90,21 @@ int typec_partner_set_identity(struct typec_partner *partner);
int typec_cable_set_identity(struct typec_cable *cable);
/*
- * struct typec_mode_desc - Individual Mode of an Alternate Mode
- * @index: Index of the Mode within the SVID
- * @vdo: VDO returned by Discover Modes USB PD command
- * @desc: Optional human readable description of the mode
- * @roles: Only for ports. DRP if the mode is available in both roles
- *
- * Description of a mode of an Alternate Mode which a connector, cable plug or
- * partner supports. Every mode will have it's own sysfs group. The details are
- * the VDO returned by discover modes command, description for the mode and
- * active flag telling has the mode being entered or not.
- */
-struct typec_mode_desc {
- int index;
- u32 vdo;
- char *desc;
- /* Only used with ports */
- enum typec_port_type roles;
-};
-
-/*
* struct typec_altmode_desc - USB Type-C Alternate Mode Descriptor
* @svid: Standard or Vendor ID
- * @n_modes: Number of modes
- * @modes: Array of modes supported by the Alternate Mode
+ * @mode: Index of the Mode
+ * @vdo: VDO returned by Discover Modes USB PD command
+ * @roles: Only for ports. DRP if the mode is available in both roles
*
- * Representation of an Alternate Mode that has SVID assigned by USB-IF. The
- * array of modes will list the modes of a particular SVID that are supported by
- * a connector, partner of a cable plug.
+ * Description of an Alternate Mode which a connector, cable plug or partner
+ * supports.
*/
struct typec_altmode_desc {
u16 svid;
- int n_modes;
- struct typec_mode_desc modes[ALTMODE_MAX_MODES];
+ u8 mode;
+ u32 vdo;
+ /* Only used with ports */
+ enum typec_port_data roles;
};
struct typec_altmode
@@ -141,8 +120,7 @@ void typec_unregister_altmode(struct typec_altmode *altmode);
struct typec_port *typec_altmode2port(struct typec_altmode *alt);
-void typec_altmode_update_active(struct typec_altmode *alt, int mode,
- bool active);
+void typec_altmode_update_active(struct typec_altmode *alt, bool active);
enum typec_plug_index {
TYPEC_PLUG_SOP_P,
@@ -205,7 +183,6 @@ struct typec_partner_desc {
* @dr_set: Set Data Role
* @pr_set: Set Power Role
* @vconn_set: Set VCONN Role
- * @activate_mode: Enter/exit given Alternate Mode
* @port_type_set: Set port type
*
* Static capabilities of a single USB Type-C port.
@@ -231,12 +208,8 @@ struct typec_capability {
enum typec_role);
int (*vconn_set)(const struct typec_capability *,
enum typec_role);
-
- int (*activate_mode)(const struct typec_capability *,
- int mode, int activate);
int (*port_type_set)(const struct typec_capability *,
- enum typec_port_type);
-
+ enum typec_port_type);
};
/* Specific to try_role(). Indicates the user want's to clear the preference. */
@@ -265,6 +238,10 @@ void typec_set_pwr_opmode(struct typec_port *port, enum typec_pwr_opmode mode);
int typec_set_orientation(struct typec_port *port,
enum typec_orientation orientation);
+enum typec_orientation typec_get_orientation(struct typec_port *port);
int typec_set_mode(struct typec_port *port, int mode);
+int typec_find_port_power_role(const char *name);
+int typec_find_power_role(const char *name);
+int typec_find_port_data_role(const char *name);
#endif /* __LINUX_USB_TYPEC_H */
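
The flattened descriptor means a single Alternate Mode is now described by one
svid/mode/vdo triple instead of an array of modes. A minimal sketch of filling
the new descriptor for a DisplayPort partner mode; the SVID/VDO values and the
typec_partner_register_altmode() helper are assumptions for illustration, not
part of this hunk:

	#include <linux/err.h>
	#include <linux/usb/typec.h>

	static int example_register_dp_altmode(struct typec_partner *partner)
	{
		struct typec_altmode_desc desc = {
			.svid = 0xff01,	/* DisplayPort SVID, illustrative */
			.mode = 1,	/* Index of the Mode */
			.vdo  = 0x1c46,	/* example Discover Modes VDO */
		};
		struct typec_altmode *alt;

		alt = typec_partner_register_altmode(partner, &desc);
		if (IS_ERR(alt))
			return PTR_ERR(alt);
		return 0;
	}
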
diff --git a/include/linux/usb/typec_altmode.h b/include/linux/usb/typec_altmode.h
new file mode 100644
index 000000000000..9a88c74a1d0d
--- /dev/null
+++ b/include/linux/usb/typec_altmode.h
@@ -0,0 +1,160 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __USB_TYPEC_ALTMODE_H
+#define __USB_TYPEC_ALTMODE_H
+
+#include <linux/mod_devicetable.h>
+#include <linux/usb/typec.h>
+#include <linux/device.h>
+
+#define MODE_DISCOVERY_MAX 6
+
+struct typec_altmode_ops;
+
+/**
+ * struct typec_altmode - USB Type-C alternate mode device
+ * @dev: Driver model's view of this device
+ * @svid: Standard or Vendor ID (SVID) of the alternate mode
+ * @mode: Index of the Mode
+ * @vdo: VDO returned by Discover Modes USB PD command
+ * @active: Tells whether the mode has been entered or not
+ * @desc: Optional human readable description of the mode
+ * @ops: Operations vector from the driver
+ */
+struct typec_altmode {
+ struct device dev;
+ u16 svid;
+ int mode;
+ u32 vdo;
+ unsigned int active:1;
+
+ char *desc;
+ const struct typec_altmode_ops *ops;
+};
+
+#define to_typec_altmode(d) container_of(d, struct typec_altmode, dev)
+
+static inline void typec_altmode_set_drvdata(struct typec_altmode *altmode,
+ void *data)
+{
+ dev_set_drvdata(&altmode->dev, data);
+}
+
+static inline void *typec_altmode_get_drvdata(struct typec_altmode *altmode)
+{
+ return dev_get_drvdata(&altmode->dev);
+}
+
+/**
+ * struct typec_altmode_ops - Alternate mode specific operations vector
+ * @enter: Operations to be executed with Enter Mode Command
+ * @exit: Operations to be executed with Exit Mode Command
+ * @attention: Callback for Attention Command
+ * @vdm: Callback for SVID specific commands
+ * @notify: Communication channel for platform and the alternate mode
+ * @activate: User callback for Enter/Exit Mode
+ */
+struct typec_altmode_ops {
+ int (*enter)(struct typec_altmode *altmode);
+ int (*exit)(struct typec_altmode *altmode);
+ void (*attention)(struct typec_altmode *altmode, u32 vdo);
+ int (*vdm)(struct typec_altmode *altmode, const u32 hdr,
+ const u32 *vdo, int cnt);
+ int (*notify)(struct typec_altmode *altmode, unsigned long conf,
+ void *data);
+ int (*activate)(struct typec_altmode *altmode, int activate);
+};
+
+int typec_altmode_enter(struct typec_altmode *altmode);
+int typec_altmode_exit(struct typec_altmode *altmode);
+void typec_altmode_attention(struct typec_altmode *altmode, u32 vdo);
+int typec_altmode_vdm(struct typec_altmode *altmode,
+ const u32 header, const u32 *vdo, int count);
+int typec_altmode_notify(struct typec_altmode *altmode, unsigned long conf,
+ void *data);
+const struct typec_altmode *
+typec_altmode_get_partner(struct typec_altmode *altmode);
+
+/*
+ * These are the connector states (USB, Safe and Alt Mode) defined in USB Type-C
+ * Specification. SVID specific connector states are expected to follow and
+ * start from the value TYPEC_STATE_MODAL.
+ */
+enum {
+ TYPEC_STATE_SAFE, /* USB Safe State */
+ TYPEC_STATE_USB, /* USB Operation */
+ TYPEC_STATE_MODAL, /* Alternate Modes */
+};
+
+/*
+ * For the muxes there is no difference between Accessory Modes and Alternate
+ * Modes, so the Accessory Modes are supplied with specific modal state values
+ * here. Unlike with Alternate Modes, where the mux will be linked with the
+ * alternate mode device, the mux for Accessory Modes will be linked with the
+ * port device instead.
+ *
+ * Port drivers can use TYPEC_MODE_AUDIO and TYPEC_MODE_DEBUG as the mode
+ * value for typec_set_mode() when accessory modes are supported.
+ */
+enum {
+ TYPEC_MODE_AUDIO = TYPEC_STATE_MODAL, /* Audio Accessory */
+ TYPEC_MODE_DEBUG, /* Debug Accessory */
+};
+
+#define TYPEC_MODAL_STATE(_state_) ((_state_) + TYPEC_STATE_MODAL)
+
+struct typec_altmode *typec_altmode_get_plug(struct typec_altmode *altmode,
+ enum typec_plug_index index);
+void typec_altmode_put_plug(struct typec_altmode *plug);
+
+struct typec_altmode *typec_match_altmode(struct typec_altmode **altmodes,
+ size_t n, u16 svid, u8 mode);
+
+struct typec_altmode *
+typec_altmode_register_notifier(struct device *dev, u16 svid, u8 mode,
+ struct notifier_block *nb);
+
+void typec_altmode_unregister_notifier(struct typec_altmode *adev,
+ struct notifier_block *nb);
+
+/**
+ * typec_altmode_get_orientation - Get cable plug orientation
+ * @altmode: Handle to the alternate mode
+ */
+static inline enum typec_orientation
+typec_altmode_get_orientation(struct typec_altmode *altmode)
+{
+ return typec_get_orientation(typec_altmode2port(altmode));
+}
+
+/**
+ * struct typec_altmode_driver - USB Type-C alternate mode device driver
+ * @id_table: Null terminated array of SVIDs
+ * @probe: Callback for device binding
+ * @remove: Callback for device unbinding
+ * @driver: Device driver model driver
+ *
+ * These drivers will be bound to the partner alternate mode devices. They will
+ * handle all SVID specific communication.
+ */
+struct typec_altmode_driver {
+ const struct typec_device_id *id_table;
+ int (*probe)(struct typec_altmode *altmode);
+ void (*remove)(struct typec_altmode *altmode);
+ struct device_driver driver;
+};
+
+#define to_altmode_driver(d) container_of(d, struct typec_altmode_driver, \
+ driver)
+
+#define typec_altmode_register_driver(drv) \
+ __typec_altmode_register_driver(drv, THIS_MODULE)
+int __typec_altmode_register_driver(struct typec_altmode_driver *drv,
+ struct module *module);
+void typec_altmode_unregister_driver(struct typec_altmode_driver *drv);
+
+#define module_typec_altmode_driver(__typec_altmode_driver) \
+ module_driver(__typec_altmode_driver, typec_altmode_register_driver, \
+ typec_altmode_unregister_driver)
+
+#endif /* __USB_TYPEC_ALTMODE_H */
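
Taken together, the ops vector, the id_table/probe/remove hooks and the
module_typec_altmode_driver() helper give partner alternate mode drivers the
usual driver-model shape. A hypothetical skeleton; the SVID, mode and all
names are illustrative assumptions rather than an existing driver:

	#include <linux/module.h>
	#include <linux/usb/typec_altmode.h>

	static int example_altmode_probe(struct typec_altmode *alt)
	{
		/* Request to enter the mode right away; a real driver would
		 * typically exchange SVID-specific VDMs via ->vdm() first. */
		return typec_altmode_enter(alt);
	}

	static void example_altmode_remove(struct typec_altmode *alt)
	{
		typec_altmode_exit(alt);
	}

	static const struct typec_device_id example_id_table[] = {
		{ .svid = 0xff01, .mode = 1 },	/* illustrative SVID/mode */
		{ },
	};

	static struct typec_altmode_driver example_altmode_driver = {
		.id_table = example_id_table,
		.probe = example_altmode_probe,
		.remove = example_altmode_remove,
		.driver = {
			.name = "example_altmode",
		},
	};
	module_typec_altmode_driver(example_altmode_driver);

	MODULE_LICENSE("GPL");
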
diff --git a/include/linux/usb/typec_dp.h b/include/linux/usb/typec_dp.h
new file mode 100644
index 000000000000..55ae781d60a9
--- /dev/null
+++ b/include/linux/usb/typec_dp.h
@@ -0,0 +1,95 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __USB_TYPEC_DP_H
+#define __USB_TYPEC_DP_H
+
+#include <linux/usb/typec_altmode.h>
+
+#define USB_TYPEC_DP_SID 0xff01
+#define USB_TYPEC_DP_MODE 1
+
+/*
+ * Connector states matching the pin assignments in DisplayPort Alt Mode
+ * Specification.
+ *
+ * These values are meant primarily to be used by the mux drivers, but they are
+ * also used as the "value" part in the alternate mode notification chain, so
+ * receivers of those notifications will always see them.
+ *
+ * Note. DisplayPort USB Type-C Alt Mode Specification version 1.0b deprecated
+ * pin assignments A, B and F, but they are still defined here for legacy
+ * purposes.
+ */
+enum {
+ TYPEC_DP_STATE_A = TYPEC_STATE_MODAL, /* Not supported after v1.0b */
+ TYPEC_DP_STATE_B, /* Not supported after v1.0b */
+ TYPEC_DP_STATE_C,
+ TYPEC_DP_STATE_D,
+ TYPEC_DP_STATE_E,
+ TYPEC_DP_STATE_F, /* Not supported after v1.0b */
+};
+
+/*
+ * struct typec_displayport_data - DisplayPort Alt Mode specific data
+ * @status: Status Update command VDO content
+ * @conf: Configure command VDO content
+ *
+ * This structure is delivered as the data part with the notifications. It
+ * contains the VDOs from the two DisplayPort Type-C alternate mode specific
+ * commands: Status Update and Configure.
+ *
+ * @status will show for example the status of the HPD signal.
+ */
+struct typec_displayport_data {
+ u32 status;
+ u32 conf;
+};
+
+enum {
+ DP_PIN_ASSIGN_A, /* Not supported after v1.0b */
+ DP_PIN_ASSIGN_B, /* Not supported after v1.0b */
+ DP_PIN_ASSIGN_C,
+ DP_PIN_ASSIGN_D,
+ DP_PIN_ASSIGN_E,
+ DP_PIN_ASSIGN_F, /* Not supported after v1.0b */
+};
+
+/* DisplayPort alt mode specific commands */
+#define DP_CMD_STATUS_UPDATE VDO_CMD_VENDOR(0)
+#define DP_CMD_CONFIGURE VDO_CMD_VENDOR(1)
+
+/* DisplayPort Capabilities VDO bits (returned with Discover Modes) */
+#define DP_CAP_CAPABILITY(_cap_) ((_cap_) & 3)
+#define DP_CAP_UFP_D 1
+#define DP_CAP_DFP_D 2
+#define DP_CAP_DFP_D_AND_UFP_D 3
+#define DP_CAP_DP_SIGNALING BIT(2) /* Always set */
+#define DP_CAP_GEN2 BIT(3) /* Reserved after v1.0b */
+#define DP_CAP_RECEPTACLE BIT(6)
+#define DP_CAP_USB BIT(7)
+#define DP_CAP_DFP_D_PIN_ASSIGN(_cap_) (((_cap_) & GENMASK(15, 8)) >> 8)
+#define DP_CAP_UFP_D_PIN_ASSIGN(_cap_) (((_cap_) & GENMASK(23, 16)) >> 16)
+
+/* DisplayPort Status Update VDO bits */
+#define DP_STATUS_CONNECTION(_status_) ((_status_) & 3)
+#define DP_STATUS_CON_DISABLED 0
+#define DP_STATUS_CON_DFP_D 1
+#define DP_STATUS_CON_UFP_D 2
+#define DP_STATUS_CON_BOTH 3
+#define DP_STATUS_POWER_LOW BIT(2)
+#define DP_STATUS_ENABLED BIT(3)
+#define DP_STATUS_PREFER_MULTI_FUNC BIT(4)
+#define DP_STATUS_SWITCH_TO_USB BIT(5)
+#define DP_STATUS_EXIT_DP_MODE BIT(6)
+#define DP_STATUS_HPD_STATE BIT(7) /* 0 = HPD_Low, 1 = HPD_High */
+#define DP_STATUS_IRQ_HPD BIT(8)
+
+/* DisplayPort Configurations VDO bits */
+#define DP_CONF_CURRENTLY(_conf_) ((_conf_) & 3)
+#define DP_CONF_UFP_U_AS_DFP_D BIT(0)
+#define DP_CONF_UFP_U_AS_UFP_D BIT(1)
+#define DP_CONF_SIGNALING_DP BIT(2)
+#define DP_CONF_SIGNALING_GEN_2 BIT(3) /* Reserved after v1.0b */
+#define DP_CONF_PIN_ASSIGNEMENT_SHIFT 8
+#define DP_CONF_PIN_ASSIGNEMENT_MASK GENMASK(15, 8)
+
+#endif /* __USB_TYPEC_DP_H */
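
The DP_CAP_* macros are applied directly to the capabilities VDO returned by
Discover Modes. A short sketch of how a port driver might select a pin
assignment from a partner's capabilities; the preference order and the
assumption that the partner acts as UFP_D are illustrative:

	#include <linux/bitops.h>
	#include <linux/usb/typec_dp.h>

	/* Prefer assignment D (USB + 2-lane DP), fall back to C (4-lane DP). */
	static u8 example_pick_pin_assignment(u32 dp_cap_vdo)
	{
		u8 pins = DP_CAP_UFP_D_PIN_ASSIGN(dp_cap_vdo);

		if (pins & BIT(DP_PIN_ASSIGN_D))
			return BIT(DP_PIN_ASSIGN_D);
		if (pins & BIT(DP_PIN_ASSIGN_C))
			return BIT(DP_PIN_ASSIGN_C);
		return 0;
	}
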
diff --git a/include/linux/usb/typec_mux.h b/include/linux/usb/typec_mux.h
index 12c1b057834b..79293f630ee1 100644
--- a/include/linux/usb/typec_mux.h
+++ b/include/linux/usb/typec_mux.h
@@ -47,7 +47,7 @@ void typec_switch_put(struct typec_switch *sw);
int typec_switch_register(struct typec_switch *sw);
void typec_switch_unregister(struct typec_switch *sw);
-struct typec_mux *typec_mux_get(struct device *dev);
+struct typec_mux *typec_mux_get(struct device *dev, const char *name);
void typec_mux_put(struct typec_mux *mux);
int typec_mux_register(struct typec_mux *mux);
void typec_mux_unregister(struct typec_mux *mux);
diff --git a/include/linux/userfaultfd_k.h b/include/linux/userfaultfd_k.h
index e091f0a11b11..37c9eba75c98 100644
--- a/include/linux/userfaultfd_k.h
+++ b/include/linux/userfaultfd_k.h
@@ -28,7 +28,7 @@
#define UFFD_SHARED_FCNTL_FLAGS (O_CLOEXEC | O_NONBLOCK)
#define UFFD_FLAGS_SET (EFD_SHARED_FCNTL_FLAGS)
-extern int handle_userfault(struct vm_fault *vmf, unsigned long reason);
+extern vm_fault_t handle_userfault(struct vm_fault *vmf, unsigned long reason);
extern ssize_t mcopy_atomic(struct mm_struct *dst_mm, unsigned long dst_start,
unsigned long src_start, unsigned long len,
@@ -77,7 +77,8 @@ extern void userfaultfd_unmap_complete(struct mm_struct *mm,
#else /* CONFIG_USERFAULTFD */
/* mm helpers */
-static inline int handle_userfault(struct vm_fault *vmf, unsigned long reason)
+static inline vm_fault_t handle_userfault(struct vm_fault *vmf,
+ unsigned long reason)
{
return VM_FAULT_SIGBUS;
}
diff --git a/include/linux/verification.h b/include/linux/verification.h
index a10549a6c7cd..cfa4730d607a 100644
--- a/include/linux/verification.h
+++ b/include/linux/verification.h
@@ -13,6 +13,12 @@
#define _LINUX_VERIFICATION_H
/*
+ * Indicate that both builtin trusted keys and secondary trusted keys
+ * should be used.
+ */
+#define VERIFY_USE_SECONDARY_KEYRING ((struct key *)1UL)
+
+/*
* The use to which an asymmetric key is being put.
*/
enum key_being_used_for {
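
The marker is passed in place of a real keyring pointer, so verification falls
back from the builtin trusted keys to keys added to the secondary trusted
keyring at runtime. A minimal sketch, assuming the verify_pkcs7_signature()
prototype declared further down in this header:

	#include <linux/verification.h>

	static int example_verify_blob(const void *data, size_t len,
				       const void *sig, size_t sig_len)
	{
		/* NULL here would mean "builtin trusted keys only". */
		return verify_pkcs7_signature(data, len, sig, sig_len,
					      VERIFY_USE_SECONDARY_KEYRING,
					      VERIFYING_UNSPECIFIED_SIGNATURE,
					      NULL, NULL);
	}
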
diff --git a/include/linux/vga_switcheroo.h b/include/linux/vga_switcheroo.h
index 77f0f0af3a71..a34539b7f750 100644
--- a/include/linux/vga_switcheroo.h
+++ b/include/linux/vga_switcheroo.h
@@ -84,8 +84,8 @@ enum vga_switcheroo_state {
* Client identifier. Audio clients use the same identifier & 0x100.
*/
enum vga_switcheroo_client_id {
- VGA_SWITCHEROO_UNKNOWN_ID = -1,
- VGA_SWITCHEROO_IGD,
+ VGA_SWITCHEROO_UNKNOWN_ID = 0x1000,
+ VGA_SWITCHEROO_IGD = 0,
VGA_SWITCHEROO_DIS,
VGA_SWITCHEROO_MAX_CLIENTS,
};
@@ -151,7 +151,7 @@ int vga_switcheroo_register_client(struct pci_dev *dev,
bool driver_power_control);
int vga_switcheroo_register_audio_client(struct pci_dev *pdev,
const struct vga_switcheroo_client_ops *ops,
- enum vga_switcheroo_client_id id);
+ struct pci_dev *vga_dev);
void vga_switcheroo_client_fb_set(struct pci_dev *dev,
struct fb_info *info);
@@ -180,7 +180,7 @@ static inline int vga_switcheroo_register_handler(const struct vga_switcheroo_ha
enum vga_switcheroo_handler_flags_t handler_flags) { return 0; }
static inline int vga_switcheroo_register_audio_client(struct pci_dev *pdev,
const struct vga_switcheroo_client_ops *ops,
- enum vga_switcheroo_client_id id) { return 0; }
+ struct pci_dev *vga_dev) { return 0; }
static inline void vga_switcheroo_unregister_handler(void) {}
static inline enum vga_switcheroo_handler_flags_t vga_switcheroo_handler_flags(void) { return 0; }
static inline int vga_switcheroo_lock_ddc(struct pci_dev *pdev) { return -ENODEV; }
diff --git a/include/linux/virtio_config.h b/include/linux/virtio_config.h
index 5559a2d31c46..32baf8e26735 100644
--- a/include/linux/virtio_config.h
+++ b/include/linux/virtio_config.h
@@ -79,7 +79,8 @@ struct virtio_config_ops {
u64 (*get_features)(struct virtio_device *vdev);
int (*finalize_features)(struct virtio_device *vdev);
const char *(*bus_name)(struct virtio_device *vdev);
- int (*set_vq_affinity)(struct virtqueue *vq, int cpu);
+ int (*set_vq_affinity)(struct virtqueue *vq,
+ const struct cpumask *cpu_mask);
const struct cpumask *(*get_vq_affinity)(struct virtio_device *vdev,
int index);
};
@@ -236,11 +237,11 @@ const char *virtio_bus_name(struct virtio_device *vdev)
*
*/
static inline
-int virtqueue_set_affinity(struct virtqueue *vq, int cpu)
+int virtqueue_set_affinity(struct virtqueue *vq, const struct cpumask *cpu_mask)
{
struct virtio_device *vdev = vq->vdev;
if (vdev->config->set_vq_affinity)
- return vdev->config->set_vq_affinity(vq, cpu);
+ return vdev->config->set_vq_affinity(vq, cpu_mask);
return 0;
}
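
Call sites that used to pass a CPU number now pass a cpumask, which also
allows hinting a spread over several CPUs. A small sketch of the updated
pattern; the function name is illustrative:

	#include <linux/cpumask.h>
	#include <linux/virtio_config.h>

	static void example_set_queue_affinity(struct virtqueue *vq,
					       unsigned int cpu)
	{
		/* Equivalent of the old virtqueue_set_affinity(vq, cpu). */
		virtqueue_set_affinity(vq, cpumask_of(cpu));
	}
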
diff --git a/include/linux/vmacache.h b/include/linux/vmacache.h
index a5b3aa8d281f..3e9a963edd6a 100644
--- a/include/linux/vmacache.h
+++ b/include/linux/vmacache.h
@@ -5,12 +5,6 @@
#include <linux/sched.h>
#include <linux/mm.h>
-/*
- * Hash based on the page number. Provides a good hit rate for
- * workloads with good locality and those with random accesses as well.
- */
-#define VMACACHE_HASH(addr) ((addr >> PAGE_SHIFT) & VMACACHE_MASK)
-
static inline void vmacache_flush(struct task_struct *tsk)
{
memset(tsk->vmacache.vmas, 0, sizeof(tsk->vmacache.vmas));
diff --git a/include/linux/w1.h b/include/linux/w1.h
index 694101f744c7..3111585c371f 100644
--- a/include/linux/w1.h
+++ b/include/linux/w1.h
@@ -274,6 +274,8 @@ struct w1_family {
struct w1_family_ops *fops;
+ const struct of_device_id *of_match_table;
+
atomic_t refcnt;
};
diff --git a/include/linux/wkup_m3_ipc.h b/include/linux/wkup_m3_ipc.h
index d6ba7d39a62f..e497e621dbb7 100644
--- a/include/linux/wkup_m3_ipc.h
+++ b/include/linux/wkup_m3_ipc.h
@@ -40,6 +40,12 @@ struct wkup_m3_ipc {
struct mbox_chan *mbox;
struct wkup_m3_ipc_ops *ops;
+ int is_rtc_only;
+};
+
+struct wkup_m3_wakeup_src {
+ int irq_nr;
+ char src[10];
};
struct wkup_m3_ipc_ops {
@@ -48,8 +54,11 @@ struct wkup_m3_ipc_ops {
int (*prepare_low_power)(struct wkup_m3_ipc *m3_ipc, int state);
int (*finish_low_power)(struct wkup_m3_ipc *m3_ipc);
int (*request_pm_status)(struct wkup_m3_ipc *m3_ipc);
+ const char *(*request_wake_src)(struct wkup_m3_ipc *m3_ipc);
+ void (*set_rtc_only)(struct wkup_m3_ipc *m3_ipc);
};
struct wkup_m3_ipc *wkup_m3_ipc_get(void);
void wkup_m3_ipc_put(struct wkup_m3_ipc *m3_ipc);
+void wkup_m3_set_rtc_only_mode(void);
#endif /* _LINUX_WKUP_M3_IPC_H */
diff --git a/include/linux/ww_mutex.h b/include/linux/ww_mutex.h
index 39fda195bf78..3af7c0e03be5 100644
--- a/include/linux/ww_mutex.h
+++ b/include/linux/ww_mutex.h
@@ -6,8 +6,10 @@
*
* Copyright (C) 2004, 2005, 2006 Red Hat, Inc., Ingo Molnar <mingo@redhat.com>
*
- * Wound/wait implementation:
+ * Wait/Die implementation:
* Copyright (C) 2013 Canonical Ltd.
+ * Choice of algorithm:
+ * Copyright (C) 2018 VMware Inc.
*
* This file contains the main data structure and API definitions.
*/
@@ -23,14 +25,17 @@ struct ww_class {
struct lock_class_key mutex_key;
const char *acquire_name;
const char *mutex_name;
+ unsigned int is_wait_die;
};
struct ww_acquire_ctx {
struct task_struct *task;
unsigned long stamp;
- unsigned acquired;
+ unsigned int acquired;
+ unsigned short wounded;
+ unsigned short is_wait_die;
#ifdef CONFIG_DEBUG_MUTEXES
- unsigned done_acquire;
+ unsigned int done_acquire;
struct ww_class *ww_class;
struct ww_mutex *contending_lock;
#endif
@@ -38,8 +43,8 @@ struct ww_acquire_ctx {
struct lockdep_map dep_map;
#endif
#ifdef CONFIG_DEBUG_WW_MUTEX_SLOWPATH
- unsigned deadlock_inject_interval;
- unsigned deadlock_inject_countdown;
+ unsigned int deadlock_inject_interval;
+ unsigned int deadlock_inject_countdown;
#endif
};
@@ -58,17 +63,21 @@ struct ww_mutex {
# define __WW_CLASS_MUTEX_INITIALIZER(lockname, class)
#endif
-#define __WW_CLASS_INITIALIZER(ww_class) \
+#define __WW_CLASS_INITIALIZER(ww_class, _is_wait_die) \
{ .stamp = ATOMIC_LONG_INIT(0) \
, .acquire_name = #ww_class "_acquire" \
- , .mutex_name = #ww_class "_mutex" }
+ , .mutex_name = #ww_class "_mutex" \
+ , .is_wait_die = _is_wait_die }
#define __WW_MUTEX_INITIALIZER(lockname, class) \
{ .base = __MUTEX_INITIALIZER(lockname.base) \
__WW_CLASS_MUTEX_INITIALIZER(lockname, class) }
+#define DEFINE_WD_CLASS(classname) \
+ struct ww_class classname = __WW_CLASS_INITIALIZER(classname, 1)
+
#define DEFINE_WW_CLASS(classname) \
- struct ww_class classname = __WW_CLASS_INITIALIZER(classname)
+ struct ww_class classname = __WW_CLASS_INITIALIZER(classname, 0)
#define DEFINE_WW_MUTEX(mutexname, ww_class) \
struct ww_mutex mutexname = __WW_MUTEX_INITIALIZER(mutexname, ww_class)
@@ -102,7 +111,7 @@ static inline void ww_mutex_init(struct ww_mutex *lock,
*
* Context-based w/w mutex acquiring can be done in any order whatsoever within
* a given lock class. Deadlocks will be detected and handled with the
- * wait/wound logic.
+ * wait/die logic.
*
* Mixing of context-based w/w mutex acquiring and single w/w mutex locking can
* result in undetected deadlocks and is so forbidden. Mixing different contexts
@@ -123,6 +132,8 @@ static inline void ww_acquire_init(struct ww_acquire_ctx *ctx,
ctx->task = current;
ctx->stamp = atomic_long_inc_return_relaxed(&ww_class->stamp);
ctx->acquired = 0;
+ ctx->wounded = false;
+ ctx->is_wait_die = ww_class->is_wait_die;
#ifdef CONFIG_DEBUG_MUTEXES
ctx->ww_class = ww_class;
ctx->done_acquire = 0;
@@ -195,13 +206,13 @@ static inline void ww_acquire_fini(struct ww_acquire_ctx *ctx)
* Lock the w/w mutex exclusively for this task.
*
* Deadlocks within a given w/w class of locks are detected and handled with the
- * wait/wound algorithm. If the lock isn't immediately avaiable this function
+ * wait/die algorithm. If the lock isn't immediately available this function
* will either sleep until it is (wait case). Or it selects the current context
- * for backing off by returning -EDEADLK (wound case). Trying to acquire the
+ * for backing off by returning -EDEADLK (die case). Trying to acquire the
* same lock with the same context twice is also detected and signalled by
* returning -EALREADY. Returns 0 if the mutex was successfully acquired.
*
- * In the wound case the caller must release all currently held w/w mutexes for
+ * In the die case the caller must release all currently held w/w mutexes for
* the given context and then wait for this contending lock to be available by
* calling ww_mutex_lock_slow. Alternatively callers can opt to not acquire this
* lock and proceed with trying to acquire further w/w mutexes (e.g. when
@@ -226,14 +237,14 @@ extern int /* __must_check */ ww_mutex_lock(struct ww_mutex *lock, struct ww_acq
* Lock the w/w mutex exclusively for this task.
*
* Deadlocks within a given w/w class of locks are detected and handled with the
- * wait/wound algorithm. If the lock isn't immediately avaiable this function
+ * wait/die algorithm. If the lock isn't immediately available this function
* will either sleep until it is (wait case). Or it selects the current context
- * for backing off by returning -EDEADLK (wound case). Trying to acquire the
+ * for backing off by returning -EDEADLK (die case). Trying to acquire the
* same lock with the same context twice is also detected and signalled by
* returning -EALREADY. Returns 0 if the mutex was successfully acquired. If a
* signal arrives while waiting for the lock then this function returns -EINTR.
*
- * In the wound case the caller must release all currently held w/w mutexes for
+ * In the die case the caller must release all currently held w/w mutexes for
* the given context and then wait for this contending lock to be available by
* calling ww_mutex_lock_slow_interruptible. Alternatively callers can opt to
* not acquire this lock and proceed with trying to acquire further w/w mutexes
@@ -256,7 +267,7 @@ extern int __must_check ww_mutex_lock_interruptible(struct ww_mutex *lock,
* @lock: the mutex to be acquired
* @ctx: w/w acquire context
*
- * Acquires a w/w mutex with the given context after a wound case. This function
+ * Acquires a w/w mutex with the given context after a die case. This function
* will sleep until the lock becomes available.
*
* The caller must have released all w/w mutexes already acquired with the
@@ -290,7 +301,7 @@ ww_mutex_lock_slow(struct ww_mutex *lock, struct ww_acquire_ctx *ctx)
* @lock: the mutex to be acquired
* @ctx: w/w acquire context
*
- * Acquires a w/w mutex with the given context after a wound case. This function
+ * Acquires a w/w mutex with the given context after a die case. This function
* will sleep until the lock becomes available and returns 0 when the lock has
* been acquired. If a signal arrives while waiting for the lock then this
* function returns -EINTR.
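
The -EDEADLK handling described above is normally written as a retry loop:
drop every lock held in the context, take the contended lock with the _slow
variant, then re-acquire the rest. A condensed sketch for two locks; the class
and function names are illustrative:

	#include <linux/ww_mutex.h>

	static DEFINE_WW_CLASS(example_ww_class); /* Wound-Wait; DEFINE_WD_CLASS() gives Wait-Die */

	static void example_lock_pair(struct ww_mutex *a, struct ww_mutex *b)
	{
		struct ww_mutex *contended = NULL;
		struct ww_acquire_ctx ctx;
		int err;

		ww_acquire_init(&ctx, &example_ww_class);
	retry:
		if (contended) {
			/* Backed off earlier: sleep until the contended lock
			 * is granted to this context before taking the rest. */
			ww_mutex_lock_slow(contended, &ctx);
		}
		if (contended != a) {
			err = ww_mutex_lock(a, &ctx);
			if (err == -EDEADLK) {
				if (contended)
					ww_mutex_unlock(contended);
				contended = a;
				goto retry;
			}
		}
		if (contended != b) {
			err = ww_mutex_lock(b, &ctx);
			if (err == -EDEADLK) {
				ww_mutex_unlock(a);
				contended = b;
				goto retry;
			}
		}
		ww_acquire_done(&ctx);

		/* ... both mutexes held ... */

		ww_mutex_unlock(a);
		ww_mutex_unlock(b);
		ww_acquire_fini(&ctx);
	}
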
diff --git a/include/media/cec-notifier.h b/include/media/cec-notifier.h
index cf0add70b0e7..814eeef35a5c 100644
--- a/include/media/cec-notifier.h
+++ b/include/media/cec-notifier.h
@@ -20,8 +20,10 @@ struct cec_notifier;
#if IS_REACHABLE(CONFIG_CEC_CORE) && IS_ENABLED(CONFIG_CEC_NOTIFIER)
/**
- * cec_notifier_get - find or create a new cec_notifier for the given device.
+ * cec_notifier_get_conn - find or create a new cec_notifier for the given
+ * device and connector tuple.
* @dev: device that sends the events.
+ * @conn: the connector name from which the event occurs
*
* If a notifier for device @dev already exists, then increase the refcount
* and return that notifier.
@@ -31,7 +33,8 @@ struct cec_notifier;
*
* Return NULL if the memory could not be allocated.
*/
-struct cec_notifier *cec_notifier_get(struct device *dev);
+struct cec_notifier *cec_notifier_get_conn(struct device *dev,
+ const char *conn);
/**
* cec_notifier_put - decrease refcount and delete when the refcount reaches 0.
@@ -85,7 +88,8 @@ void cec_register_cec_notifier(struct cec_adapter *adap,
struct cec_notifier *notifier);
#else
-static inline struct cec_notifier *cec_notifier_get(struct device *dev)
+static inline struct cec_notifier *cec_notifier_get_conn(struct device *dev,
+ const char *conn)
{
/* A non-NULL pointer is expected on success */
return (struct cec_notifier *)0xdeadfeed;
@@ -121,6 +125,23 @@ static inline void cec_register_cec_notifier(struct cec_adapter *adap,
#endif
/**
+ * cec_notifier_get - find or create a new cec_notifier for the given device.
+ * @dev: device that sends the events.
+ *
+ * If a notifier for device @dev already exists, then increase the refcount
+ * and return that notifier.
+ *
+ * If it doesn't exist, then allocate a new notifier struct and return a
+ * pointer to that new struct.
+ *
+ * Return NULL if the memory could not be allocated.
+ */
+static inline struct cec_notifier *cec_notifier_get(struct device *dev)
+{
+ return cec_notifier_get_conn(dev, NULL);
+}
+
+/**
* cec_notifier_phys_addr_invalidate() - set the physical address to INVALID
*
* @n: the CEC notifier
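
With the connector name added, an HDMI driver that exposes several connectors
can keep one notifier per connector, while the cec_notifier_get() wrapper above
keeps the old single-connector behaviour by passing a NULL name. A hypothetical
call site; the connector names are illustrative:

	#include <linux/errno.h>
	#include <media/cec-notifier.h>

	static int example_hdmi_bind(struct device *dev,
				     struct cec_notifier **n0,
				     struct cec_notifier **n1)
	{
		*n0 = cec_notifier_get_conn(dev, "hdmi-a-0");
		if (!*n0)
			return -ENOMEM;
		*n1 = cec_notifier_get_conn(dev, "hdmi-a-1");
		if (!*n1) {
			cec_notifier_put(*n0);
			return -ENOMEM;
		}
		return 0;
	}
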
diff --git a/include/media/cec-pin.h b/include/media/cec-pin.h
index ed16c6dde0ba..604e79cb6cbf 100644
--- a/include/media/cec-pin.h
+++ b/include/media/cec-pin.h
@@ -25,6 +25,9 @@
* @read_hpd: read the HPD pin. Return true if high, false if low or
* an error if negative. If NULL or -ENOTTY is returned,
* then this is not supported.
+ * @read_5v: read the 5V pin. Return true if high, false if low or
+ * an error if negative. If NULL or -ENOTTY is returned,
+ * then this is not supported.
*
* These operations are used by the cec pin framework to manipulate
* the CEC pin.
@@ -38,6 +41,7 @@ struct cec_pin_ops {
void (*free)(struct cec_adapter *adap);
void (*status)(struct cec_adapter *adap, struct seq_file *file);
int (*read_hpd)(struct cec_adapter *adap);
+ int (*read_5v)(struct cec_adapter *adap);
};
/**
diff --git a/include/media/cec.h b/include/media/cec.h
index 580ab1042898..ff9847f7f99d 100644
--- a/include/media/cec.h
+++ b/include/media/cec.h
@@ -79,7 +79,7 @@ struct cec_event_entry {
};
#define CEC_NUM_CORE_EVENTS 2
-#define CEC_NUM_EVENTS CEC_EVENT_PIN_HPD_HIGH
+#define CEC_NUM_EVENTS CEC_EVENT_PIN_5V_HIGH
struct cec_fh {
struct list_head list;
@@ -309,6 +309,16 @@ void cec_queue_pin_cec_event(struct cec_adapter *adap, bool is_high,
void cec_queue_pin_hpd_event(struct cec_adapter *adap, bool is_high, ktime_t ts);
/**
+ * cec_queue_pin_5v_event() - queue a pin event with a given timestamp.
+ *
+ * @adap: pointer to the cec adapter
+ * @is_high: when true the 5V pin is high, otherwise it is low
+ * @ts: the timestamp for this event
+ *
+ */
+void cec_queue_pin_5v_event(struct cec_adapter *adap, bool is_high, ktime_t ts);
+
+/**
* cec_get_edid_phys_addr() - find and return the physical address
*
* @edid: pointer to the EDID data
diff --git a/include/media/dvb_frontend.h b/include/media/dvb_frontend.h
index 331c8269c00e..6f7a85ab3541 100644
--- a/include/media/dvb_frontend.h
+++ b/include/media/dvb_frontend.h
@@ -52,6 +52,10 @@
*/
#define MAX_DELSYS 8
+/* Helper definitions to be used at frontend drivers */
+#define kHz 1000UL
+#define MHz 1000000UL
+
/**
* struct dvb_frontend_tune_settings - parameters to adjust frontend tuning
*
@@ -73,22 +77,19 @@ struct dvb_frontend;
* struct dvb_tuner_info - Frontend name and min/max ranges/bandwidths
*
* @name: name of the Frontend
- * @frequency_min: minimal frequency supported
- * @frequency_max: maximum frequency supported
- * @frequency_step: frequency step
+ * @frequency_min_hz: minimal frequency supported in Hz
+ * @frequency_max_hz: maximum frequency supported in Hz
+ * @frequency_step_hz: frequency step in Hz
* @bandwidth_min: minimal frontend bandwidth supported
* @bandwidth_max: maximum frontend bandwidth supported
* @bandwidth_step: frontend bandwidth step
- *
- * NOTE: frequency parameters are in Hz, for terrestrial/cable or kHz for
- * satellite.
*/
struct dvb_tuner_info {
char name[128];
- u32 frequency_min;
- u32 frequency_max;
- u32 frequency_step;
+ u32 frequency_min_hz;
+ u32 frequency_max_hz;
+ u32 frequency_step_hz;
u32 bandwidth_min;
u32 bandwidth_max;
@@ -316,6 +317,34 @@ struct analog_demod_ops {
struct dtv_frontend_properties;
+/**
+ * struct dvb_frontend_internal_info - Frontend properties and capabilities
+ *
+ * @name: Name of the frontend
+ * @frequency_min_hz: Minimal frequency supported by the frontend.
+ * @frequency_max_hz: Maximal frequency supported by the frontend.
+ * @frequency_stepsize_hz: All frequencies are multiples of this value.
+ * @frequency_tolerance_hz: Frequency tolerance.
+ * @symbol_rate_min: Minimal symbol rate, in bauds
+ * (for Cable/Satellite systems).
+ * @symbol_rate_max: Maximal symbol rate, in bauds
+ * (for Cable/Satellite systems).
+ * @symbol_rate_tolerance: Maximal symbol rate tolerance, in ppm
+ * (for Cable/Satellite systems).
+ * @caps: Capabilities supported by the frontend,
+ * as specified in &enum fe_caps.
+ */
+struct dvb_frontend_internal_info {
+ char name[128];
+ u32 frequency_min_hz;
+ u32 frequency_max_hz;
+ u32 frequency_stepsize_hz;
+ u32 frequency_tolerance_hz;
+ u32 symbol_rate_min;
+ u32 symbol_rate_max;
+ u32 symbol_rate_tolerance;
+ enum fe_caps caps;
+};
/**
* struct dvb_frontend_ops - Demodulation information and callbacks for
@@ -403,7 +432,7 @@ struct dtv_frontend_properties;
* @analog_ops: pointer to &struct analog_demod_ops
*/
struct dvb_frontend_ops {
- struct dvb_frontend_info info;
+ struct dvb_frontend_internal_info info;
u8 delsys[MAX_DELSYS];
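
With all tuner frequencies now expressed in Hz regardless of delivery system,
the kHz/MHz helpers keep the conversion visible at the driver side. A sketch of
how a tuner's info block might now look; the values are illustrative:

	#include <media/dvb_frontend.h>

	/* Hypothetical satellite tuner: ranges that used to be in kHz are in Hz. */
	static const struct dvb_tuner_info example_tuner_info = {
		.name              = "Example DVB-S tuner",
		.frequency_min_hz  =  950 * MHz,
		.frequency_max_hz  = 2150 * MHz,
		.frequency_step_hz =  125 * kHz,
	};
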
diff --git a/include/media/i2c/lm3560.h b/include/media/i2c/lm3560.h
index a5bd310c9e1e..0e2b1c751a5d 100644
--- a/include/media/i2c/lm3560.h
+++ b/include/media/i2c/lm3560.h
@@ -22,6 +22,7 @@
#include <media/v4l2-subdev.h>
+#define LM3559_NAME "lm3559"
#define LM3560_NAME "lm3560"
#define LM3560_I2C_ADDR (0x53)
diff --git a/include/media/v4l2-common.h b/include/media/v4l2-common.h
index 160bca96d524..cdc87ec61e54 100644
--- a/include/media/v4l2-common.h
+++ b/include/media/v4l2-common.h
@@ -338,7 +338,7 @@ void v4l_bound_align_image(unsigned int *width, unsigned int wmin,
({ \
BUILD_BUG_ON(sizeof((array)->width_field) != sizeof(u32) || \
sizeof((array)->height_field) != sizeof(u32)); \
- (typeof(&(*(array))))__v4l2_find_nearest_size( \
+ (typeof(&(array)[0]))__v4l2_find_nearest_size( \
(array), array_size, sizeof(*(array)), \
offsetof(typeof(*(array)), width_field), \
offsetof(typeof(*(array)), height_field), \
diff --git a/include/media/v4l2-ctrls.h b/include/media/v4l2-ctrls.h
index 5b445b5654f7..f615ba1b29dd 100644
--- a/include/media/v4l2-ctrls.h
+++ b/include/media/v4l2-ctrls.h
@@ -181,10 +181,10 @@ typedef void (*v4l2_ctrl_notify_fnc)(struct v4l2_ctrl *ctrl, void *priv);
* not freed when the control is deleted. Should this be needed
* then a new internal bitfield can be added to tell the framework
* to free this pointer.
- * @p_cur: The control's current value represented via a union with
+ * @p_cur: The control's current value represented via a union which
* provides a standard way of accessing control types
* through a pointer.
- * @p_new: The control's new value represented via a union with provides
+ * @p_new: The control's new value represented via a union which provides
* a standard way of accessing control types
* through a pointer.
*/
diff --git a/include/media/v4l2-ioctl.h b/include/media/v4l2-ioctl.h
index a8dbf5b54b5c..5848d92c30da 100644
--- a/include/media/v4l2-ioctl.h
+++ b/include/media/v4l2-ioctl.h
@@ -621,7 +621,7 @@ const char *v4l2_norm_to_name(v4l2_std_id id);
* v4l2_video_std_frame_period - Ancillary routine that fills a
* struct &v4l2_fract pointer with the default framerate fraction.
*
- * @id: analog TV sdandard ID.
+ * @id: analog TV standard ID.
* @frameperiod: struct &v4l2_fract pointer to be filled
*
*/
@@ -632,7 +632,7 @@ void v4l2_video_std_frame_period(int id, struct v4l2_fract *frameperiod);
* a &v4l2_standard structure according to the @id parameter.
*
* @vs: struct &v4l2_standard pointer to be filled
- * @id: analog TV sdandard ID.
+ * @id: analog TV standard ID.
* @name: name of the standard to be used
*
* .. note::
@@ -643,6 +643,17 @@ int v4l2_video_std_construct(struct v4l2_standard *vs,
int id, const char *name);
/**
+ * v4l_video_std_enumstd - Ancillary routine that fills in the fields of
+ * a &v4l2_standard structure according to the @id and @vs->index
+ * parameters.
+ *
+ * @vs: struct &v4l2_standard pointer to be filled.
+ * @id: analog TV standard ID.
+ *
+ */
+int v4l_video_std_enumstd(struct v4l2_standard *vs, v4l2_std_id id);
+
+/**
* v4l_printk_ioctl - Ancillary routine that prints the ioctl in a
* human-readable format.
*
diff --git a/include/media/v4l2-mediabus.h b/include/media/v4l2-mediabus.h
index 4d8626c468bc..4bbb5f3d2b02 100644
--- a/include/media/v4l2-mediabus.h
+++ b/include/media/v4l2-mediabus.h
@@ -45,6 +45,8 @@
/* Active state of Sync-on-green (SoG) signal, 0/1 for LOW/HIGH respectively. */
#define V4L2_MBUS_VIDEO_SOG_ACTIVE_HIGH BIT(12)
#define V4L2_MBUS_VIDEO_SOG_ACTIVE_LOW BIT(13)
+#define V4L2_MBUS_DATA_ENABLE_HIGH BIT(14)
+#define V4L2_MBUS_DATA_ENABLE_LOW BIT(15)
/* Serial flags */
/* How many lanes the client can use */
diff --git a/include/media/v4l2-mem2mem.h b/include/media/v4l2-mem2mem.h
index 3d07ba3a8262..d655720e16a1 100644
--- a/include/media/v4l2-mem2mem.h
+++ b/include/media/v4l2-mem2mem.h
@@ -32,7 +32,7 @@
* assumed that one source and one destination buffer are all
* that is required for the driver to perform one full transaction.
* This method may not sleep.
- * @job_abort: required. Informs the driver that it has to abort the currently
+ * @job_abort: optional. Informs the driver that it has to abort the currently
* running transaction as soon as possible (i.e. as soon as it can
* stop the device safely; e.g. in the next interrupt handler),
* even if the transaction would not have been finished by then.
@@ -40,19 +40,14 @@
* v4l2_m2m_job_finish() (as if the transaction ended normally).
* This function does not have to (and will usually not) wait
* until the device enters a state when it can be stopped.
- * @lock: optional. Define a driver's own lock callback, instead of using
- * &v4l2_m2m_ctx->q_lock.
- * @unlock: optional. Define a driver's own unlock callback, instead of
- * using &v4l2_m2m_ctx->q_lock.
*/
struct v4l2_m2m_ops {
void (*device_run)(void *priv);
int (*job_ready)(void *priv);
void (*job_abort)(void *priv);
- void (*lock)(void *priv);
- void (*unlock)(void *priv);
};
+struct video_device;
struct v4l2_m2m_dev;
/**
@@ -328,6 +323,24 @@ int v4l2_m2m_mmap(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
*/
struct v4l2_m2m_dev *v4l2_m2m_init(const struct v4l2_m2m_ops *m2m_ops);
+#if defined(CONFIG_MEDIA_CONTROLLER)
+void v4l2_m2m_unregister_media_controller(struct v4l2_m2m_dev *m2m_dev);
+int v4l2_m2m_register_media_controller(struct v4l2_m2m_dev *m2m_dev,
+ struct video_device *vdev, int function);
+#else
+static inline void
+v4l2_m2m_unregister_media_controller(struct v4l2_m2m_dev *m2m_dev)
+{
+}
+
+static inline int
+v4l2_m2m_register_media_controller(struct v4l2_m2m_dev *m2m_dev,
+ struct video_device *vdev, int function)
+{
+ return 0;
+}
+#endif
+
/**
* v4l2_m2m_release() - cleans up and frees a m2m_dev structure
*
@@ -437,6 +450,35 @@ static inline void *v4l2_m2m_next_dst_buf(struct v4l2_m2m_ctx *m2m_ctx)
}
/**
+ * v4l2_m2m_last_buf() - return last buffer from the list of ready buffers
+ *
+ * @q_ctx: pointer to struct @v4l2_m2m_queue_ctx
+ */
+void *v4l2_m2m_last_buf(struct v4l2_m2m_queue_ctx *q_ctx);
+
+/**
+ * v4l2_m2m_last_src_buf() - return last source buffer from the list of
+ * ready buffers
+ *
+ * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
+ */
+static inline void *v4l2_m2m_last_src_buf(struct v4l2_m2m_ctx *m2m_ctx)
+{
+ return v4l2_m2m_last_buf(&m2m_ctx->out_q_ctx);
+}
+
+/**
+ * v4l2_m2m_last_dst_buf() - return last destination buffer from the list of
+ * ready buffers
+ *
+ * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
+ */
+static inline void *v4l2_m2m_last_dst_buf(struct v4l2_m2m_ctx *m2m_ctx)
+{
+ return v4l2_m2m_last_buf(&m2m_ctx->cap_q_ctx);
+}
+
+/**
* v4l2_m2m_for_each_dst_buf() - iterate over a list of destination ready
* buffers
*
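
The media controller hooks are meant to be called from the driver's
registration path (they compile away without CONFIG_MEDIA_CONTROLLER), and the
new *_last_buf() helpers mirror the existing *_next_buf() accessors. A brief
hypothetical sketch; the entity function and names are assumptions:

	#include <media/media-entity.h>
	#include <media/v4l2-mem2mem.h>

	static int example_m2m_register(struct v4l2_m2m_dev *m2m_dev,
					struct video_device *vdev)
	{
		return v4l2_m2m_register_media_controller(m2m_dev, vdev,
						MEDIA_ENT_F_PROC_VIDEO_SCALER);
	}

	static void example_device_run(struct v4l2_m2m_ctx *m2m_ctx)
	{
		struct vb2_v4l2_buffer *src = v4l2_m2m_last_src_buf(m2m_ctx);
		struct vb2_v4l2_buffer *dst = v4l2_m2m_last_dst_buf(m2m_ctx);

		/* ... program the hardware with src and dst ... */
	}
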
diff --git a/include/media/vsp1.h b/include/media/vsp1.h
index 678c24de1ac6..3093b9cb9067 100644
--- a/include/media/vsp1.h
+++ b/include/media/vsp1.h
@@ -25,6 +25,7 @@ int vsp1_du_init(struct device *dev);
* struct vsp1_du_lif_config - VSP LIF configuration
* @width: output frame width
* @height: output frame height
+ * @interlaced: true for interlaced pipelines
* @callback: frame completion callback function (optional). When a callback
* is provided, the VSP driver guarantees that it will be called once
* and only once for each vsp1_du_atomic_flush() call.
@@ -33,6 +34,7 @@ int vsp1_du_init(struct device *dev);
struct vsp1_du_lif_config {
unsigned int width;
unsigned int height;
+ bool interlaced;
void (*callback)(void *data, bool completed, u32 crc);
void *callback_data;
diff --git a/include/misc/cxl-base.h b/include/misc/cxl-base.h
index b2ebc91fe09a..f53808fa638a 100644
--- a/include/misc/cxl-base.h
+++ b/include/misc/cxl-base.h
@@ -10,8 +10,6 @@
#ifndef _MISC_CXL_BASE_H
#define _MISC_CXL_BASE_H
-#include <misc/cxl.h>
-
#ifdef CONFIG_CXL_BASE
#define CXL_IRQ_RANGES 4
@@ -41,10 +39,6 @@ static inline void cxl_ctx_put(void)
struct cxl_afu *cxl_afu_get(struct cxl_afu *afu);
void cxl_afu_put(struct cxl_afu *afu);
void cxl_slbia(struct mm_struct *mm);
-bool cxl_pci_associate_default_context(struct pci_dev *dev, struct cxl_afu *afu);
-void cxl_pci_disable_device(struct pci_dev *dev);
-int cxl_cx4_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type);
-void cxl_cx4_teardown_msi_irqs(struct pci_dev *pdev);
#else /* CONFIG_CXL_BASE */
@@ -52,10 +46,6 @@ static inline bool cxl_ctx_in_use(void) { return false; }
static inline struct cxl_afu *cxl_afu_get(struct cxl_afu *afu) { return NULL; }
static inline void cxl_afu_put(struct cxl_afu *afu) {}
static inline void cxl_slbia(struct mm_struct *mm) {}
-static inline bool cxl_pci_associate_default_context(struct pci_dev *dev, struct cxl_afu *afu) { return false; }
-static inline void cxl_pci_disable_device(struct pci_dev *dev) {}
-static inline int cxl_cx4_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type) { return -ENODEV; }
-static inline void cxl_cx4_teardown_msi_irqs(struct pci_dev *pdev) {}
#endif /* CONFIG_CXL_BASE */
diff --git a/include/misc/cxl.h b/include/misc/cxl.h
index b712be544f8c..ea9ff4a1a9ca 100644
--- a/include/misc/cxl.h
+++ b/include/misc/cxl.h
@@ -24,46 +24,6 @@
* generic PCI API. This API is agnostic to the actual AFU.
*/
-#define CXL_SLOT_FLAG_DMA 0x1
-
-/*
- * Checks if the given card is in a cxl capable slot. Pass CXL_SLOT_FLAG_DMA if
- * the card requires CAPP DMA mode to also check if the system supports it.
- * This is intended to be used by bi-modal devices to determine if they can use
- * cxl mode or if they should continue running in PCI mode.
- *
- * Note that this only checks if the slot is cxl capable - it does not
- * currently check if the CAPP is currently available for chips where it can be
- * assigned to different PHBs on a first come first serve basis (i.e. P8)
- */
-bool cxl_slot_is_supported(struct pci_dev *dev, int flags);
-
-
-#define CXL_BIMODE_CXL 1
-#define CXL_BIMODE_PCI 2
-
-/*
- * Check the mode that the given bi-modal CXL adapter is currently in and
- * change it if necessary. This does not apply to AFU drivers.
- *
- * If the mode matches the requested mode this function will return 0 - if the
- * driver was expecting the generic CXL driver to have bound to the adapter and
- * it gets this return value it should fail the probe function to give the CXL
- * driver a chance to probe it.
- *
- * If the mode does not match it will start a background task to unplug the
- * device from Linux and switch its mode, and will return -EBUSY. At this
- * point the calling driver should make sure it has released the device and
- * fail its probe function.
- *
- * The offset of the CXL VSEC can be provided to this function. If 0 is passed,
- * this function will search for a CXL VSEC with ID 0x1280 and return -ENODEV
- * if it is not found.
- */
-#ifdef CONFIG_CXL_BIMODAL
-int cxl_check_and_switch_mode(struct pci_dev *dev, int mode, int vsec);
-#endif
-
/* Get the AFU associated with a pci_dev */
struct cxl_afu *cxl_pci_to_afu(struct pci_dev *dev);
@@ -174,14 +134,6 @@ int cxl_afu_reset(struct cxl_context *ctx);
void cxl_set_master(struct cxl_context *ctx);
/*
- * Sets the context to use real mode memory accesses to operate with
- * translation disabled. Note that this only makes sense for kernel contexts
- * under bare metal, and will not work with virtualisation. May only be
- * performed on stopped contexts.
- */
-int cxl_set_translation_mode(struct cxl_context *ctx, bool real_mode);
-
-/*
* Map and unmap the AFU Problem Space area. The amount and location mapped
* depends on if this context is a master or slave.
*/
@@ -192,26 +144,6 @@ void cxl_psa_unmap(void __iomem *addr);
int cxl_process_element(struct cxl_context *ctx);
/*
- * Limit the number of interrupts that a single context can allocate via
- * cxl_start_work. If using the api with a real phb, this may be used to
- * request that additional default contexts be created when allocating
- * interrupts via pci_enable_msix_range. These will be set to the same running
- * state as the default context, and if that is running it will reuse the
- * parameters previously passed to cxl_start_context for the default context.
- */
-int cxl_set_max_irqs_per_process(struct pci_dev *dev, int irqs);
-int cxl_get_max_irqs_per_process(struct pci_dev *dev);
-
-/*
- * Use to simultaneously iterate over hardware interrupt numbers, contexts and
- * afu interrupt numbers allocated for the device via pci_enable_msix_range and
- * is a useful convenience function when working with hardware that has
- * limitations on the number of interrupts per process. *ctx and *afu_irq
- * should be NULL and 0 to start the iteration.
- */
-int cxl_next_msi_hwirq(struct pci_dev *pdev, struct cxl_context **ctx, int *afu_irq);
-
-/*
* These calls allow drivers to create their own file descriptors and make them
* identical to the cxl file descriptor user API. An example use case:
*
diff --git a/include/net/9p/client.h b/include/net/9p/client.h
index 7af9d769b97d..0fa0fbab33b0 100644
--- a/include/net/9p/client.h
+++ b/include/net/9p/client.h
@@ -27,6 +27,7 @@
#define NET_9P_CLIENT_H
#include <linux/utsname.h>
+#include <linux/idr.h>
/* Number of requests per row */
#define P9_ROW_MAXTAG 255
@@ -112,7 +113,7 @@ enum p9_req_status_t {
struct p9_req_t {
int status;
int t_err;
- wait_queue_head_t *wq;
+ wait_queue_head_t wq;
struct p9_fcall *tc;
struct p9_fcall *rc;
void *aux;
@@ -128,8 +129,7 @@ struct p9_req_t {
* @proto_version: 9P protocol version to use
* @trans_mod: module API instantiated with this client
* @trans: tranport instance state and API
- * @fidpool: fid handle accounting for session
- * @fidlist: List of active fid handles
+ * @fids: All active FID handles
* @tagpool - transaction id accounting for session
* @reqs - 2D array of requests
* @max_tag - current maximum tag id allocated
@@ -169,8 +169,7 @@ struct p9_client {
} tcp;
} trans_opts;
- struct p9_idpool *fidpool;
- struct list_head fidlist;
+ struct idr fids;
struct p9_idpool *tagpool;
struct p9_req_t *reqs[P9_ROW_MAXTAG];
@@ -188,7 +187,6 @@ struct p9_client {
* @iounit: the server reported maximum transaction size for this file
* @uid: the numeric uid of the local user who owns this handle
* @rdir: readdir accounting structure (allocated on demand)
- * @flist: per-client-instance fid tracking
* @dlist: per-dentry fid tracking
*
* TODO: This needs lots of explanation.
@@ -204,7 +202,6 @@ struct p9_fid {
void *rdir;
- struct list_head flist;
struct hlist_node dlist; /* list of all fids attached to a dentry */
};
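
Replacing the fidpool/fidlist pair with a struct idr folds FID number
allocation and handle lookup into one structure. A rough sketch of the generic
idr allocation pattern this enables; the lock and limits are assumptions, not
the actual 9p client code:

	#include <linux/idr.h>
	#include <linux/spinlock.h>

	static int example_id_alloc(struct idr *ids, spinlock_t *lock,
				    void *ptr, int max_id, u32 *out_id)
	{
		int ret;

		idr_preload(GFP_KERNEL);
		spin_lock(lock);
		ret = idr_alloc(ids, ptr, 0, max_id, GFP_NOWAIT);
		spin_unlock(lock);
		idr_preload_end();

		if (ret < 0)
			return ret;
		*out_id = ret;
		return 0;
	}
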
diff --git a/include/net/act_api.h b/include/net/act_api.h
index 9e59ebfded62..1ad5b19e83a9 100644
--- a/include/net/act_api.h
+++ b/include/net/act_api.h
@@ -6,6 +6,7 @@
* Public action API for classifiers/qdiscs
*/
+#include <linux/refcount.h>
#include <net/sch_generic.h>
#include <net/pkt_sched.h>
#include <net/net_namespace.h>
@@ -26,8 +27,8 @@ struct tc_action {
struct tcf_idrinfo *idrinfo;
u32 tcfa_index;
- int tcfa_refcnt;
- int tcfa_bindcnt;
+ refcount_t tcfa_refcnt;
+ atomic_t tcfa_bindcnt;
u32 tcfa_capab;
int tcfa_action;
struct tcf_t tcfa_tm;
@@ -37,7 +38,7 @@ struct tc_action {
spinlock_t tcfa_lock;
struct gnet_stats_basic_cpu __percpu *cpu_bstats;
struct gnet_stats_queue __percpu *cpu_qstats;
- struct tc_cookie *act_cookie;
+ struct tc_cookie __rcu *act_cookie;
struct tcf_chain *goto_chain;
};
#define tcf_index common.tcfa_index
@@ -84,14 +85,15 @@ struct tc_action_ops {
size_t size;
struct module *owner;
int (*act)(struct sk_buff *, const struct tc_action *,
- struct tcf_result *);
+ struct tcf_result *); /* called under RCU BH lock */
int (*dump)(struct sk_buff *, struct tc_action *, int, int);
void (*cleanup)(struct tc_action *);
int (*lookup)(struct net *net, struct tc_action **a, u32 index,
struct netlink_ext_ack *extack);
int (*init)(struct net *net, struct nlattr *nla,
struct nlattr *est, struct tc_action **act, int ovr,
- int bind, struct netlink_ext_ack *extack);
+ int bind, bool rtnl_held,
+ struct netlink_ext_ack *extack);
int (*walk)(struct net *, struct sk_buff *,
struct netlink_callback *, int,
const struct tc_action_ops *,
@@ -99,6 +101,8 @@ struct tc_action_ops {
void (*stats_update)(struct tc_action *, u64, u32, u64);
size_t (*get_fill_size)(const struct tc_action *act);
struct net_device *(*get_dev)(const struct tc_action *a);
+ void (*put_dev)(struct net_device *dev);
+ int (*delete)(struct net *net, u32 index);
};
struct tc_action_net {
@@ -151,6 +155,10 @@ int tcf_idr_create(struct tc_action_net *tn, u32 index, struct nlattr *est,
int bind, bool cpustats);
void tcf_idr_insert(struct tc_action_net *tn, struct tc_action *a);
+void tcf_idr_cleanup(struct tc_action_net *tn, u32 index);
+int tcf_idr_check_alloc(struct tc_action_net *tn, u32 *index,
+ struct tc_action **a, int bind);
+int tcf_idr_delete_index(struct tc_action_net *tn, u32 index);
int __tcf_idr_release(struct tc_action *a, bool bind, bool strict);
static inline int tcf_idr_release(struct tc_action *a, bool bind)
@@ -161,18 +169,20 @@ static inline int tcf_idr_release(struct tc_action *a, bool bind)
int tcf_register_action(struct tc_action_ops *a, struct pernet_operations *ops);
int tcf_unregister_action(struct tc_action_ops *a,
struct pernet_operations *ops);
-int tcf_action_destroy(struct list_head *actions, int bind);
+int tcf_action_destroy(struct tc_action *actions[], int bind);
int tcf_action_exec(struct sk_buff *skb, struct tc_action **actions,
int nr_actions, struct tcf_result *res);
int tcf_action_init(struct net *net, struct tcf_proto *tp, struct nlattr *nla,
struct nlattr *est, char *name, int ovr, int bind,
- struct list_head *actions, size_t *attr_size,
- struct netlink_ext_ack *extack);
+ struct tc_action *actions[], size_t *attr_size,
+ bool rtnl_held, struct netlink_ext_ack *extack);
struct tc_action *tcf_action_init_1(struct net *net, struct tcf_proto *tp,
struct nlattr *nla, struct nlattr *est,
char *name, int ovr, int bind,
+ bool rtnl_held,
struct netlink_ext_ack *extack);
-int tcf_action_dump(struct sk_buff *skb, struct list_head *, int, int);
+int tcf_action_dump(struct sk_buff *skb, struct tc_action *actions[], int bind,
+ int ref);
int tcf_action_dump_old(struct sk_buff *skb, struct tc_action *a, int, int);
int tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int, int);
int tcf_action_copy_stats(struct sk_buff *, struct tc_action *, int);
@@ -190,9 +200,6 @@ static inline void tcf_action_stats_update(struct tc_action *a, u64 bytes,
#endif
}
-typedef int tc_setup_cb_t(enum tc_setup_type type,
- void *type_data, void *cb_priv);
-
#ifdef CONFIG_NET_CLS_ACT
int tc_setup_cb_egdev_register(const struct net_device *dev,
tc_setup_cb_t *cb, void *cb_priv);
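
Moving tcfa_refcnt to refcount_t and tcfa_bindcnt to atomic_t allows the
counters to be taken and dropped without relying on the RTNL lock, matching
the new rtnl_held parameters above. A generic sketch of the pattern; the
helper names are illustrative, not the actual act_api implementation:

	#include <linux/refcount.h>
	#include <net/act_api.h>

	static void example_action_hold(struct tc_action *a, bool bind)
	{
		if (bind)
			atomic_inc(&a->tcfa_bindcnt);
		refcount_inc(&a->tcfa_refcnt);
	}

	static bool example_action_put(struct tc_action *a, bool bind)
	{
		if (bind)
			atomic_dec(&a->tcfa_bindcnt);
		/* True on the last reference; the caller frees the action. */
		return refcount_dec_and_test(&a->tcfa_refcnt);
	}
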
diff --git a/include/net/addrconf.h b/include/net/addrconf.h
index 5f43f7a70fe6..6def0351bcc3 100644
--- a/include/net/addrconf.h
+++ b/include/net/addrconf.h
@@ -108,6 +108,7 @@ int ipv6_get_lladdr(struct net_device *dev, struct in6_addr *addr,
u32 banned_flags);
bool inet_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2,
bool match_wildcard);
+bool inet_rcv_saddr_any(const struct sock *sk);
void addrconf_join_solict(struct net_device *dev, const struct in6_addr *addr);
void addrconf_leave_solict(struct inet6_dev *idev, const struct in6_addr *addr);
diff --git a/include/net/af_ieee802154.h b/include/net/af_ieee802154.h
index a5563d27a3eb..8003a9f6eb43 100644
--- a/include/net/af_ieee802154.h
+++ b/include/net/af_ieee802154.h
@@ -56,6 +56,7 @@ struct sockaddr_ieee802154 {
#define WPAN_WANTACK 0
#define WPAN_SECURITY 1
#define WPAN_SECURITY_LEVEL 2
+#define WPAN_WANTLQI 3
#define WPAN_SECURITY_DEFAULT 0
#define WPAN_SECURITY_OFF 1
diff --git a/include/net/af_rxrpc.h b/include/net/af_rxrpc.h
index 8ae8ee004258..f53edb3754bc 100644
--- a/include/net/af_rxrpc.h
+++ b/include/net/af_rxrpc.h
@@ -61,7 +61,7 @@ int rxrpc_kernel_send_data(struct socket *, struct rxrpc_call *,
struct msghdr *, size_t,
rxrpc_notify_end_tx_t);
int rxrpc_kernel_recv_data(struct socket *, struct rxrpc_call *,
- void *, size_t, size_t *, bool, u32 *, u16 *);
+ struct iov_iter *, bool, u32 *, u16 *);
bool rxrpc_kernel_abort_call(struct socket *, struct rxrpc_call *,
u32, int, const char *);
void rxrpc_kernel_end_call(struct socket *, struct rxrpc_call *);
diff --git a/include/net/af_vsock.h b/include/net/af_vsock.h
index 9324ac2d9ff2..43913ae79f64 100644
--- a/include/net/af_vsock.h
+++ b/include/net/af_vsock.h
@@ -64,7 +64,8 @@ struct vsock_sock {
struct list_head pending_links;
struct list_head accept_queue;
bool rejected;
- struct delayed_work dwork;
+ struct delayed_work connect_work;
+ struct delayed_work pending_work;
struct delayed_work close_work;
bool close_work_scheduled;
u32 peer_shutdown;
@@ -77,7 +78,6 @@ struct vsock_sock {
s64 vsock_stream_has_data(struct vsock_sock *vsk);
s64 vsock_stream_has_space(struct vsock_sock *vsk);
-void vsock_pending_work(struct work_struct *work);
struct sock *__vsock_create(struct net *net,
struct socket *sock,
struct sock *parent,
diff --git a/include/net/bluetooth/bluetooth.h b/include/net/bluetooth/bluetooth.h
index 53ce8176c313..ec9d6bc65855 100644
--- a/include/net/bluetooth/bluetooth.h
+++ b/include/net/bluetooth/bluetooth.h
@@ -271,7 +271,7 @@ int bt_sock_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
int flags);
int bt_sock_stream_recvmsg(struct socket *sock, struct msghdr *msg,
size_t len, int flags);
-__poll_t bt_sock_poll_mask(struct socket *sock, __poll_t events);
+__poll_t bt_sock_poll(struct file *file, struct socket *sock, poll_table *wait);
int bt_sock_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
int bt_sock_wait_state(struct sock *sk, int state, unsigned long timeo);
int bt_sock_wait_ready(struct sock *sk, unsigned long flags);
diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h
index 1668211297a9..cdd9f1fe7cfa 100644
--- a/include/net/bluetooth/hci.h
+++ b/include/net/bluetooth/hci.h
@@ -183,6 +183,15 @@ enum {
* during the hdev->setup vendor callback.
*/
HCI_QUIRK_NON_PERSISTENT_DIAG,
+
+ /* When this quirk is set, setup() will be run after every
+ * open() and not just after the first open().
+ *
+ * This quirk can be set before hci_register_dev is called or
+ * during the hdev->setup vendor callback.
+ *
+ */
+ HCI_QUIRK_NON_PERSISTENT_SETUP,
};
/* HCI device flags */
@@ -260,6 +269,7 @@ enum {
HCI_VENDOR_DIAG,
HCI_FORCE_BREDR_SMP,
HCI_FORCE_STATIC_ADDR,
+ HCI_LL_RPA_RESOLUTION,
__HCI_NUM_FLAGS,
};
@@ -291,6 +301,14 @@ enum {
#define HCI_DH3 0x0800
#define HCI_DH5 0x8000
+/* HCI packet types inverted masks */
+#define HCI_2DH1 0x0002
+#define HCI_3DH1 0x0004
+#define HCI_2DH3 0x0100
+#define HCI_3DH3 0x0200
+#define HCI_2DH5 0x1000
+#define HCI_3DH5 0x2000
+
#define HCI_HV1 0x0020
#define HCI_HV2 0x0040
#define HCI_HV3 0x0080
@@ -354,6 +372,8 @@ enum {
#define LMP_PCONTROL 0x04
#define LMP_TRANSPARENT 0x08
+#define LMP_EDR_2M 0x02
+#define LMP_EDR_3M 0x04
#define LMP_RSSI_INQ 0x40
#define LMP_ESCO 0x80
@@ -361,7 +381,9 @@ enum {
#define LMP_EV5 0x02
#define LMP_NO_BREDR 0x20
#define LMP_LE 0x40
+#define LMP_EDR_3SLOT 0x80
+#define LMP_EDR_5SLOT 0x01
#define LMP_SNIFF_SUBR 0x02
#define LMP_PAUSE_ENC 0x04
#define LMP_EDR_ESCO_2M 0x20
@@ -398,7 +420,12 @@ enum {
#define HCI_LE_SLAVE_FEATURES 0x08
#define HCI_LE_PING 0x10
#define HCI_LE_DATA_LEN_EXT 0x20
+#define HCI_LE_PHY_2M 0x01
+#define HCI_LE_PHY_CODED 0x08
+#define HCI_LE_EXT_ADV 0x10
#define HCI_LE_EXT_SCAN_POLICY 0x80
#define HCI_LE_CHAN_SEL_ALG2 0x40
/* Connection modes */
@@ -1490,6 +1517,16 @@ struct hci_cp_le_write_def_data_len {
__le16 tx_time;
} __packed;
+#define HCI_OP_LE_CLEAR_RESOLV_LIST 0x2029
+
+#define HCI_OP_LE_READ_RESOLV_LIST_SIZE 0x202a
+struct hci_rp_le_read_resolv_list_size {
+ __u8 status;
+ __u8 size;
+} __packed;
+
+#define HCI_OP_LE_SET_ADDR_RESOLV_ENABLE 0x202d
+
#define HCI_OP_LE_READ_MAX_DATA_LEN 0x202f
struct hci_rp_le_read_max_data_len {
__u8 status;
@@ -1506,6 +1543,134 @@ struct hci_cp_le_set_default_phy {
__u8 rx_phys;
} __packed;
+#define HCI_LE_SET_PHY_1M 0x01
+#define HCI_LE_SET_PHY_2M 0x02
+#define HCI_LE_SET_PHY_CODED 0x04
+
+#define HCI_OP_LE_SET_EXT_SCAN_PARAMS 0x2041
+struct hci_cp_le_set_ext_scan_params {
+ __u8 own_addr_type;
+ __u8 filter_policy;
+ __u8 scanning_phys;
+ __u8 data[0];
+} __packed;
+
+#define LE_SCAN_PHY_1M 0x01
+#define LE_SCAN_PHY_2M 0x02
+#define LE_SCAN_PHY_CODED 0x04
+
+struct hci_cp_le_scan_phy_params {
+ __u8 type;
+ __le16 interval;
+ __le16 window;
+} __packed;
+
+#define HCI_OP_LE_SET_EXT_SCAN_ENABLE 0x2042
+struct hci_cp_le_set_ext_scan_enable {
+ __u8 enable;
+ __u8 filter_dup;
+ __le16 duration;
+ __le16 period;
+} __packed;
+
+#define HCI_OP_LE_EXT_CREATE_CONN 0x2043
+struct hci_cp_le_ext_create_conn {
+ __u8 filter_policy;
+ __u8 own_addr_type;
+ __u8 peer_addr_type;
+ bdaddr_t peer_addr;
+ __u8 phys;
+ __u8 data[0];
+} __packed;
+
+struct hci_cp_le_ext_conn_param {
+ __le16 scan_interval;
+ __le16 scan_window;
+ __le16 conn_interval_min;
+ __le16 conn_interval_max;
+ __le16 conn_latency;
+ __le16 supervision_timeout;
+ __le16 min_ce_len;
+ __le16 max_ce_len;
+} __packed;
+
+#define HCI_OP_LE_READ_NUM_SUPPORTED_ADV_SETS 0x203b
+struct hci_rp_le_read_num_supported_adv_sets {
+ __u8 status;
+ __u8 num_of_sets;
+} __packed;
+
+#define HCI_OP_LE_SET_EXT_ADV_PARAMS 0x2036
+struct hci_cp_le_set_ext_adv_params {
+ __u8 handle;
+ __le16 evt_properties;
+ __u8 min_interval[3];
+ __u8 max_interval[3];
+ __u8 channel_map;
+ __u8 own_addr_type;
+ __u8 peer_addr_type;
+ bdaddr_t peer_addr;
+ __u8 filter_policy;
+ __u8 tx_power;
+ __u8 primary_phy;
+ __u8 secondary_max_skip;
+ __u8 secondary_phy;
+ __u8 sid;
+ __u8 notif_enable;
+} __packed;
+
+#define HCI_ADV_PHY_1M 0x01
+#define HCI_ADV_PHY_2M 0x02
+#define HCI_ADV_PHY_CODED 0x03
+
+struct hci_rp_le_set_ext_adv_params {
+ __u8 status;
+ __u8 tx_power;
+} __packed;
+
+#define HCI_OP_LE_SET_EXT_ADV_ENABLE 0x2039
+struct hci_cp_le_set_ext_adv_enable {
+ __u8 enable;
+ __u8 num_of_sets;
+ __u8 data[0];
+} __packed;
+
+struct hci_cp_ext_adv_set {
+ __u8 handle;
+ __le16 duration;
+ __u8 max_events;
+} __packed;
+
+#define HCI_OP_LE_SET_EXT_ADV_DATA 0x2037
+struct hci_cp_le_set_ext_adv_data {
+ __u8 handle;
+ __u8 operation;
+ __u8 frag_pref;
+ __u8 length;
+ __u8 data[HCI_MAX_AD_LENGTH];
+} __packed;
+
+#define HCI_OP_LE_SET_EXT_SCAN_RSP_DATA 0x2038
+struct hci_cp_le_set_ext_scan_rsp_data {
+ __u8 handle;
+ __u8 operation;
+ __u8 frag_pref;
+ __u8 length;
+ __u8 data[HCI_MAX_AD_LENGTH];
+} __packed;
+
+#define LE_SET_ADV_DATA_OP_COMPLETE 0x03
+
+#define LE_SET_ADV_DATA_NO_FRAG 0x01
+
+#define HCI_OP_LE_CLEAR_ADV_SETS 0x203d
+
+#define HCI_OP_LE_SET_ADV_SET_RAND_ADDR 0x2035
+struct hci_cp_le_set_adv_set_rand_addr {
+ __u8 handle;
+ bdaddr_t bdaddr;
+} __packed;
+
/* ---- HCI Events ---- */
#define HCI_EV_INQUIRY_COMPLETE 0x01
@@ -1893,6 +2058,23 @@ struct hci_ev_le_conn_complete {
#define LE_ADV_SCAN_IND 0x02
#define LE_ADV_NONCONN_IND 0x03
#define LE_ADV_SCAN_RSP 0x04
+#define LE_ADV_INVALID 0x05
+
+/* Legacy event types in extended adv report */
+#define LE_LEGACY_ADV_IND 0x0013
+#define LE_LEGACY_ADV_DIRECT_IND 0x0015
+#define LE_LEGACY_ADV_SCAN_IND 0x0012
+#define LE_LEGACY_NONCONN_IND 0x0010
+#define LE_LEGACY_SCAN_RSP_ADV 0x001b
+#define LE_LEGACY_SCAN_RSP_ADV_SCAN 0x001a
+
+/* Extended Advertising event types */
+#define LE_EXT_ADV_NON_CONN_IND 0x0000
+#define LE_EXT_ADV_CONN_IND 0x0001
+#define LE_EXT_ADV_SCAN_IND 0x0002
+#define LE_EXT_ADV_DIRECT_IND 0x0004
+#define LE_EXT_ADV_SCAN_RSP 0x0008
+#define LE_EXT_ADV_LEGACY_PDU 0x0010
#define ADDR_LE_DEV_PUBLIC 0x00
#define ADDR_LE_DEV_RANDOM 0x01
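The legacy values above are simply combinations of the extended-report bits that follow them; for example 0x0013 is LE_EXT_ADV_LEGACY_PDU | LE_EXT_ADV_SCAN_IND | LE_EXT_ADV_CONN_IND. A hedged sketch of classifying a report's event type; the helper names are illustrative only:

static bool sketch_ext_adv_is_legacy(__le16 evt_type)
{
	return le16_to_cpu(evt_type) & LE_EXT_ADV_LEGACY_PDU;
}

static bool sketch_ext_adv_is_connectable(__le16 evt_type)
{
	/* Set for legacy ADV_IND/ADV_DIRECT_IND and extended connectable PDUs */
	return le16_to_cpu(evt_type) & LE_EXT_ADV_CONN_IND;
}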
@@ -1957,6 +2139,48 @@ struct hci_ev_le_direct_adv_info {
__s8 rssi;
} __packed;
+#define HCI_EV_LE_EXT_ADV_REPORT 0x0d
+struct hci_ev_le_ext_adv_report {
+ __le16 evt_type;
+ __u8 bdaddr_type;
+ bdaddr_t bdaddr;
+ __u8 primary_phy;
+ __u8 secondary_phy;
+ __u8 sid;
+ __u8 tx_power;
+ __s8 rssi;
+ __le16 interval;
+ __u8 direct_addr_type;
+ bdaddr_t direct_addr;
+ __u8 length;
+ __u8 data[0];
+} __packed;
+
+#define HCI_EV_LE_ENHANCED_CONN_COMPLETE 0x0a
+struct hci_ev_le_enh_conn_complete {
+ __u8 status;
+ __le16 handle;
+ __u8 role;
+ __u8 bdaddr_type;
+ bdaddr_t bdaddr;
+ bdaddr_t local_rpa;
+ bdaddr_t peer_rpa;
+ __le16 interval;
+ __le16 latency;
+ __le16 supervision_timeout;
+ __u8 clk_accurancy;
+} __packed;
+
+#define HCI_EV_LE_EXT_ADV_SET_TERM 0x12
+struct hci_evt_le_ext_adv_set_term {
+ __u8 status;
+ __u8 handle;
+ __le16 conn_handle;
+ __u8 num_evts;
+} __packed;
+
+#define HCI_EV_VENDOR 0xff
+
/* Internal events generated by Bluetooth stack */
#define HCI_EV_STACK_INTERNAL 0xfd
struct hci_ev_stack_internal {
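An LE meta event can carry several extended advertising reports back to back, each followed by length bytes of advertising data. A hedged sketch of walking them; the surrounding event parsing and bounds checks are assumed:

static void sketch_walk_ext_adv_reports(const u8 *ptr, u8 num_reports)
{
	while (num_reports--) {
		const struct hci_ev_le_ext_adv_report *ev = (const void *)ptr;

		/* ev->evt_type, ev->bdaddr, ev->rssi, ev->data[] ... */
		ptr += sizeof(*ev) + ev->length;
	}
}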
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index 893bbbb5d2fa..0db1b9b428b7 100644
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -171,6 +171,10 @@ struct adv_info {
__u8 adv_data[HCI_MAX_AD_LENGTH];
__u16 scan_rsp_len;
__u8 scan_rsp_data[HCI_MAX_AD_LENGTH];
+ __s8 tx_power;
+ bdaddr_t random_addr;
+ bool rpa_expired;
+ struct delayed_work rpa_expired_cb;
};
#define HCI_MAX_ADV_INSTANCES 5
@@ -221,6 +225,8 @@ struct hci_dev {
__u8 features[HCI_MAX_PAGES][8];
__u8 le_features[8];
__u8 le_white_list_size;
+ __u8 le_resolv_list_size;
+ __u8 le_num_of_adv_sets;
__u8 le_states[8];
__u8 commands[64];
__u8 hci_ver;
@@ -314,6 +320,9 @@ struct hci_dev {
unsigned long sco_last_tx;
unsigned long le_last_tx;
+ __u8 le_tx_def_phys;
+ __u8 le_rx_def_phys;
+
struct workqueue_struct *workqueue;
struct workqueue_struct *req_workqueue;
@@ -367,6 +376,7 @@ struct hci_dev {
struct list_head identity_resolving_keys;
struct list_head remote_oob_data;
struct list_head le_white_list;
+ struct list_head le_resolv_list;
struct list_head le_conn_params;
struct list_head pend_le_conns;
struct list_head pend_le_reports;
@@ -1106,6 +1116,7 @@ int hci_add_adv_instance(struct hci_dev *hdev, u8 instance, u32 flags,
u16 scan_rsp_len, u8 *scan_rsp_data,
u16 timeout, u16 duration);
int hci_remove_adv_instance(struct hci_dev *hdev, u8 instance);
+void hci_adv_instances_set_rpa_expired(struct hci_dev *hdev, bool rpa_expired);
void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb);
@@ -1136,6 +1147,10 @@ void hci_conn_del_sysfs(struct hci_conn *conn);
#define lmp_inq_tx_pwr_capable(dev) ((dev)->features[0][7] & LMP_INQ_TX_PWR)
#define lmp_ext_feat_capable(dev) ((dev)->features[0][7] & LMP_EXTFEATURES)
#define lmp_transp_capable(dev) ((dev)->features[0][2] & LMP_TRANSPARENT)
+#define lmp_edr_2m_capable(dev) ((dev)->features[0][3] & LMP_EDR_2M)
+#define lmp_edr_3m_capable(dev) ((dev)->features[0][3] & LMP_EDR_3M)
+#define lmp_edr_3slot_capable(dev) ((dev)->features[0][4] & LMP_EDR_3SLOT)
+#define lmp_edr_5slot_capable(dev) ((dev)->features[0][5] & LMP_EDR_5SLOT)
/* ----- Extended LMP capabilities ----- */
#define lmp_csb_master_capable(dev) ((dev)->features[2][0] & LMP_CSB_MASTER)
@@ -1156,6 +1171,24 @@ void hci_conn_del_sysfs(struct hci_conn *conn);
#define bredr_sc_enabled(dev) (lmp_sc_capable(dev) && \
hci_dev_test_flag(dev, HCI_SC_ENABLED))
+#define scan_1m(dev) (((dev)->le_tx_def_phys & HCI_LE_SET_PHY_1M) || \
+ ((dev)->le_rx_def_phys & HCI_LE_SET_PHY_1M))
+
+#define scan_2m(dev) (((dev)->le_tx_def_phys & HCI_LE_SET_PHY_2M) || \
+ ((dev)->le_rx_def_phys & HCI_LE_SET_PHY_2M))
+
+#define scan_coded(dev) (((dev)->le_tx_def_phys & HCI_LE_SET_PHY_CODED) || \
+ ((dev)->le_rx_def_phys & HCI_LE_SET_PHY_CODED))
+
+/* Use ext scanning if the Set Ext Scan Parameters and Set Ext Scan Enable commands are supported */
+#define use_ext_scan(dev) (((dev)->commands[37] & 0x20) && \
+ ((dev)->commands[37] & 0x40))
+/* Use ext create connection if command is supported */
+#define use_ext_conn(dev) ((dev)->commands[37] & 0x80)
+
+/* Extended advertising support */
+#define ext_adv_capable(dev) (((dev)->le_features[1] & HCI_LE_EXT_ADV))
+
/* ----- HCI protocols ----- */
#define HCI_PROTO_DEFER 0x01
@@ -1529,6 +1562,7 @@ void mgmt_advertising_added(struct sock *sk, struct hci_dev *hdev,
u8 instance);
void mgmt_advertising_removed(struct sock *sk, struct hci_dev *hdev,
u8 instance);
+int mgmt_phy_configuration_changed(struct hci_dev *hdev, struct sock *skip);
u8 hci_le_conn_update(struct hci_conn *conn, u16 min, u16 max, u16 latency,
u16 to_multiplier);
diff --git a/include/net/bluetooth/mgmt.h b/include/net/bluetooth/mgmt.h
index e7303eee65cd..9cee7ddc6741 100644
--- a/include/net/bluetooth/mgmt.h
+++ b/include/net/bluetooth/mgmt.h
@@ -101,6 +101,7 @@ struct mgmt_rp_read_index_list {
#define MGMT_SETTING_PRIVACY 0x00002000
#define MGMT_SETTING_CONFIGURATION 0x00004000
#define MGMT_SETTING_STATIC_ADDRESS 0x00008000
+#define MGMT_SETTING_PHY_CONFIGURATION 0x00010000
#define MGMT_OP_READ_INFO 0x0004
#define MGMT_READ_INFO_SIZE 0
@@ -561,6 +562,12 @@ struct mgmt_rp_add_advertising {
#define MGMT_ADV_FLAG_TX_POWER BIT(4)
#define MGMT_ADV_FLAG_APPEARANCE BIT(5)
#define MGMT_ADV_FLAG_LOCAL_NAME BIT(6)
+#define MGMT_ADV_FLAG_SEC_1M BIT(7)
+#define MGMT_ADV_FLAG_SEC_2M BIT(8)
+#define MGMT_ADV_FLAG_SEC_CODED BIT(9)
+
+#define MGMT_ADV_FLAG_SEC_MASK (MGMT_ADV_FLAG_SEC_1M | MGMT_ADV_FLAG_SEC_2M | \
+ MGMT_ADV_FLAG_SEC_CODED)
#define MGMT_OP_REMOVE_ADVERTISING 0x003F
struct mgmt_cp_remove_advertising {
@@ -604,6 +611,49 @@ struct mgmt_cp_set_appearance {
} __packed;
#define MGMT_SET_APPEARANCE_SIZE 2
+#define MGMT_OP_GET_PHY_CONFIGURATION 0x0044
+struct mgmt_rp_get_phy_confguration {
+ __le32 supported_phys;
+ __le32 configurable_phys;
+ __le32 selected_phys;
+} __packed;
+#define MGMT_GET_PHY_CONFIGURATION_SIZE 0
+
+#define MGMT_PHY_BR_1M_1SLOT 0x00000001
+#define MGMT_PHY_BR_1M_3SLOT 0x00000002
+#define MGMT_PHY_BR_1M_5SLOT 0x00000004
+#define MGMT_PHY_EDR_2M_1SLOT 0x00000008
+#define MGMT_PHY_EDR_2M_3SLOT 0x00000010
+#define MGMT_PHY_EDR_2M_5SLOT 0x00000020
+#define MGMT_PHY_EDR_3M_1SLOT 0x00000040
+#define MGMT_PHY_EDR_3M_3SLOT 0x00000080
+#define MGMT_PHY_EDR_3M_5SLOT 0x00000100
+#define MGMT_PHY_LE_1M_TX 0x00000200
+#define MGMT_PHY_LE_1M_RX 0x00000400
+#define MGMT_PHY_LE_2M_TX 0x00000800
+#define MGMT_PHY_LE_2M_RX 0x00001000
+#define MGMT_PHY_LE_CODED_TX 0x00002000
+#define MGMT_PHY_LE_CODED_RX 0x00004000
+
+#define MGMT_PHY_BREDR_MASK (MGMT_PHY_BR_1M_1SLOT | MGMT_PHY_BR_1M_3SLOT | \
+ MGMT_PHY_BR_1M_5SLOT | MGMT_PHY_EDR_2M_1SLOT | \
+ MGMT_PHY_EDR_2M_3SLOT | MGMT_PHY_EDR_2M_5SLOT | \
+ MGMT_PHY_EDR_3M_1SLOT | MGMT_PHY_EDR_3M_3SLOT | \
+ MGMT_PHY_EDR_3M_5SLOT)
+#define MGMT_PHY_LE_MASK (MGMT_PHY_LE_1M_TX | MGMT_PHY_LE_1M_RX | \
+ MGMT_PHY_LE_2M_TX | MGMT_PHY_LE_2M_RX | \
+ MGMT_PHY_LE_CODED_TX | MGMT_PHY_LE_CODED_RX)
+#define MGMT_PHY_LE_TX_MASK (MGMT_PHY_LE_1M_TX | MGMT_PHY_LE_2M_TX | \
+ MGMT_PHY_LE_CODED_TX)
+#define MGMT_PHY_LE_RX_MASK (MGMT_PHY_LE_1M_RX | MGMT_PHY_LE_2M_RX | \
+ MGMT_PHY_LE_CODED_RX)
+
+#define MGMT_OP_SET_PHY_CONFIGURATION 0x0045
+struct mgmt_cp_set_phy_confguration {
+ __le32 selected_phys;
+} __packed;
+#define MGMT_SET_PHY_CONFIGURATION_SIZE 4
+
#define MGMT_EV_CMD_COMPLETE 0x0001
struct mgmt_ev_cmd_complete {
__le16 opcode;
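On the management interface side the new command carries a single little-endian bitmask. A hedged userspace-style sketch of selecting the LE 2M PHY in addition to the mandatory 1M PHY (the mgmt socket plumbing is omitted and assumed; the struct name is as declared above):

#include <endian.h>
#include <stdint.h>

static void sketch_fill_set_phy(struct mgmt_cp_set_phy_confguration *cp)
{
	uint32_t phys = MGMT_PHY_LE_1M_TX | MGMT_PHY_LE_1M_RX |
			MGMT_PHY_LE_2M_TX | MGMT_PHY_LE_2M_RX;

	cp->selected_phys = htole32(phys);
	/* Sent as MGMT_OP_SET_PHY_CONFIGURATION with
	 * MGMT_SET_PHY_CONFIGURATION_SIZE parameter bytes.
	 */
}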
@@ -824,3 +874,8 @@ struct mgmt_ev_ext_info_changed {
__le16 eir_len;
__u8 eir[0];
} __packed;
+
+#define MGMT_EV_PHY_CONFIGURATION_CHANGED 0x0026
+struct mgmt_ev_phy_configuration_changed {
+ __le32 selected_phys;
+} __packed;
diff --git a/include/net/bond_3ad.h b/include/net/bond_3ad.h
index f358ad5e4214..fc3111515f5c 100644
--- a/include/net/bond_3ad.h
+++ b/include/net/bond_3ad.h
@@ -283,7 +283,7 @@ static inline const char *bond_3ad_churn_desc(churn_state_t state)
"none",
"unknown"
};
- int max_size = sizeof(churn_description) / sizeof(churn_description[0]);
+ int max_size = ARRAY_SIZE(churn_description);
if (state >= max_size)
state = max_size - 1;
diff --git a/include/net/bonding.h b/include/net/bonding.h
index 808f1d167349..a2d058170ea3 100644
--- a/include/net/bonding.h
+++ b/include/net/bonding.h
@@ -411,6 +411,19 @@ static inline bool bond_slave_can_tx(struct slave *slave)
bond_is_active_slave(slave);
}
+static inline bool bond_is_active_slave_dev(const struct net_device *slave_dev)
+{
+ struct slave *slave;
+ bool active;
+
+ rcu_read_lock();
+ slave = bond_slave_get_rcu(slave_dev);
+ active = bond_is_active_slave(slave);
+ rcu_read_unlock();
+
+ return active;
+}
+
static inline void bond_hw_addr_copy(u8 *dst, const u8 *src, unsigned int len)
{
if (len == ETH_ALEN) {
diff --git a/include/net/busy_poll.h b/include/net/busy_poll.h
index c5187438af38..ba61cdd09eaa 100644
--- a/include/net/busy_poll.h
+++ b/include/net/busy_poll.h
@@ -121,21 +121,6 @@ static inline void sk_busy_loop(struct sock *sk, int nonblock)
#endif
}
-static inline void sock_poll_busy_loop(struct socket *sock, __poll_t events)
-{
- if (sk_can_busy_loop(sock->sk) &&
- events && (events & POLL_BUSY_LOOP)) {
- /* once, only if requested by syscall */
- sk_busy_loop(sock->sk, 1);
- }
-}
-
-/* if this socket can poll_ll, tell the system call */
-static inline __poll_t sock_poll_busy_flag(struct socket *sock)
-{
- return sk_can_busy_loop(sock->sk) ? POLL_BUSY_LOOP : 0;
-}
-
/* used in the NIC receive handler to mark the skb */
static inline void skb_mark_napi_id(struct sk_buff *skb,
struct napi_struct *napi)
@@ -151,6 +136,7 @@ static inline void sk_mark_napi_id(struct sock *sk, const struct sk_buff *skb)
#ifdef CONFIG_NET_RX_BUSY_POLL
sk->sk_napi_id = skb->napi_id;
#endif
+ sk_rx_queue_set(sk, skb);
}
/* variant used for unconnected sockets */
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index 5fbfe61f41c6..9a850973e09a 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -285,6 +285,41 @@ struct ieee80211_sta_vht_cap {
struct ieee80211_vht_mcs_info vht_mcs;
};
+#define IEEE80211_HE_PPE_THRES_MAX_LEN 25
+
+/**
+ * struct ieee80211_sta_he_cap - STA's HE capabilities
+ *
+ * This structure describes most essential parameters needed
+ * to describe 802.11ax HE capabilities for a STA.
+ *
+ * @has_he: true iff HE data is valid.
+ * @he_cap_elem: Fixed portion of the HE capabilities element.
+ * @he_mcs_nss_supp: The supported NSS/MCS combinations.
+ * @ppe_thres: Holds the PPE Thresholds data.
+ */
+struct ieee80211_sta_he_cap {
+ bool has_he;
+ struct ieee80211_he_cap_elem he_cap_elem;
+ struct ieee80211_he_mcs_nss_supp he_mcs_nss_supp;
+ u8 ppe_thres[IEEE80211_HE_PPE_THRES_MAX_LEN];
+};
+
+/**
+ * struct ieee80211_sband_iftype_data
+ *
+ * This structure encapsulates sband data that is relevant for the
+ * interface types defined in @types_mask. Each type in the
+ * @types_mask must be unique across all instances of iftype_data.
+ *
+ * @types_mask: interface types mask
+ * @he_cap: holds the HE capabilities
+ */
+struct ieee80211_sband_iftype_data {
+ u16 types_mask;
+ struct ieee80211_sta_he_cap he_cap;
+};
+
/**
* struct ieee80211_supported_band - frequency band definition
*
@@ -301,6 +336,11 @@ struct ieee80211_sta_vht_cap {
* @n_bitrates: Number of bitrates in @bitrates
* @ht_cap: HT capabilities in this band
* @vht_cap: VHT capabilities in this band
+ * @n_iftype_data: number of iftype data entries
+ * @iftype_data: interface type data entries. Note that the bits in
+ * @types_mask inside this structure cannot overlap (i.e. only
+ * one occurrence of each type is allowed across all instances of
+ * iftype_data).
*/
struct ieee80211_supported_band {
struct ieee80211_channel *channels;
@@ -310,9 +350,56 @@ struct ieee80211_supported_band {
int n_bitrates;
struct ieee80211_sta_ht_cap ht_cap;
struct ieee80211_sta_vht_cap vht_cap;
+ u16 n_iftype_data;
+ const struct ieee80211_sband_iftype_data *iftype_data;
};
/**
+ * ieee80211_get_sband_iftype_data - return sband data for a given iftype
+ * @sband: the sband to search for the STA on
+ * @iftype: enum nl80211_iftype
+ *
+ * Return: pointer to struct ieee80211_sband_iftype_data, or NULL if none found
+ */
+static inline const struct ieee80211_sband_iftype_data *
+ieee80211_get_sband_iftype_data(const struct ieee80211_supported_band *sband,
+ u8 iftype)
+{
+ int i;
+
+ if (WARN_ON(iftype >= NL80211_IFTYPE_MAX))
+ return NULL;
+
+ for (i = 0; i < sband->n_iftype_data; i++) {
+ const struct ieee80211_sband_iftype_data *data =
+ &sband->iftype_data[i];
+
+ if (data->types_mask & BIT(iftype))
+ return data;
+ }
+
+ return NULL;
+}
+
+/**
+ * ieee80211_get_he_sta_cap - return HE capabilities for an sband's STA
+ * @sband: the sband to search for the STA on
+ *
+ * Return: pointer to the struct ieee80211_sta_he_cap, or NULL if none found
+ */
+static inline const struct ieee80211_sta_he_cap *
+ieee80211_get_he_sta_cap(const struct ieee80211_supported_band *sband)
+{
+ const struct ieee80211_sband_iftype_data *data =
+ ieee80211_get_sband_iftype_data(sband, NL80211_IFTYPE_STATION);
+
+ if (data && data->he_cap.has_he)
+ return &data->he_cap;
+
+ return NULL;
+}
+
+/**
* wiphy_read_of_freq_limits - read frequency limits from device tree
*
* @wiphy: the wireless device to get extra limits for
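A hedged sketch of how a driver or mac80211 code might consult the helper above when deciding whether a band can advertise HE for station interfaces; sband is assumed to come from the driver's wiphy setup:

static bool sketch_band_has_he_sta(const struct ieee80211_supported_band *sband)
{
	const struct ieee80211_sta_he_cap *he_cap;

	he_cap = ieee80211_get_he_sta_cap(sband);

	/* Non-NULL means he_cap->he_cap_elem and he_cap->he_mcs_nss_supp
	 * describe what can be advertised for station interfaces.
	 */
	return he_cap != NULL;
}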
@@ -899,6 +986,8 @@ enum station_parameters_apply_mask {
* @opmode_notif: operating mode field from Operating Mode Notification
* @opmode_notif_used: information if operating mode field is used
* @support_p2p_ps: information if station supports P2P PS mechanism
+ * @he_capa: HE capabilities of station
+ * @he_capa_len: the length of the HE capabilities
*/
struct station_parameters {
const u8 *supported_rates;
@@ -926,6 +1015,8 @@ struct station_parameters {
u8 opmode_notif;
bool opmode_notif_used;
int support_p2p_ps;
+ const struct ieee80211_he_cap_elem *he_capa;
+ u8 he_capa_len;
};
/**
@@ -1000,12 +1091,14 @@ int cfg80211_check_station_change(struct wiphy *wiphy,
* @RATE_INFO_FLAGS_VHT_MCS: mcs field filled with VHT MCS
* @RATE_INFO_FLAGS_SHORT_GI: 400ns guard interval
* @RATE_INFO_FLAGS_60G: 60GHz MCS
+ * @RATE_INFO_FLAGS_HE_MCS: HE MCS information
*/
enum rate_info_flags {
RATE_INFO_FLAGS_MCS = BIT(0),
RATE_INFO_FLAGS_VHT_MCS = BIT(1),
RATE_INFO_FLAGS_SHORT_GI = BIT(2),
RATE_INFO_FLAGS_60G = BIT(3),
+ RATE_INFO_FLAGS_HE_MCS = BIT(4),
};
/**
@@ -1019,6 +1112,7 @@ enum rate_info_flags {
* @RATE_INFO_BW_40: 40 MHz bandwidth
* @RATE_INFO_BW_80: 80 MHz bandwidth
* @RATE_INFO_BW_160: 160 MHz bandwidth
+ * @RATE_INFO_BW_HE_RU: bandwidth determined by HE RU allocation
*/
enum rate_info_bw {
RATE_INFO_BW_20 = 0,
@@ -1027,6 +1121,7 @@ enum rate_info_bw {
RATE_INFO_BW_40,
RATE_INFO_BW_80,
RATE_INFO_BW_160,
+ RATE_INFO_BW_HE_RU,
};
/**
@@ -1035,10 +1130,14 @@ enum rate_info_bw {
* Information about a receiving or transmitting bitrate
*
* @flags: bitflag of flags from &enum rate_info_flags
- * @mcs: mcs index if struct describes a 802.11n bitrate
+ * @mcs: mcs index if struct describes an HT/VHT/HE rate
* @legacy: bitrate in 100kbit/s for 802.11abg
- * @nss: number of streams (VHT only)
+ * @nss: number of streams (VHT & HE only)
* @bw: bandwidth (from &enum rate_info_bw)
+ * @he_gi: HE guard interval (from &enum nl80211_he_gi)
+ * @he_dcm: HE DCM value
+ * @he_ru_alloc: HE RU allocation (from &enum nl80211_he_ru_alloc,
+ * only valid if bw is %RATE_INFO_BW_HE_RU)
*/
struct rate_info {
u8 flags;
@@ -1046,6 +1145,9 @@ struct rate_info {
u16 legacy;
u8 nss;
u8 bw;
+ u8 he_gi;
+ u8 he_dcm;
+ u8 he_ru_alloc;
};
/**
@@ -5835,10 +5937,11 @@ void cfg80211_mgmt_tx_status(struct wireless_dev *wdev, u64 cookie,
/**
* cfg80211_rx_control_port - notification about a received control port frame
* @dev: The device the frame matched to
- * @buf: control port frame
- * @len: length of the frame data
- * @addr: The peer from which the frame was received
- * @proto: frame protocol, typically PAE or Pre-authentication
+ * @skb: The skbuf with the control port frame. It is assumed that the skbuf
+ * is 802.3 formatted (with 802.3 header). The skb can be non-linear.
+ * This function does not take ownership of the skb, so the caller is
+ * responsible for any cleanup. The caller must also ensure that
+ * skb->protocol is set appropriately.
* @unencrypted: Whether the frame was received unencrypted
*
* This function is used to inform userspace about a received control port
@@ -5851,8 +5954,7 @@ void cfg80211_mgmt_tx_status(struct wireless_dev *wdev, u64 cookie,
* Return: %true if the frame was passed to userspace
*/
bool cfg80211_rx_control_port(struct net_device *dev,
- const u8 *buf, size_t len,
- const u8 *addr, u16 proto, bool unencrypted);
+ struct sk_buff *skb, bool unencrypted);
/**
* cfg80211_cqm_rssi_notify - connection quality monitoring rssi event
diff --git a/include/net/dcbnl.h b/include/net/dcbnl.h
index 0e5e91be2d30..e22a8a3c089b 100644
--- a/include/net/dcbnl.h
+++ b/include/net/dcbnl.h
@@ -34,6 +34,19 @@ int dcb_ieee_setapp(struct net_device *, struct dcb_app *);
int dcb_ieee_delapp(struct net_device *, struct dcb_app *);
u8 dcb_ieee_getapp_mask(struct net_device *, struct dcb_app *);
+struct dcb_ieee_app_prio_map {
+ u64 map[IEEE_8021QAZ_MAX_TCS];
+};
+void dcb_ieee_getapp_prio_dscp_mask_map(const struct net_device *dev,
+ struct dcb_ieee_app_prio_map *p_map);
+
+struct dcb_ieee_app_dscp_map {
+ u8 map[64];
+};
+void dcb_ieee_getapp_dscp_prio_mask_map(const struct net_device *dev,
+ struct dcb_ieee_app_dscp_map *p_map);
+u8 dcb_ieee_getapp_default_prio_mask(const struct net_device *dev);
+
int dcbnl_ieee_notify(struct net_device *dev, int event, int cmd,
u32 seq, u32 pid);
int dcbnl_cee_notify(struct net_device *dev, int event, int cmd,
diff --git a/include/net/devlink.h b/include/net/devlink.h
index e336ea9c73df..b9b89d6604d4 100644
--- a/include/net/devlink.h
+++ b/include/net/devlink.h
@@ -27,6 +27,9 @@ struct devlink {
struct list_head sb_list;
struct list_head dpipe_table_list;
struct list_head resource_list;
+ struct list_head param_list;
+ struct list_head region_list;
+ u32 snapshot_id;
struct devlink_dpipe_headers *dpipe_headers;
const struct devlink_ops *ops;
struct device *dev;
@@ -295,6 +298,115 @@ struct devlink_resource {
#define DEVLINK_RESOURCE_ID_PARENT_TOP 0
+#define DEVLINK_PARAM_MAX_STRING_VALUE 32
+enum devlink_param_type {
+ DEVLINK_PARAM_TYPE_U8,
+ DEVLINK_PARAM_TYPE_U16,
+ DEVLINK_PARAM_TYPE_U32,
+ DEVLINK_PARAM_TYPE_STRING,
+ DEVLINK_PARAM_TYPE_BOOL,
+};
+
+union devlink_param_value {
+ u8 vu8;
+ u16 vu16;
+ u32 vu32;
+ const char *vstr;
+ bool vbool;
+};
+
+struct devlink_param_gset_ctx {
+ union devlink_param_value val;
+ enum devlink_param_cmode cmode;
+};
+
+/**
+ * struct devlink_param - devlink configuration parameter data
+ * @name: name of the parameter
+ * @generic: indicates if the parameter is generic or driver specific
+ * @type: parameter type
+ * @supported_cmodes: bitmap of supported configuration modes
+ * @get: get parameter value, used for runtime and permanent
+ * configuration modes
+ * @set: set parameter value, used for runtime and permanent
+ * configuration modes
+ * @validate: validate input value is applicable (within value range, etc.)
+ *
+ * This struct should be used by the driver to fill the data for
+ * a parameter it registers.
+ */
+struct devlink_param {
+ u32 id;
+ const char *name;
+ bool generic;
+ enum devlink_param_type type;
+ unsigned long supported_cmodes;
+ int (*get)(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx);
+ int (*set)(struct devlink *devlink, u32 id,
+ struct devlink_param_gset_ctx *ctx);
+ int (*validate)(struct devlink *devlink, u32 id,
+ union devlink_param_value val,
+ struct netlink_ext_ack *extack);
+};
+
+struct devlink_param_item {
+ struct list_head list;
+ const struct devlink_param *param;
+ union devlink_param_value driverinit_value;
+ bool driverinit_value_valid;
+};
+
+enum devlink_param_generic_id {
+ DEVLINK_PARAM_GENERIC_ID_INT_ERR_RESET,
+ DEVLINK_PARAM_GENERIC_ID_MAX_MACS,
+ DEVLINK_PARAM_GENERIC_ID_ENABLE_SRIOV,
+ DEVLINK_PARAM_GENERIC_ID_REGION_SNAPSHOT,
+
+ /* add new param generic ids above here */
+ __DEVLINK_PARAM_GENERIC_ID_MAX,
+ DEVLINK_PARAM_GENERIC_ID_MAX = __DEVLINK_PARAM_GENERIC_ID_MAX - 1,
+};
+
+#define DEVLINK_PARAM_GENERIC_INT_ERR_RESET_NAME "internal_error_reset"
+#define DEVLINK_PARAM_GENERIC_INT_ERR_RESET_TYPE DEVLINK_PARAM_TYPE_BOOL
+
+#define DEVLINK_PARAM_GENERIC_MAX_MACS_NAME "max_macs"
+#define DEVLINK_PARAM_GENERIC_MAX_MACS_TYPE DEVLINK_PARAM_TYPE_U32
+
+#define DEVLINK_PARAM_GENERIC_ENABLE_SRIOV_NAME "enable_sriov"
+#define DEVLINK_PARAM_GENERIC_ENABLE_SRIOV_TYPE DEVLINK_PARAM_TYPE_BOOL
+
+#define DEVLINK_PARAM_GENERIC_REGION_SNAPSHOT_NAME "region_snapshot_enable"
+#define DEVLINK_PARAM_GENERIC_REGION_SNAPSHOT_TYPE DEVLINK_PARAM_TYPE_BOOL
+
+#define DEVLINK_PARAM_GENERIC(_id, _cmodes, _get, _set, _validate) \
+{ \
+ .id = DEVLINK_PARAM_GENERIC_ID_##_id, \
+ .name = DEVLINK_PARAM_GENERIC_##_id##_NAME, \
+ .type = DEVLINK_PARAM_GENERIC_##_id##_TYPE, \
+ .generic = true, \
+ .supported_cmodes = _cmodes, \
+ .get = _get, \
+ .set = _set, \
+ .validate = _validate, \
+}
+
+#define DEVLINK_PARAM_DRIVER(_id, _name, _type, _cmodes, _get, _set, _validate) \
+{ \
+ .id = _id, \
+ .name = _name, \
+ .type = _type, \
+ .supported_cmodes = _cmodes, \
+ .get = _get, \
+ .set = _set, \
+ .validate = _validate, \
+}
+
+struct devlink_region;
+
+typedef void devlink_snapshot_data_dest_t(const void *data);
+
struct devlink_ops {
int (*reload)(struct devlink *devlink, struct netlink_ext_ack *extack);
int (*port_type_set)(struct devlink_port *devlink_port,
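Drivers are expected to declare an array of these parameters and register it. A hedged sketch with made-up callback and array names; DEVLINK_PARAM_CMODE_DRIVERINIT comes from the devlink uapi enum:

static int sketch_max_macs_validate(struct devlink *devlink, u32 id,
				    union devlink_param_value val,
				    struct netlink_ext_ack *extack)
{
	/* Illustrative range check only. */
	return val.vu32 <= 128 ? 0 : -ERANGE;
}

static const struct devlink_param sketch_params[] = {
	DEVLINK_PARAM_GENERIC(MAX_MACS,
			      BIT(DEVLINK_PARAM_CMODE_DRIVERINIT),
			      NULL, NULL, sketch_max_macs_validate),
};

/* In probe:
 *	devlink_params_register(devlink, sketch_params,
 *				ARRAY_SIZE(sketch_params));
 */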
@@ -430,6 +542,26 @@ void devlink_resource_occ_get_register(struct devlink *devlink,
void *occ_get_priv);
void devlink_resource_occ_get_unregister(struct devlink *devlink,
u64 resource_id);
+int devlink_params_register(struct devlink *devlink,
+ const struct devlink_param *params,
+ size_t params_count);
+void devlink_params_unregister(struct devlink *devlink,
+ const struct devlink_param *params,
+ size_t params_count);
+int devlink_param_driverinit_value_get(struct devlink *devlink, u32 param_id,
+ union devlink_param_value *init_val);
+int devlink_param_driverinit_value_set(struct devlink *devlink, u32 param_id,
+ union devlink_param_value init_val);
+void devlink_param_value_changed(struct devlink *devlink, u32 param_id);
+struct devlink_region *devlink_region_create(struct devlink *devlink,
+ const char *region_name,
+ u32 region_max_snapshots,
+ u64 region_size);
+void devlink_region_destroy(struct devlink_region *region);
+u32 devlink_region_shapshot_id_get(struct devlink *devlink);
+int devlink_region_snapshot_create(struct devlink_region *region, u64 data_len,
+ u8 *data, u32 snapshot_id,
+ devlink_snapshot_data_dest_t *data_destructor);
#else
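A hedged sketch of the intended region/snapshot flow; the region name, size macro and data_free() callback are invented, and the snapshot-id helper keeps the spelling used in the declaration above:

static void sketch_take_fw_snapshot(struct devlink *devlink, u8 *data)
{
	struct devlink_region *region;
	u32 id;

	/* FW_DUMP_SIZE and data_free() are hypothetical. */
	region = devlink_region_create(devlink, "fw-dump", 4, FW_DUMP_SIZE);
	if (IS_ERR_OR_NULL(region))
		return;

	id = devlink_region_shapshot_id_get(devlink);
	devlink_region_snapshot_create(region, FW_DUMP_SIZE, data, id,
				       data_free);
}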
@@ -622,6 +754,69 @@ devlink_resource_occ_get_unregister(struct devlink *devlink,
{
}
+static inline int
+devlink_params_register(struct devlink *devlink,
+ const struct devlink_param *params,
+ size_t params_count)
+{
+ return 0;
+}
+
+static inline void
+devlink_params_unregister(struct devlink *devlink,
+ const struct devlink_param *params,
+ size_t params_count)
+{
+
+}
+
+static inline int
+devlink_param_driverinit_value_get(struct devlink *devlink, u32 param_id,
+ union devlink_param_value *init_val)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline int
+devlink_param_driverinit_value_set(struct devlink *devlink, u32 param_id,
+ union devlink_param_value init_val)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void
+devlink_param_value_changed(struct devlink *devlink, u32 param_id)
+{
+}
+
+static inline struct devlink_region *
+devlink_region_create(struct devlink *devlink,
+ const char *region_name,
+ u32 region_max_snapshots,
+ u64 region_size)
+{
+ return NULL;
+}
+
+static inline void
+devlink_region_destroy(struct devlink_region *region)
+{
+}
+
+static inline u32
+devlink_region_shapshot_id_get(struct devlink *devlink)
+{
+ return 0;
+}
+
+static inline int
+devlink_region_snapshot_create(struct devlink_region *region, u64 data_len,
+ u8 *data, u32 snapshot_id,
+ devlink_snapshot_data_dest_t *data_destructor)
+{
+ return 0;
+}
+
#endif
#endif /* _NET_DEVLINK_H_ */
diff --git a/include/net/dsa.h b/include/net/dsa.h
index fdbd6082945d..461e8a7661b7 100644
--- a/include/net/dsa.h
+++ b/include/net/dsa.h
@@ -259,6 +259,9 @@ struct dsa_switch {
/* Number of switch port queues */
unsigned int num_tx_queues;
+ unsigned long *bitmap;
+ unsigned long _bitmap;
+
/* Dynamically allocated ports, keep last */
size_t num_ports;
struct dsa_port ports[];
diff --git a/include/net/dst.h b/include/net/dst.h
index b3219cd8a5a1..7f735e76ca73 100644
--- a/include/net/dst.h
+++ b/include/net/dst.h
@@ -475,6 +475,14 @@ static inline struct dst_entry *xfrm_lookup(struct net *net,
return dst_orig;
}
+static inline struct dst_entry *
+xfrm_lookup_with_ifid(struct net *net, struct dst_entry *dst_orig,
+ const struct flowi *fl, const struct sock *sk,
+ int flags, u32 if_id)
+{
+ return dst_orig;
+}
+
static inline struct dst_entry *xfrm_lookup_route(struct net *net,
struct dst_entry *dst_orig,
const struct flowi *fl,
@@ -494,6 +502,12 @@ struct dst_entry *xfrm_lookup(struct net *net, struct dst_entry *dst_orig,
const struct flowi *fl, const struct sock *sk,
int flags);
+struct dst_entry *xfrm_lookup_with_ifid(struct net *net,
+ struct dst_entry *dst_orig,
+ const struct flowi *fl,
+ const struct sock *sk, int flags,
+ u32 if_id);
+
struct dst_entry *xfrm_lookup_route(struct net *net, struct dst_entry *dst_orig,
const struct flowi *fl, const struct sock *sk,
int flags);
diff --git a/include/net/flow_dissector.h b/include/net/flow_dissector.h
index adc24df56b90..6a4586dcdede 100644
--- a/include/net/flow_dissector.h
+++ b/include/net/flow_dissector.h
@@ -47,7 +47,7 @@ struct flow_dissector_key_tags {
struct flow_dissector_key_vlan {
u16 vlan_id:12,
vlan_priority:3;
- u16 padding;
+ __be16 vlan_tpid;
};
struct flow_dissector_key_mpls {
@@ -57,6 +57,21 @@ struct flow_dissector_key_mpls {
mpls_label:20;
};
+#define FLOW_DIS_TUN_OPTS_MAX 255
+/**
+ * struct flow_dissector_key_enc_opts:
+ * @data: tunnel option data
+ * @len: length of tunnel option data
+ * @dst_opt_type: tunnel option type
+ */
+struct flow_dissector_key_enc_opts {
+ u8 data[FLOW_DIS_TUN_OPTS_MAX]; /* Using IP_TUNNEL_OPTS_MAX is desired
+ * here but seems difficult to #include
+ */
+ u8 len;
+ __be16 dst_opt_type;
+};
+
struct flow_dissector_key_keyid {
__be32 keyid;
};
@@ -206,6 +221,9 @@ enum flow_dissector_key_id {
FLOW_DISSECTOR_KEY_MPLS, /* struct flow_dissector_key_mpls */
FLOW_DISSECTOR_KEY_TCP, /* struct flow_dissector_key_tcp */
FLOW_DISSECTOR_KEY_IP, /* struct flow_dissector_key_ip */
+ FLOW_DISSECTOR_KEY_CVLAN, /* struct flow_dissector_key_flow_vlan */
+ FLOW_DISSECTOR_KEY_ENC_IP, /* struct flow_dissector_key_ip */
+ FLOW_DISSECTOR_KEY_ENC_OPTS, /* struct flow_dissector_key_enc_opts */
FLOW_DISSECTOR_KEY_MAX,
};
@@ -237,6 +255,7 @@ struct flow_keys {
struct flow_dissector_key_basic basic;
struct flow_dissector_key_tags tags;
struct flow_dissector_key_vlan vlan;
+ struct flow_dissector_key_vlan cvlan;
struct flow_dissector_key_keyid keyid;
struct flow_dissector_key_ports ports;
struct flow_dissector_key_addrs addrs;
diff --git a/include/net/gen_stats.h b/include/net/gen_stats.h
index 0304ba2ae353..883bb9085f15 100644
--- a/include/net/gen_stats.h
+++ b/include/net/gen_stats.h
@@ -59,13 +59,13 @@ int gnet_stats_finish_copy(struct gnet_dump *d);
int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
struct gnet_stats_basic_cpu __percpu *cpu_bstats,
struct net_rate_estimator __rcu **rate_est,
- spinlock_t *stats_lock,
+ spinlock_t *lock,
seqcount_t *running, struct nlattr *opt);
void gen_kill_estimator(struct net_rate_estimator __rcu **ptr);
int gen_replace_estimator(struct gnet_stats_basic_packed *bstats,
struct gnet_stats_basic_cpu __percpu *cpu_bstats,
struct net_rate_estimator __rcu **ptr,
- spinlock_t *stats_lock,
+ spinlock_t *lock,
seqcount_t *running, struct nlattr *opt);
bool gen_estimator_active(struct net_rate_estimator __rcu **ptr);
bool gen_estimator_read(struct net_rate_estimator __rcu **ptr,
diff --git a/include/net/ieee80211_radiotap.h b/include/net/ieee80211_radiotap.h
index 960236fb1681..feef706e1158 100644
--- a/include/net/ieee80211_radiotap.h
+++ b/include/net/ieee80211_radiotap.h
@@ -1,5 +1,6 @@
/*
* Copyright (c) 2017 Intel Deutschland GmbH
+ * Copyright (c) 2018 Intel Corporation
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -72,6 +73,8 @@ enum ieee80211_radiotap_presence {
IEEE80211_RADIOTAP_AMPDU_STATUS = 20,
IEEE80211_RADIOTAP_VHT = 21,
IEEE80211_RADIOTAP_TIMESTAMP = 22,
+ IEEE80211_RADIOTAP_HE = 23,
+ IEEE80211_RADIOTAP_HE_MU = 24,
/* valid in every it_present bitmap, even vendor namespaces */
IEEE80211_RADIOTAP_RADIOTAP_NAMESPACE = 29,
@@ -202,6 +205,126 @@ enum ieee80211_radiotap_timestamp_flags {
IEEE80211_RADIOTAP_TIMESTAMP_FLAG_ACCURACY = 0x02,
};
+struct ieee80211_radiotap_he {
+ __le16 data1, data2, data3, data4, data5, data6;
+};
+
+enum ieee80211_radiotap_he_bits {
+ IEEE80211_RADIOTAP_HE_DATA1_FORMAT_MASK = 3,
+ IEEE80211_RADIOTAP_HE_DATA1_FORMAT_SU = 0,
+ IEEE80211_RADIOTAP_HE_DATA1_FORMAT_EXT_SU = 1,
+ IEEE80211_RADIOTAP_HE_DATA1_FORMAT_MU = 2,
+ IEEE80211_RADIOTAP_HE_DATA1_FORMAT_TRIG = 3,
+
+ IEEE80211_RADIOTAP_HE_DATA1_BSS_COLOR_KNOWN = 0x0004,
+ IEEE80211_RADIOTAP_HE_DATA1_BEAM_CHANGE_KNOWN = 0x0008,
+ IEEE80211_RADIOTAP_HE_DATA1_UL_DL_KNOWN = 0x0010,
+ IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN = 0x0020,
+ IEEE80211_RADIOTAP_HE_DATA1_DATA_DCM_KNOWN = 0x0040,
+ IEEE80211_RADIOTAP_HE_DATA1_CODING_KNOWN = 0x0080,
+ IEEE80211_RADIOTAP_HE_DATA1_LDPC_XSYMSEG_KNOWN = 0x0100,
+ IEEE80211_RADIOTAP_HE_DATA1_STBC_KNOWN = 0x0200,
+ IEEE80211_RADIOTAP_HE_DATA1_SPTL_REUSE_KNOWN = 0x0400,
+ IEEE80211_RADIOTAP_HE_DATA1_SPTL_REUSE2_KNOWN = 0x0800,
+ IEEE80211_RADIOTAP_HE_DATA1_SPTL_REUSE3_KNOWN = 0x1000,
+ IEEE80211_RADIOTAP_HE_DATA1_SPTL_REUSE4_KNOWN = 0x2000,
+ IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN = 0x4000,
+ IEEE80211_RADIOTAP_HE_DATA1_DOPPLER_KNOWN = 0x8000,
+
+ IEEE80211_RADIOTAP_HE_DATA2_PRISEC_80_KNOWN = 0x0001,
+ IEEE80211_RADIOTAP_HE_DATA2_GI_KNOWN = 0x0002,
+ IEEE80211_RADIOTAP_HE_DATA2_NUM_LTF_SYMS_KNOWN = 0x0004,
+ IEEE80211_RADIOTAP_HE_DATA2_PRE_FEC_PAD_KNOWN = 0x0008,
+ IEEE80211_RADIOTAP_HE_DATA2_TXBF_KNOWN = 0x0010,
+ IEEE80211_RADIOTAP_HE_DATA2_PE_DISAMBIG_KNOWN = 0x0020,
+ IEEE80211_RADIOTAP_HE_DATA2_TXOP_KNOWN = 0x0040,
+ IEEE80211_RADIOTAP_HE_DATA2_MIDAMBLE_KNOWN = 0x0080,
+ IEEE80211_RADIOTAP_HE_DATA2_RU_OFFSET = 0x3f00,
+ IEEE80211_RADIOTAP_HE_DATA2_RU_OFFSET_KNOWN = 0x4000,
+ IEEE80211_RADIOTAP_HE_DATA2_PRISEC_80_SEC = 0x8000,
+
+ IEEE80211_RADIOTAP_HE_DATA3_BSS_COLOR = 0x003f,
+ IEEE80211_RADIOTAP_HE_DATA3_BEAM_CHANGE = 0x0040,
+ IEEE80211_RADIOTAP_HE_DATA3_UL_DL = 0x0080,
+ IEEE80211_RADIOTAP_HE_DATA3_DATA_MCS = 0x0f00,
+ IEEE80211_RADIOTAP_HE_DATA3_DATA_DCM = 0x1000,
+ IEEE80211_RADIOTAP_HE_DATA3_CODING = 0x2000,
+ IEEE80211_RADIOTAP_HE_DATA3_LDPC_XSYMSEG = 0x4000,
+ IEEE80211_RADIOTAP_HE_DATA3_STBC = 0x8000,
+
+ IEEE80211_RADIOTAP_HE_DATA4_SU_MU_SPTL_REUSE = 0x000f,
+ IEEE80211_RADIOTAP_HE_DATA4_MU_STA_ID = 0x7ff0,
+ IEEE80211_RADIOTAP_HE_DATA4_TB_SPTL_REUSE1 = 0x000f,
+ IEEE80211_RADIOTAP_HE_DATA4_TB_SPTL_REUSE2 = 0x00f0,
+ IEEE80211_RADIOTAP_HE_DATA4_TB_SPTL_REUSE3 = 0x0f00,
+ IEEE80211_RADIOTAP_HE_DATA4_TB_SPTL_REUSE4 = 0xf000,
+
+ IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC = 0x000f,
+ IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_20MHZ = 0,
+ IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_40MHZ = 1,
+ IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_80MHZ = 2,
+ IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_160MHZ = 3,
+ IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_26T = 4,
+ IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_52T = 5,
+ IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_106T = 6,
+ IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_242T = 7,
+ IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_484T = 8,
+ IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_996T = 9,
+ IEEE80211_RADIOTAP_HE_DATA5_DATA_BW_RU_ALLOC_2x996T = 10,
+
+ IEEE80211_RADIOTAP_HE_DATA5_GI = 0x0030,
+ IEEE80211_RADIOTAP_HE_DATA5_GI_0_8 = 0,
+ IEEE80211_RADIOTAP_HE_DATA5_GI_1_6 = 1,
+ IEEE80211_RADIOTAP_HE_DATA5_GI_3_2 = 2,
+
+ IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE = 0x00c0,
+ IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_UNKNOWN = 0,
+ IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_1X = 1,
+ IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_2X = 2,
+ IEEE80211_RADIOTAP_HE_DATA5_LTF_SIZE_4X = 3,
+ IEEE80211_RADIOTAP_HE_DATA5_NUM_LTF_SYMS = 0x0700,
+ IEEE80211_RADIOTAP_HE_DATA5_PRE_FEC_PAD = 0x3000,
+ IEEE80211_RADIOTAP_HE_DATA5_TXBF = 0x4000,
+ IEEE80211_RADIOTAP_HE_DATA5_PE_DISAMBIG = 0x8000,
+
+ IEEE80211_RADIOTAP_HE_DATA6_NSTS = 0x000f,
+ IEEE80211_RADIOTAP_HE_DATA6_DOPPLER = 0x0010,
+ IEEE80211_RADIOTAP_HE_DATA6_TXOP = 0x7f00,
+ IEEE80211_RADIOTAP_HE_DATA6_MIDAMBLE_PDCTY = 0x8000,
+};
+
+struct ieee80211_radiotap_he_mu {
+ __le16 flags1, flags2;
+ u8 ru_ch1[4];
+ u8 ru_ch2[4];
+};
+
+enum ieee80211_radiotap_he_mu_bits {
+ IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_MCS = 0x000f,
+ IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_MCS_KNOWN = 0x0010,
+ IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_DCM = 0x0020,
+ IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_DCM_KNOWN = 0x0040,
+ IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH2_CTR_26T_RU_KNOWN = 0x0080,
+ IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH1_RU_KNOWN = 0x0100,
+ IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH2_RU_KNOWN = 0x0200,
+ IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH1_CTR_26T_RU_KNOWN = 0x1000,
+ IEEE80211_RADIOTAP_HE_MU_FLAGS1_CH1_CTR_26T_RU = 0x2000,
+ IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_COMP_KNOWN = 0x4000,
+ IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_SYMS_USERS_KNOWN = 0x8000,
+
+ IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW = 0x0003,
+ IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW_20MHZ = 0x0000,
+ IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW_40MHZ = 0x0001,
+ IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW_80MHZ = 0x0002,
+ IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW_160MHZ = 0x0003,
+ IEEE80211_RADIOTAP_HE_MU_FLAGS2_BW_FROM_SIG_A_BW_KNOWN = 0x0004,
+ IEEE80211_RADIOTAP_HE_MU_FLAGS2_SIG_B_COMP = 0x0008,
+ IEEE80211_RADIOTAP_HE_MU_FLAGS2_SIG_B_SYMS_USERS = 0x00f0,
+ IEEE80211_RADIOTAP_HE_MU_FLAGS2_PUNC_FROM_SIG_A_BW = 0x0300,
+ IEEE80211_RADIOTAP_HE_MU_FLAGS2_PUNC_FROM_SIG_A_BW_KNOWN = 0x0400,
+ IEEE80211_RADIOTAP_HE_MU_FLAGS2_CH2_CTR_26T_RU = 0x0800,
+};
+
/**
* ieee80211_get_radiotap_len - get radiotap header length
*/
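A hedged sketch of a driver filling the new HE radiotap struct for a received SU frame; the values are invented and le16_encode_bits() is the linux/bitfield.h helper:

#include <linux/bitfield.h>

static void sketch_fill_radiotap_he(struct ieee80211_radiotap_he *he,
				    u8 bss_color, u8 mcs)
{
	memset(he, 0, sizeof(*he));

	he->data1 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_FORMAT_SU |
				IEEE80211_RADIOTAP_HE_DATA1_BSS_COLOR_KNOWN |
				IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN);
	he->data2 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_GI_KNOWN);
	he->data3 = le16_encode_bits(bss_color,
				     IEEE80211_RADIOTAP_HE_DATA3_BSS_COLOR) |
		    le16_encode_bits(mcs,
				     IEEE80211_RADIOTAP_HE_DATA3_DATA_MCS);
	he->data5 = le16_encode_bits(IEEE80211_RADIOTAP_HE_DATA5_GI_1_6,
				     IEEE80211_RADIOTAP_HE_DATA5_GI);
}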
diff --git a/include/net/inet_common.h b/include/net/inet_common.h
index 384b90c62c0b..3ca969cbd161 100644
--- a/include/net/inet_common.h
+++ b/include/net/inet_common.h
@@ -43,7 +43,7 @@ int inet_ctl_sock_create(struct sock **sk, unsigned short family,
int inet_recv_error(struct sock *sk, struct msghdr *msg, int len,
int *addr_len);
-struct sk_buff **inet_gro_receive(struct sk_buff **head, struct sk_buff *skb);
+struct sk_buff *inet_gro_receive(struct list_head *head, struct sk_buff *skb);
int inet_gro_complete(struct sk_buff *skb, int nhoff);
struct sk_buff *inet_gso_segment(struct sk_buff *skb,
netdev_features_t features);
diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
index 0a6c9e0f2b5a..371b3b45fd5c 100644
--- a/include/net/inet_connection_sock.h
+++ b/include/net/inet_connection_sock.h
@@ -19,6 +19,7 @@
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/poll.h>
+#include <linux/kernel.h>
#include <net/inet_sock.h>
#include <net/request_sock.h>
@@ -167,7 +168,8 @@ enum inet_csk_ack_state_t {
ICSK_ACK_SCHED = 1,
ICSK_ACK_TIMER = 2,
ICSK_ACK_PUSHED = 4,
- ICSK_ACK_PUSHED2 = 8
+ ICSK_ACK_PUSHED2 = 8,
+ ICSK_ACK_NOW = 16 /* Send the next ACK immediately (once) */
};
void inet_csk_init_xmit_timers(struct sock *sk,
@@ -224,7 +226,7 @@ static inline void inet_csk_reset_xmit_timer(struct sock *sk, const int what,
if (when > max_when) {
pr_debug("reset_xmit_timer: sk=%p %d when=0x%lx, caller=%p\n",
- sk, what, when, current_text_addr());
+ sk, what, when, (void *)_THIS_IP_);
when = max_when;
}
diff --git a/include/net/inet_frag.h b/include/net/inet_frag.h
index ed07e3786d98..1662cbc0b46b 100644
--- a/include/net/inet_frag.h
+++ b/include/net/inet_frag.h
@@ -2,7 +2,7 @@
#ifndef __NET_FRAG_H__
#define __NET_FRAG_H__
-#include <linux/rhashtable.h>
+#include <linux/rhashtable-types.h>
struct netns_frags {
/* sysctls */
@@ -57,7 +57,9 @@ struct frag_v6_compare_key {
* @lock: spinlock protecting this frag
* @refcnt: reference count of the queue
* @fragments: received fragments head
+ * @rb_fragments: received fragments rb-tree root
* @fragments_tail: received fragments tail
+ * @last_run_head: the head of the last "run". see ip_fragment.c
* @stamp: timestamp of the last received fragment
* @len: total length of the original datagram
* @meat: length of received fragments so far
@@ -75,8 +77,10 @@ struct inet_frag_queue {
struct timer_list timer;
spinlock_t lock;
refcount_t refcnt;
- struct sk_buff *fragments;
+ struct sk_buff *fragments; /* Used in IPv6. */
+ struct rb_root rb_fragments; /* Used in IPv4. */
struct sk_buff *fragments_tail;
+ struct sk_buff *last_run_head;
ktime_t stamp;
int len;
int meat;
@@ -112,6 +116,9 @@ void inet_frag_kill(struct inet_frag_queue *q);
void inet_frag_destroy(struct inet_frag_queue *q);
struct inet_frag_queue *inet_frag_find(struct netns_frags *nf, void *key);
+/* Free all skbs in the queue; return the sum of their truesizes. */
+unsigned int inet_frag_rbtree_purge(struct rb_root *root);
+
static inline void inet_frag_put(struct inet_frag_queue *q)
{
if (refcount_dec_and_test(&q->refcnt))
diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h
index 83d5b3c2ac42..e03b93360f33 100644
--- a/include/net/inet_sock.h
+++ b/include/net/inet_sock.h
@@ -148,6 +148,7 @@ struct inet_cork {
__s16 tos;
char priority;
__u16 gso_size;
+ u64 transmit_time;
};
struct inet_cork_full {
@@ -358,4 +359,12 @@ static inline bool inet_get_convert_csum(struct sock *sk)
return !!inet_sk(sk)->convert_csum;
}
+
+static inline bool inet_can_nonlocal_bind(struct net *net,
+ struct inet_sock *inet)
+{
+ return net->ipv4.sysctl_ip_nonlocal_bind ||
+ inet->freebind || inet->transparent;
+}
+
#endif /* _INET_SOCK_H */
diff --git a/include/net/ip.h b/include/net/ip.h
index 0d2281b4b27a..e44b1a44f67a 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -72,13 +72,27 @@ struct ipcm_cookie {
__be32 addr;
int oif;
struct ip_options_rcu *opt;
- __u8 tx_flags;
__u8 ttl;
__s16 tos;
char priority;
__u16 gso_size;
};
+static inline void ipcm_init(struct ipcm_cookie *ipcm)
+{
+ *ipcm = (struct ipcm_cookie) { .tos = -1 };
+}
+
+static inline void ipcm_init_sk(struct ipcm_cookie *ipcm,
+ const struct inet_sock *inet)
+{
+ ipcm_init(ipcm);
+
+ ipcm->sockc.tsflags = inet->sk.sk_tsflags;
+ ipcm->oif = inet->sk.sk_bound_dev_if;
+ ipcm->addr = inet->inet_saddr;
+}
+
#define IPCB(skb) ((struct inet_skb_parm*)((skb)->cb))
#define PKTINFO_SKB_CB(skb) ((struct in_pktinfo *)((skb)->cb))
@@ -138,6 +152,8 @@ int ip_build_and_send_pkt(struct sk_buff *skb, const struct sock *sk,
struct ip_options_rcu *opt);
int ip_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt,
struct net_device *orig_dev);
+void ip_list_rcv(struct list_head *head, struct packet_type *pt,
+ struct net_device *orig_dev);
int ip_local_deliver(struct sk_buff *skb);
int ip_mr_input(struct sk_buff *skb);
int ip_output(struct net *net, struct sock *sk, struct sk_buff *skb);
@@ -148,7 +164,8 @@ void ip_send_check(struct iphdr *ip);
int __ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb);
int ip_local_out(struct net *net, struct sock *sk, struct sk_buff *skb);
-int ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl);
+int __ip_queue_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl,
+ __u8 tos);
void ip_init(void);
int ip_append_data(struct sock *sk, struct flowi4 *fl4,
int getfrag(void *from, char *to, int offset, int len,
@@ -174,6 +191,12 @@ struct sk_buff *ip_make_skb(struct sock *sk, struct flowi4 *fl4,
struct ipcm_cookie *ipc, struct rtable **rtp,
struct inet_cork *cork, unsigned int flags);
+static inline int ip_queue_xmit(struct sock *sk, struct sk_buff *skb,
+ struct flowi *fl)
+{
+ return __ip_queue_xmit(sk, skb, fl, inet_sk(sk)->tos);
+}
+
static inline struct sk_buff *ip_finish_skb(struct sock *sk, struct flowi4 *fl4)
{
return __ip_make_skb(sk, fl4, &sk->sk_write_queue, &inet_sk(sk)->cork.base);
diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h
index 5cba71d2dc44..3d4930528db0 100644
--- a/include/net/ip6_fib.h
+++ b/include/net/ip6_fib.h
@@ -170,6 +170,7 @@ struct fib6_info {
unused:3;
struct fib6_nh fib6_nh;
+ struct rcu_head rcu;
};
struct rt6_info {
@@ -273,17 +274,22 @@ static inline void ip6_rt_put(struct rt6_info *rt)
}
struct fib6_info *fib6_info_alloc(gfp_t gfp_flags);
-void fib6_info_destroy(struct fib6_info *f6i);
+void fib6_info_destroy_rcu(struct rcu_head *head);
static inline void fib6_info_hold(struct fib6_info *f6i)
{
atomic_inc(&f6i->fib6_ref);
}
+static inline bool fib6_info_hold_safe(struct fib6_info *f6i)
+{
+ return atomic_inc_not_zero(&f6i->fib6_ref);
+}
+
static inline void fib6_info_release(struct fib6_info *f6i)
{
if (f6i && atomic_dec_and_test(&f6i->fib6_ref))
- fib6_info_destroy(f6i);
+ call_rcu(&f6i->rcu, fib6_info_destroy_rcu);
}
enum fib6_walk_state {
diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
index 59656fc580df..7b9c82de11cc 100644
--- a/include/net/ip6_route.h
+++ b/include/net/ip6_route.h
@@ -66,6 +66,12 @@ static inline bool rt6_need_strict(const struct in6_addr *daddr)
(IPV6_ADDR_MULTICAST | IPV6_ADDR_LINKLOCAL | IPV6_ADDR_LOOPBACK);
}
+static inline bool rt6_qualify_for_ecmp(const struct fib6_info *f6i)
+{
+ return (f6i->fib6_flags & (RTF_GATEWAY|RTF_ADDRCONF|RTF_DYNAMIC)) ==
+ RTF_GATEWAY;
+}
+
void ip6_route_input(struct sk_buff *skb);
struct dst_entry *ip6_route_input_lookup(struct net *net,
struct net_device *dev,
diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
index 90ff430f5e9d..b0d022ff6ea1 100644
--- a/include/net/ip_tunnels.h
+++ b/include/net/ip_tunnels.h
@@ -466,10 +466,12 @@ static inline void ip_tunnel_info_opts_get(void *to,
}
static inline void ip_tunnel_info_opts_set(struct ip_tunnel_info *info,
- const void *from, int len)
+ const void *from, int len,
+ __be16 flags)
{
memcpy(ip_tunnel_info_opts(info), from, len);
info->options_len = len;
+ info->key.tun_flags |= flags;
}
static inline struct ip_tunnel_info *lwt_tun_info(struct lwtunnel_state *lwtstate)
@@ -511,9 +513,11 @@ static inline void ip_tunnel_info_opts_get(void *to,
}
static inline void ip_tunnel_info_opts_set(struct ip_tunnel_info *info,
- const void *from, int len)
+ const void *from, int len,
+ __be16 flags)
{
info->options_len = 0;
+ info->key.tun_flags |= flags;
}
#endif /* CONFIG_INET */
diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
index a0bec23c6d5e..a0d2e0bb9a94 100644
--- a/include/net/ip_vs.h
+++ b/include/net/ip_vs.h
@@ -335,6 +335,11 @@ enum ip_vs_sctp_states {
IP_VS_SCTP_S_LAST
};
+/* Connection templates use bits from state */
+#define IP_VS_CTPL_S_NONE 0x0000
+#define IP_VS_CTPL_S_ASSURED 0x0001
+#define IP_VS_CTPL_S_LAST 0x0002
+
/* Delta sequence info structure
* Each ip_vs_conn has 2 (output AND input seq. changes).
* Only used in the VS/NAT.
@@ -1221,7 +1226,7 @@ struct ip_vs_conn *ip_vs_conn_new(const struct ip_vs_conn_param *p, int dest_af,
struct ip_vs_dest *dest, __u32 fwmark);
void ip_vs_conn_expire_now(struct ip_vs_conn *cp);
-const char *ip_vs_state_name(__u16 proto, int state);
+const char *ip_vs_state_name(const struct ip_vs_conn *cp);
void ip_vs_tcp_conn_listen(struct ip_vs_conn *cp);
int ip_vs_check_template(struct ip_vs_conn *ct, struct ip_vs_dest *cdest);
@@ -1289,6 +1294,17 @@ ip_vs_control_add(struct ip_vs_conn *cp, struct ip_vs_conn *ctl_cp)
atomic_inc(&ctl_cp->n_control);
}
+/* Mark our template as assured */
+static inline void
+ip_vs_control_assure_ct(struct ip_vs_conn *cp)
+{
+ struct ip_vs_conn *ct = cp->control;
+
+ if (ct && !(ct->state & IP_VS_CTPL_S_ASSURED) &&
+ (ct->flags & IP_VS_CONN_F_TEMPLATE))
+ ct->state |= IP_VS_CTPL_S_ASSURED;
+}
+
/* IPVS netns init & cleanup functions */
int ip_vs_estimator_net_init(struct netns_ipvs *ipvs);
int ip_vs_control_net_init(struct netns_ipvs *ipvs);
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index 16475c269749..ff33f498c137 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -294,6 +294,7 @@ struct ipv6_fl_socklist {
};
struct ipcm6_cookie {
+ struct sockcm_cookie sockc;
__s16 hlimit;
__s16 tclass;
__s8 dontfrag;
@@ -301,6 +302,25 @@ struct ipcm6_cookie {
__u16 gso_size;
};
+static inline void ipcm6_init(struct ipcm6_cookie *ipc6)
+{
+ *ipc6 = (struct ipcm6_cookie) {
+ .hlimit = -1,
+ .tclass = -1,
+ .dontfrag = -1,
+ };
+}
+
+static inline void ipcm6_init_sk(struct ipcm6_cookie *ipc6,
+ const struct ipv6_pinfo *np)
+{
+ *ipc6 = (struct ipcm6_cookie) {
+ .hlimit = -1,
+ .tclass = np->tclass,
+ .dontfrag = np->dontfrag,
+ };
+}
+
static inline struct ipv6_txoptions *txopt_get(const struct ipv6_pinfo *np)
{
struct ipv6_txoptions *opt;
@@ -355,14 +375,7 @@ struct ipv6_txoptions *ipv6_dup_options(struct sock *sk,
struct ipv6_txoptions *ipv6_renew_options(struct sock *sk,
struct ipv6_txoptions *opt,
int newtype,
- struct ipv6_opt_hdr __user *newopt,
- int newoptlen);
-struct ipv6_txoptions *
-ipv6_renew_options_kern(struct sock *sk,
- struct ipv6_txoptions *opt,
- int newtype,
- struct ipv6_opt_hdr *newopt,
- int newoptlen);
+ struct ipv6_opt_hdr *newopt);
struct ipv6_txoptions *ipv6_fixup_options(struct ipv6_txoptions *opt_space,
struct ipv6_txoptions *opt);
@@ -561,34 +574,6 @@ static inline bool ipv6_prefix_equal(const struct in6_addr *addr1,
}
#endif
-struct inet_frag_queue;
-
-enum ip6_defrag_users {
- IP6_DEFRAG_LOCAL_DELIVER,
- IP6_DEFRAG_CONNTRACK_IN,
- __IP6_DEFRAG_CONNTRACK_IN = IP6_DEFRAG_CONNTRACK_IN + USHRT_MAX,
- IP6_DEFRAG_CONNTRACK_OUT,
- __IP6_DEFRAG_CONNTRACK_OUT = IP6_DEFRAG_CONNTRACK_OUT + USHRT_MAX,
- IP6_DEFRAG_CONNTRACK_BRIDGE_IN,
- __IP6_DEFRAG_CONNTRACK_BRIDGE_IN = IP6_DEFRAG_CONNTRACK_BRIDGE_IN + USHRT_MAX,
-};
-
-void ip6_frag_init(struct inet_frag_queue *q, const void *a);
-extern const struct rhashtable_params ip6_rhash_params;
-
-/*
- * Equivalent of ipv4 struct ip
- */
-struct frag_queue {
- struct inet_frag_queue q;
-
- int iif;
- __u16 nhoffset;
- u8 ecn;
-};
-
-void ip6_expire_frag_queue(struct net *net, struct frag_queue *fq);
-
static inline bool ipv6_addr_any(const struct in6_addr *a)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
@@ -797,6 +782,13 @@ static inline void iph_to_flow_copy_v6addrs(struct flow_keys *flow,
#if IS_ENABLED(CONFIG_IPV6)
+static inline bool ipv6_can_nonlocal_bind(struct net *net,
+ struct inet_sock *inet)
+{
+ return net->ipv6.sysctl.ip_nonlocal_bind ||
+ inet->freebind || inet->transparent;
+}
+
/* Sysctl settings for net ipv6.auto_flowlabels */
#define IP6_AUTO_FLOW_LABEL_OFF 0
#define IP6_AUTO_FLOW_LABEL_OPTOUT 1
@@ -830,7 +822,7 @@ static inline __be32 ip6_make_flowlabel(struct net *net, struct sk_buff *skb,
* to minimize possibility that any useful information to an
* attacker is leaked. Only lower 20 bits are relevant.
*/
- rol32(hash, 16);
+ hash = rol32(hash, 16);
flowlabel = (__force __be32)hash & IPV6_FLOWLABEL_MASK;
@@ -922,6 +914,8 @@ static inline __be32 flowi6_get_flowlabel(const struct flowi6 *fl6)
int ipv6_rcv(struct sk_buff *skb, struct net_device *dev,
struct packet_type *pt, struct net_device *orig_dev);
+void ipv6_list_rcv(struct list_head *head, struct packet_type *pt,
+ struct net_device *orig_dev);
int ip6_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb);
@@ -938,8 +932,7 @@ int ip6_append_data(struct sock *sk,
int odd, struct sk_buff *skb),
void *from, int length, int transhdrlen,
struct ipcm6_cookie *ipc6, struct flowi6 *fl6,
- struct rt6_info *rt, unsigned int flags,
- const struct sockcm_cookie *sockc);
+ struct rt6_info *rt, unsigned int flags);
int ip6_push_pending_frames(struct sock *sk);
@@ -956,8 +949,7 @@ struct sk_buff *ip6_make_skb(struct sock *sk,
void *from, int length, int transhdrlen,
struct ipcm6_cookie *ipc6, struct flowi6 *fl6,
struct rt6_info *rt, unsigned int flags,
- struct inet_cork_full *cork,
- const struct sockcm_cookie *sockc);
+ struct inet_cork_full *cork);
static inline struct sk_buff *ip6_finish_skb(struct sock *sk)
{
@@ -1107,6 +1099,8 @@ void ipv6_sysctl_unregister(void);
int ipv6_sock_mc_join(struct sock *sk, int ifindex,
const struct in6_addr *addr);
+int ipv6_sock_mc_join_ssm(struct sock *sk, int ifindex,
+ const struct in6_addr *addr, unsigned int mode);
int ipv6_sock_mc_drop(struct sock *sk, int ifindex,
const struct in6_addr *addr);
#endif /* _NET_IPV6_H */
diff --git a/include/net/ipv6_frag.h b/include/net/ipv6_frag.h
new file mode 100644
index 000000000000..6ced1e6899b6
--- /dev/null
+++ b/include/net/ipv6_frag.h
@@ -0,0 +1,104 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _IPV6_FRAG_H
+#define _IPV6_FRAG_H
+#include <linux/kernel.h>
+#include <net/addrconf.h>
+#include <net/ipv6.h>
+#include <net/inet_frag.h>
+
+enum ip6_defrag_users {
+ IP6_DEFRAG_LOCAL_DELIVER,
+ IP6_DEFRAG_CONNTRACK_IN,
+ __IP6_DEFRAG_CONNTRACK_IN = IP6_DEFRAG_CONNTRACK_IN + USHRT_MAX,
+ IP6_DEFRAG_CONNTRACK_OUT,
+ __IP6_DEFRAG_CONNTRACK_OUT = IP6_DEFRAG_CONNTRACK_OUT + USHRT_MAX,
+ IP6_DEFRAG_CONNTRACK_BRIDGE_IN,
+ __IP6_DEFRAG_CONNTRACK_BRIDGE_IN = IP6_DEFRAG_CONNTRACK_BRIDGE_IN + USHRT_MAX,
+};
+
+/*
+ * Equivalent of ipv4 struct ip
+ */
+struct frag_queue {
+ struct inet_frag_queue q;
+
+ int iif;
+ __u16 nhoffset;
+ u8 ecn;
+};
+
+#if IS_ENABLED(CONFIG_IPV6)
+static inline void ip6frag_init(struct inet_frag_queue *q, const void *a)
+{
+ struct frag_queue *fq = container_of(q, struct frag_queue, q);
+ const struct frag_v6_compare_key *key = a;
+
+ q->key.v6 = *key;
+ fq->ecn = 0;
+}
+
+static inline u32 ip6frag_key_hashfn(const void *data, u32 len, u32 seed)
+{
+ return jhash2(data,
+ sizeof(struct frag_v6_compare_key) / sizeof(u32), seed);
+}
+
+static inline u32 ip6frag_obj_hashfn(const void *data, u32 len, u32 seed)
+{
+ const struct inet_frag_queue *fq = data;
+
+ return jhash2((const u32 *)&fq->key.v6,
+ sizeof(struct frag_v6_compare_key) / sizeof(u32), seed);
+}
+
+static inline int
+ip6frag_obj_cmpfn(struct rhashtable_compare_arg *arg, const void *ptr)
+{
+ const struct frag_v6_compare_key *key = arg->key;
+ const struct inet_frag_queue *fq = ptr;
+
+ return !!memcmp(&fq->key, key, sizeof(*key));
+}
+
+static inline void
+ip6frag_expire_frag_queue(struct net *net, struct frag_queue *fq)
+{
+ struct net_device *dev = NULL;
+ struct sk_buff *head;
+
+ rcu_read_lock();
+ spin_lock(&fq->q.lock);
+
+ if (fq->q.flags & INET_FRAG_COMPLETE)
+ goto out;
+
+ inet_frag_kill(&fq->q);
+
+ dev = dev_get_by_index_rcu(net, fq->iif);
+ if (!dev)
+ goto out;
+
+ __IP6_INC_STATS(net, __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS);
+ __IP6_INC_STATS(net, __in6_dev_get(dev), IPSTATS_MIB_REASMTIMEOUT);
+
+ /* Don't send error if the first segment did not arrive. */
+ head = fq->q.fragments;
+ if (!(fq->q.flags & INET_FRAG_FIRST_IN) || !head)
+ goto out;
+
+ head->dev = dev;
+ skb_get(head);
+ spin_unlock(&fq->q.lock);
+
+ icmpv6_send(head, ICMPV6_TIME_EXCEED, ICMPV6_EXC_FRAGTIME, 0);
+ kfree_skb(head);
+ goto out_rcu_unlock;
+
+out:
+ spin_unlock(&fq->q.lock);
+out_rcu_unlock:
+ rcu_read_unlock();
+ inet_frag_put(&fq->q);
+}
+#endif
+#endif
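Users of this header (IPv6 reassembly and the conntrack/bridge defrag paths) are expected to wire these helpers into their rhashtable setup. A hedged sketch, assuming the node and key members declared for inet_frag_queue in inet_frag.h:

static const struct rhashtable_params sketch_ip6_rhash_params = {
	.head_offset		= offsetof(struct inet_frag_queue, node),
	.key_offset		= offsetof(struct inet_frag_queue, key),
	.key_len		= sizeof(struct frag_v6_compare_key),
	.hashfn			= ip6frag_key_hashfn,
	.obj_hashfn		= ip6frag_obj_hashfn,
	.obj_cmpfn		= ip6frag_obj_cmpfn,
	.automatic_shrinking	= true,
};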
diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h
index b0eaeb02d46d..f4c21b5a1242 100644
--- a/include/net/iucv/af_iucv.h
+++ b/include/net/iucv/af_iucv.h
@@ -153,6 +153,8 @@ struct iucv_sock_list {
atomic_t autobind_name;
};
+__poll_t iucv_sock_poll(struct file *file, struct socket *sock,
+ poll_table *wait);
void iucv_sock_link(struct iucv_sock_list *l, struct sock *s);
void iucv_sock_unlink(struct iucv_sock_list *l, struct sock *s);
void iucv_accept_enqueue(struct sock *parent, struct sock *sk);
diff --git a/include/net/lag.h b/include/net/lag.h
new file mode 100644
index 000000000000..95b880e6fdde
--- /dev/null
+++ b/include/net/lag.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_IF_LAG_H
+#define _LINUX_IF_LAG_H
+
+#include <linux/netdevice.h>
+#include <linux/if_team.h>
+#include <net/bonding.h>
+
+static inline bool net_lag_port_dev_txable(const struct net_device *port_dev)
+{
+ if (netif_is_team_port(port_dev))
+ return team_port_dev_txable(port_dev);
+ else
+ return bond_is_active_slave_dev(port_dev);
+}
+
+#endif /* _LINUX_IF_LAG_H */
diff --git a/include/net/llc.h b/include/net/llc.h
index dc35f25eb679..890a87318014 100644
--- a/include/net/llc.h
+++ b/include/net/llc.h
@@ -116,6 +116,11 @@ static inline void llc_sap_hold(struct llc_sap *sap)
refcount_inc(&sap->refcnt);
}
+static inline bool llc_sap_hold_safe(struct llc_sap *sap)
+{
+ return refcount_inc_not_zero(&sap->refcnt);
+}
+
void llc_sap_close(struct llc_sap *sap);
static inline void llc_sap_put(struct llc_sap *sap)
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index 851a5e19ae32..5790f55c241d 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -23,6 +23,7 @@
#include <linux/ieee80211.h>
#include <net/cfg80211.h>
#include <net/codel.h>
+#include <net/ieee80211_radiotap.h>
#include <asm/unaligned.h>
/**
@@ -162,6 +163,8 @@ enum ieee80211_ac_numbers {
* @txop: maximum burst time in units of 32 usecs, 0 meaning disabled
* @acm: is mandatory admission control required for the access category
* @uapsd: is U-APSD mode enabled for the queue
+ * @mu_edca: is the MU EDCA configured
+ * @mu_edca_param_rec: MU EDCA Parameter Record for HE
*/
struct ieee80211_tx_queue_params {
u16 txop;
@@ -170,6 +173,8 @@ struct ieee80211_tx_queue_params {
u8 aifs;
bool acm;
bool uapsd;
+ bool mu_edca;
+ struct ieee80211_he_mu_edca_param_ac_rec mu_edca_param_rec;
};
struct ieee80211_low_level_stats {
@@ -463,6 +468,15 @@ struct ieee80211_mu_group_data {
* This structure keeps information about a BSS (and an association
* to that BSS) that can change during the lifetime of the BSS.
*
+ * @bss_color: 6-bit value to mark inter-BSS frame, if BSS supports HE
+ * @htc_trig_based_pkt_ext: default PE in 4us units, if BSS supports HE
+ * @multi_sta_back_32bit: supports BA bitmap of 32-bits in Multi-STA BACK
+ * @uora_exists: is the UORA element advertised by AP
+ * @ack_enabled: indicates support to receive a multi-TID that solicits either
+ * ACK, BACK or both
+ * @uora_ocw_range: UORA element's OCW Range field
+ * @frame_time_rts_th: HE duration RTS threshold, in units of 32us
+ * @he_support: does this BSS support HE
* @assoc: association status
* @ibss_joined: indicates whether this station is part of an IBSS
* or not
@@ -550,6 +564,14 @@ struct ieee80211_mu_group_data {
*/
struct ieee80211_bss_conf {
const u8 *bssid;
+ u8 bss_color;
+ u8 htc_trig_based_pkt_ext;
+ bool multi_sta_back_32bit;
+ bool uora_exists;
+ bool ack_enabled;
+ u8 uora_ocw_range;
+ u16 frame_time_rts_th;
+ bool he_support;
/* association related data */
bool assoc, ibss_joined;
bool ibss_creator;
@@ -1106,6 +1128,18 @@ ieee80211_tx_info_clear_status(struct ieee80211_tx_info *info)
* @RX_FLAG_AMPDU_EOF_BIT: Value of the EOF bit in the A-MPDU delimiter for this
* frame
* @RX_FLAG_AMPDU_EOF_BIT_KNOWN: The EOF value is known
+ * @RX_FLAG_RADIOTAP_HE: HE radiotap data is present
+ * (&struct ieee80211_radiotap_he, mac80211 will fill in
+ * - DATA3_DATA_MCS
+ * - DATA3_DATA_DCM
+ * - DATA3_CODING
+ * - DATA5_GI
+ * - DATA5_DATA_BW_RU_ALLOC
+ * - DATA6_NSTS
+ * - DATA3_STBC
+ * from the RX info data, so leave those zeroed when building this data)
+ * @RX_FLAG_RADIOTAP_HE_MU: HE MU radiotap data is present
+ * (&struct ieee80211_radiotap_he_mu)
*/
enum mac80211_rx_flags {
RX_FLAG_MMIC_ERROR = BIT(0),
@@ -1134,6 +1168,8 @@ enum mac80211_rx_flags {
RX_FLAG_ICV_STRIPPED = BIT(23),
RX_FLAG_AMPDU_EOF_BIT = BIT(24),
RX_FLAG_AMPDU_EOF_BIT_KNOWN = BIT(25),
+ RX_FLAG_RADIOTAP_HE = BIT(26),
+ RX_FLAG_RADIOTAP_HE_MU = BIT(27),
};
/**
@@ -1164,6 +1200,7 @@ enum mac80211_rx_encoding {
RX_ENC_LEGACY = 0,
RX_ENC_HT,
RX_ENC_VHT,
+ RX_ENC_HE,
};
/**
@@ -1198,6 +1235,9 @@ enum mac80211_rx_encoding {
* @encoding: &enum mac80211_rx_encoding
* @bw: &enum rate_info_bw
* @enc_flags: uses bits from &enum mac80211_rx_encoding_flags
+ * @he_ru: HE RU, from &enum nl80211_he_ru_alloc
+ * @he_gi: HE GI, from &enum nl80211_he_gi
+ * @he_dcm: HE DCM value
* @rx_flags: internal RX flags for mac80211
* @ampdu_reference: A-MPDU reference number, must be a different value for
* each A-MPDU but the same for each subframe within one A-MPDU
@@ -1211,7 +1251,8 @@ struct ieee80211_rx_status {
u32 flag;
u16 freq;
u8 enc_flags;
- u8 encoding:2, bw:3;
+ u8 encoding:2, bw:3, he_ru:3;
+ u8 he_gi:2, he_dcm:1;
u8 rate_idx;
u8 nss;
u8 rx_flags;
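
On receive, an HE-capable driver fills the new bitfields before handing the frame to mac80211; if it also sets RX_FLAG_RADIOTAP_HE, mac80211 derives the radiotap HE fields listed above from these values. A rough sketch, assuming the RU/GI constants keep their nl80211 names:

static void example_fill_he_rx_status(struct ieee80211_rx_status *status)
{
	status->encoding = RX_ENC_HE;
	status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_242;	/* assumed name */
	status->he_gi = NL80211_RATE_INFO_HE_GI_0_8;		/* assumed name */
	status->he_dcm = 0;
	status->flag |= RX_FLAG_RADIOTAP_HE;
}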
@@ -1770,6 +1811,7 @@ struct ieee80211_sta_rates {
* @supp_rates: Bitmap of supported rates (per band)
* @ht_cap: HT capabilities of this STA; restricted to our own capabilities
* @vht_cap: VHT capabilities of this STA; restricted to our own capabilities
+ * @he_cap: HE capabilities of this STA
* @max_rx_aggregation_subframes: maximal amount of frames in a single AMPDU
* that this station is allowed to transmit to us.
* Can be modified by driver.
@@ -1805,7 +1847,8 @@ struct ieee80211_sta {
u16 aid;
struct ieee80211_sta_ht_cap ht_cap;
struct ieee80211_sta_vht_cap vht_cap;
- u8 max_rx_aggregation_subframes;
+ struct ieee80211_sta_he_cap he_cap;
+ u16 max_rx_aggregation_subframes;
bool wme;
u8 uapsd_queues;
u8 max_sp;
@@ -2196,10 +2239,11 @@ enum ieee80211_hw_flags {
* it shouldn't be set.
*
* @max_tx_aggregation_subframes: maximum number of subframes in an
- * aggregate an HT driver will transmit. Though ADDBA will advertise
- * a constant value of 64 as some older APs can crash if the window
- * size is smaller (an example is LinkSys WRT120N with FW v1.0.07
- * build 002 Jun 18 2012).
+ * aggregate an HT/HE device will transmit. In HT AddBA we'll
+ * advertise a constant value of 64 as some older APs crash if
+ * the window size is smaller (an example is LinkSys WRT120N
+ * with FW v1.0.07 build 002 Jun 18 2012).
+ * For AddBA to HE capable peers this value will be used.
*
* @max_tx_fragments: maximum number of tx buffers per (A)-MSDU, sum
* of 1 + skb_shinfo(skb)->nr_frags for each skb in the frag_list.
@@ -2216,6 +2260,8 @@ enum ieee80211_hw_flags {
* the default is _GI | _BANDWIDTH.
* Use the %IEEE80211_RADIOTAP_VHT_KNOWN_\* values.
*
+ * @radiotap_he: HE radiotap validity flags
+ *
* @radiotap_timestamp: Information for the radiotap timestamp field; if the
* 'units_pos' member is set to a non-negative value it must be set to
* a combination of a IEEE80211_RADIOTAP_TIMESTAMP_UNIT_* and a
@@ -2263,8 +2309,8 @@ struct ieee80211_hw {
u8 max_rates;
u8 max_report_rates;
u8 max_rate_tries;
- u8 max_rx_aggregation_subframes;
- u8 max_tx_aggregation_subframes;
+ u16 max_rx_aggregation_subframes;
+ u16 max_tx_aggregation_subframes;
u8 max_tx_fragments;
u8 offchannel_tx_hw_queue;
u8 radiotap_mcs_details;
@@ -2904,7 +2950,7 @@ struct ieee80211_ampdu_params {
struct ieee80211_sta *sta;
u16 tid;
u16 ssn;
- u8 buf_size;
+ u16 buf_size;
bool amsdu;
u16 timeout;
};
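
The widening of buf_size here, and of the two aggregation-subframe limits above, exists so that HE block-ack windows of up to 256 subframes fit into the fields. An HE-capable driver might therefore advertise, purely as an illustration:

static void example_setup_he_aggregation(struct ieee80211_hw *hw)
{
	/* illustrative values; 256 is the HE BlockAck window limit */
	hw->max_rx_aggregation_subframes = 256;
	hw->max_tx_aggregation_subframes = 256;
}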
diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
index 47e35cce3b64..9b5fdc50519a 100644
--- a/include/net/net_namespace.h
+++ b/include/net/net_namespace.h
@@ -10,6 +10,7 @@
#include <linux/workqueue.h>
#include <linux/list.h>
#include <linux/sysctl.h>
+#include <linux/uidgid.h>
#include <net/flow.h>
#include <net/netns/core.h>
@@ -128,6 +129,7 @@ struct net {
#endif
#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
struct netns_nf_frag nf_frag;
+ struct ctl_table_header *nf_frag_frags_hdr;
#endif
struct sock *nfnl;
struct sock *nfnl_stash;
@@ -169,6 +171,8 @@ extern struct net init_net;
struct net *copy_net_ns(unsigned long flags, struct user_namespace *user_ns,
struct net *old_net);
+void net_ns_get_ownership(const struct net *net, kuid_t *uid, kgid_t *gid);
+
void net_ns_barrier(void);
#else /* CONFIG_NET_NS */
#include <linux/sched.h>
@@ -181,6 +185,13 @@ static inline struct net *copy_net_ns(unsigned long flags,
return old_net;
}
+static inline void net_ns_get_ownership(const struct net *net,
+ kuid_t *uid, kgid_t *gid)
+{
+ *uid = GLOBAL_ROOT_UID;
+ *gid = GLOBAL_ROOT_GID;
+}
+
static inline void net_ns_barrier(void) {}
#endif /* CONFIG_NET_NS */
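
net_ns_get_ownership() reports which uid/gid should own objects created on behalf of a network namespace (sysfs entries, for instance); without CONFIG_NET_NS it falls back to global root as the stub above shows. A hypothetical caller:

static void example_report_owner(const struct net *net)
{
	kuid_t uid;
	kgid_t gid;

	net_ns_get_ownership(net, &uid, &gid);
	pr_debug("netns owner uid=%u gid=%u\n",
		 from_kuid(&init_user_ns, uid), from_kgid(&init_user_ns, gid));
}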
diff --git a/include/net/netevent.h b/include/net/netevent.h
index d9918261701c..4107016c3bb4 100644
--- a/include/net/netevent.h
+++ b/include/net/netevent.h
@@ -28,6 +28,7 @@ enum netevent_notif_type {
NETEVENT_DELAY_PROBE_TIME_UPDATE, /* arg is struct neigh_parms ptr */
NETEVENT_IPV4_MPATH_HASH_UPDATE, /* arg is struct net ptr */
NETEVENT_IPV6_MPATH_HASH_UPDATE, /* arg is struct net ptr */
+ NETEVENT_IPV4_FWD_UPDATE_PRIORITY_UPDATE, /* arg is struct net ptr */
};
int register_netevent_notifier(struct notifier_block *nb);
diff --git a/include/net/netfilter/ipv4/nf_conntrack_ipv4.h b/include/net/netfilter/ipv4/nf_conntrack_ipv4.h
index 73f825732326..c84b51682f08 100644
--- a/include/net/netfilter/ipv4/nf_conntrack_ipv4.h
+++ b/include/net/netfilter/ipv4/nf_conntrack_ipv4.h
@@ -10,9 +10,6 @@
#ifndef _NF_CONNTRACK_IPV4_H
#define _NF_CONNTRACK_IPV4_H
-
-const extern struct nf_conntrack_l3proto nf_conntrack_l3proto_ipv4;
-
extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp4;
extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_udp4;
extern const struct nf_conntrack_l4proto nf_conntrack_l4proto_icmp;
diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h
index 062dc19b5840..7e012312cd61 100644
--- a/include/net/netfilter/nf_conntrack.h
+++ b/include/net/netfilter/nf_conntrack.h
@@ -41,6 +41,11 @@ union nf_conntrack_expect_proto {
/* insert expect proto private data here */
};
+struct nf_conntrack_net {
+ unsigned int users4;
+ unsigned int users6;
+};
+
#include <linux/types.h>
#include <linux/skbuff.h>
@@ -171,8 +176,6 @@ void nf_ct_netns_put(struct net *net, u8 nfproto);
*/
void *nf_ct_alloc_hashtable(unsigned int *sizep, int nulls);
-void nf_ct_free_hashtable(void *hash, unsigned int size);
-
int nf_conntrack_hash_check_insert(struct nf_conn *ct);
bool nf_ct_delete(struct nf_conn *ct, u32 pid, int report);
diff --git a/include/net/netfilter/nf_conntrack_core.h b/include/net/netfilter/nf_conntrack_core.h
index 9b5e7634713e..2a3e0974a6af 100644
--- a/include/net/netfilter/nf_conntrack_core.h
+++ b/include/net/netfilter/nf_conntrack_core.h
@@ -14,7 +14,6 @@
#define _NF_CONNTRACK_CORE_H
#include <linux/netfilter.h>
-#include <net/netfilter/nf_conntrack_l3proto.h>
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_ecache.h>
@@ -40,16 +39,8 @@ void nf_conntrack_cleanup_start(void);
void nf_conntrack_init_end(void);
void nf_conntrack_cleanup_end(void);
-bool nf_ct_get_tuple(const struct sk_buff *skb, unsigned int nhoff,
- unsigned int dataoff, u_int16_t l3num, u_int8_t protonum,
- struct net *net,
- struct nf_conntrack_tuple *tuple,
- const struct nf_conntrack_l3proto *l3proto,
- const struct nf_conntrack_l4proto *l4proto);
-
bool nf_ct_invert_tuple(struct nf_conntrack_tuple *inverse,
const struct nf_conntrack_tuple *orig,
- const struct nf_conntrack_l3proto *l3proto,
const struct nf_conntrack_l4proto *l4proto);
/* Find a connection corresponding to a tuple. */
@@ -75,10 +66,8 @@ static inline int nf_conntrack_confirm(struct sk_buff *skb)
return ret;
}
-void
-print_tuple(struct seq_file *s, const struct nf_conntrack_tuple *tuple,
- const struct nf_conntrack_l3proto *l3proto,
- const struct nf_conntrack_l4proto *proto);
+void print_tuple(struct seq_file *s, const struct nf_conntrack_tuple *tuple,
+ const struct nf_conntrack_l4proto *proto);
#define CONNTRACK_LOCKS 1024
diff --git a/include/net/netfilter/nf_conntrack_count.h b/include/net/netfilter/nf_conntrack_count.h
index 3a188a0923a3..4b2b2baf8ab4 100644
--- a/include/net/netfilter/nf_conntrack_count.h
+++ b/include/net/netfilter/nf_conntrack_count.h
@@ -1,8 +1,23 @@
#ifndef _NF_CONNTRACK_COUNT_H
#define _NF_CONNTRACK_COUNT_H
+#include <linux/list.h>
+
struct nf_conncount_data;
+enum nf_conncount_list_add {
+ NF_CONNCOUNT_ADDED, /* list add was ok */
+ NF_CONNCOUNT_ERR, /* -ENOMEM, must drop skb */
+ NF_CONNCOUNT_SKIP, /* list is already reclaimed by gc */
+};
+
+struct nf_conncount_list {
+ spinlock_t list_lock;
+ struct list_head head; /* connections with the same filtering key */
+ unsigned int count; /* length of list */
+ bool dead;
+};
+
struct nf_conncount_data *nf_conncount_init(struct net *net, unsigned int family,
unsigned int keylen);
void nf_conncount_destroy(struct net *net, unsigned int family,
@@ -14,15 +29,21 @@ unsigned int nf_conncount_count(struct net *net,
const struct nf_conntrack_tuple *tuple,
const struct nf_conntrack_zone *zone);
-unsigned int nf_conncount_lookup(struct net *net, struct hlist_head *head,
- const struct nf_conntrack_tuple *tuple,
- const struct nf_conntrack_zone *zone,
- bool *addit);
+void nf_conncount_lookup(struct net *net, struct nf_conncount_list *list,
+ const struct nf_conntrack_tuple *tuple,
+ const struct nf_conntrack_zone *zone,
+ bool *addit);
+
+void nf_conncount_list_init(struct nf_conncount_list *list);
+
+enum nf_conncount_list_add
+nf_conncount_add(struct nf_conncount_list *list,
+ const struct nf_conntrack_tuple *tuple,
+ const struct nf_conntrack_zone *zone);
-bool nf_conncount_add(struct hlist_head *head,
- const struct nf_conntrack_tuple *tuple,
- const struct nf_conntrack_zone *zone);
+bool nf_conncount_gc_list(struct net *net,
+ struct nf_conncount_list *list);
-void nf_conncount_cache_free(struct hlist_head *hhead);
+void nf_conncount_cache_free(struct nf_conncount_list *list);
#endif
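
The move from open-coded hlists to struct nf_conncount_list gives callers an init/lookup/add/gc cycle. A condensed sketch of how the helpers fit together once nf_conncount_list_init() has set up the list (error handling and the surrounding table lookup are omitted):

static unsigned int example_count(struct net *net,
				  struct nf_conncount_list *list,
				  const struct nf_conntrack_tuple *tuple,
				  const struct nf_conntrack_zone *zone)
{
	bool addit;

	nf_conncount_lookup(net, list, tuple, zone, &addit);

	if (addit && nf_conncount_add(list, tuple, zone) == NF_CONNCOUNT_ERR)
		return 0;	/* -ENOMEM, the caller should drop the skb */

	return list->count;
}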
diff --git a/include/net/netfilter/nf_conntrack_helper.h b/include/net/netfilter/nf_conntrack_helper.h
index 32c2a94a219d..2492120b8097 100644
--- a/include/net/netfilter/nf_conntrack_helper.h
+++ b/include/net/netfilter/nf_conntrack_helper.h
@@ -103,9 +103,7 @@ int nf_conntrack_helpers_register(struct nf_conntrack_helper *, unsigned int);
void nf_conntrack_helpers_unregister(struct nf_conntrack_helper *,
unsigned int);
-struct nf_conn_help *nf_ct_helper_ext_add(struct nf_conn *ct,
- struct nf_conntrack_helper *helper,
- gfp_t gfp);
+struct nf_conn_help *nf_ct_helper_ext_add(struct nf_conn *ct, gfp_t gfp);
int __nf_ct_try_assign_helper(struct nf_conn *ct, struct nf_conn *tmpl,
gfp_t flags);
diff --git a/include/net/netfilter/nf_conntrack_l3proto.h b/include/net/netfilter/nf_conntrack_l3proto.h
deleted file mode 100644
index d5808f3e2715..000000000000
--- a/include/net/netfilter/nf_conntrack_l3proto.h
+++ /dev/null
@@ -1,84 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Copyright (C)2003,2004 USAGI/WIDE Project
- *
- * Header for use in defining a given L3 protocol for connection tracking.
- *
- * Author:
- * Yasuyuki Kozakai @USAGI <yasuyuki.kozakai@toshiba.co.jp>
- *
- * Derived from include/netfilter_ipv4/ip_conntrack_protocol.h
- */
-
-#ifndef _NF_CONNTRACK_L3PROTO_H
-#define _NF_CONNTRACK_L3PROTO_H
-#include <linux/netlink.h>
-#include <net/netlink.h>
-#include <linux/seq_file.h>
-#include <net/netfilter/nf_conntrack.h>
-
-struct nf_conntrack_l3proto {
- /* L3 Protocol Family number. ex) PF_INET */
- u_int16_t l3proto;
-
- /* size of tuple nlattr, fills a hole */
- u16 nla_size;
-
- /*
- * Try to fill in the third arg: nhoff is offset of l3 proto
- * hdr. Return true if possible.
- */
- bool (*pkt_to_tuple)(const struct sk_buff *skb, unsigned int nhoff,
- struct nf_conntrack_tuple *tuple);
-
- /*
- * Invert the per-proto part of the tuple: ie. turn xmit into reply.
- * Some packets can't be inverted: return 0 in that case.
- */
- bool (*invert_tuple)(struct nf_conntrack_tuple *inverse,
- const struct nf_conntrack_tuple *orig);
-
- /*
- * Called before tracking.
- * *dataoff: offset of protocol header (TCP, UDP,...) in skb
- * *protonum: protocol number
- */
- int (*get_l4proto)(const struct sk_buff *skb, unsigned int nhoff,
- unsigned int *dataoff, u_int8_t *protonum);
-
-#if IS_ENABLED(CONFIG_NF_CT_NETLINK)
- int (*tuple_to_nlattr)(struct sk_buff *skb,
- const struct nf_conntrack_tuple *t);
- int (*nlattr_to_tuple)(struct nlattr *tb[],
- struct nf_conntrack_tuple *t);
- const struct nla_policy *nla_policy;
-#endif
-
- /* Called when netns wants to use connection tracking */
- int (*net_ns_get)(struct net *);
- void (*net_ns_put)(struct net *);
-
- /* Module (if any) which this is connected to. */
- struct module *me;
-};
-
-extern struct nf_conntrack_l3proto __rcu *nf_ct_l3protos[NFPROTO_NUMPROTO];
-
-/* Protocol global registration. */
-int nf_ct_l3proto_register(const struct nf_conntrack_l3proto *proto);
-void nf_ct_l3proto_unregister(const struct nf_conntrack_l3proto *proto);
-
-const struct nf_conntrack_l3proto *nf_ct_l3proto_find_get(u_int16_t l3proto);
-
-/* Existing built-in protocols */
-extern struct nf_conntrack_l3proto nf_conntrack_l3proto_generic;
-
-static inline struct nf_conntrack_l3proto *
-__nf_ct_l3proto_find(u_int16_t l3proto)
-{
- if (unlikely(l3proto >= NFPROTO_NUMPROTO))
- return &nf_conntrack_l3proto_generic;
- return rcu_dereference(nf_ct_l3protos[l3proto]);
-}
-
-#endif /*_NF_CONNTRACK_L3PROTO_H*/
diff --git a/include/net/netfilter/nf_conntrack_l4proto.h b/include/net/netfilter/nf_conntrack_l4proto.h
index a7220eef9aee..8465263b297d 100644
--- a/include/net/netfilter/nf_conntrack_l4proto.h
+++ b/include/net/netfilter/nf_conntrack_l4proto.h
@@ -36,7 +36,7 @@ struct nf_conntrack_l4proto {
struct net *net, struct nf_conntrack_tuple *tuple);
/* Invert the per-proto part of the tuple: ie. turn xmit into reply.
- * Some packets can't be inverted: return 0 in that case.
+ * Only used by icmp, most protocols use a generic version.
*/
bool (*invert_tuple)(struct nf_conntrack_tuple *inverse,
const struct nf_conntrack_tuple *orig);
@@ -45,13 +45,12 @@ struct nf_conntrack_l4proto {
int (*packet)(struct nf_conn *ct,
const struct sk_buff *skb,
unsigned int dataoff,
- enum ip_conntrack_info ctinfo,
- unsigned int *timeouts);
+ enum ip_conntrack_info ctinfo);
/* Called when a new connection for this protocol is found;
 * returns TRUE if it's OK. If so, packet() is called next. */
bool (*new)(struct nf_conn *ct, const struct sk_buff *skb,
- unsigned int dataoff, unsigned int *timeouts);
+ unsigned int dataoff);
/* Called when a conntrack entry is destroyed */
void (*destroy)(struct nf_conn *ct);
@@ -63,9 +62,6 @@ struct nf_conntrack_l4proto {
/* called by gc worker if table is full */
bool (*can_early_drop)(const struct nf_conn *ct);
- /* Return the array of timeouts for this protocol. */
- unsigned int *(*get_timeouts)(struct net *net);
-
/* convert protoinfo to nfnetink attributes */
int (*to_nlattr)(struct sk_buff *skb, struct nlattr *nla,
struct nf_conn *ct);
@@ -81,7 +77,6 @@ struct nf_conntrack_l4proto {
struct nf_conntrack_tuple *t);
const struct nla_policy *nla_policy;
-#if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT)
struct {
int (*nlattr_to_obj)(struct nlattr *tb[],
struct net *net, void *data);
@@ -91,7 +86,6 @@ struct nf_conntrack_l4proto {
u16 nlattr_max;
const struct nla_policy *nla_policy;
} ctnl_timeout;
-#endif
#ifdef CONFIG_NF_CONNTRACK_PROCFS
/* Print out the private part of the conntrack. */
void (*print_conntrack)(struct seq_file *s, struct nf_conn *);
@@ -134,10 +128,6 @@ void nf_ct_l4proto_pernet_unregister(struct net *net,
/* Protocol global registration. */
int nf_ct_l4proto_register_one(const struct nf_conntrack_l4proto *proto);
void nf_ct_l4proto_unregister_one(const struct nf_conntrack_l4proto *proto);
-int nf_ct_l4proto_register(const struct nf_conntrack_l4proto * const proto[],
- unsigned int num_proto);
-void nf_ct_l4proto_unregister(const struct nf_conntrack_l4proto * const proto[],
- unsigned int num_proto);
/* Generic netlink helpers */
int nf_ct_port_tuple_to_nlattr(struct sk_buff *skb,
diff --git a/include/net/netfilter/nf_conntrack_timeout.h b/include/net/netfilter/nf_conntrack_timeout.h
index 9468ab4ad12d..d5f62cc6c2ae 100644
--- a/include/net/netfilter/nf_conntrack_timeout.h
+++ b/include/net/netfilter/nf_conntrack_timeout.h
@@ -11,24 +11,28 @@
#define CTNL_TIMEOUT_NAME_MAX 32
+struct nf_ct_timeout {
+ __u16 l3num;
+ const struct nf_conntrack_l4proto *l4proto;
+ char data[0];
+};
+
struct ctnl_timeout {
struct list_head head;
struct rcu_head rcu_head;
refcount_t refcnt;
char name[CTNL_TIMEOUT_NAME_MAX];
- __u16 l3num;
- const struct nf_conntrack_l4proto *l4proto;
- char data[0];
+ struct nf_ct_timeout timeout;
};
struct nf_conn_timeout {
- struct ctnl_timeout __rcu *timeout;
+ struct nf_ct_timeout __rcu *timeout;
};
static inline unsigned int *
nf_ct_timeout_data(struct nf_conn_timeout *t)
{
- struct ctnl_timeout *timeout;
+ struct nf_ct_timeout *timeout;
timeout = rcu_dereference(t->timeout);
if (timeout == NULL)
@@ -49,7 +53,7 @@ struct nf_conn_timeout *nf_ct_timeout_find(const struct nf_conn *ct)
static inline
struct nf_conn_timeout *nf_ct_timeout_ext_add(struct nf_conn *ct,
- struct ctnl_timeout *timeout,
+ struct nf_ct_timeout *timeout,
gfp_t gfp)
{
#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
@@ -67,32 +71,23 @@ struct nf_conn_timeout *nf_ct_timeout_ext_add(struct nf_conn *ct,
#endif
};
-static inline unsigned int *
-nf_ct_timeout_lookup(struct net *net, struct nf_conn *ct,
- const struct nf_conntrack_l4proto *l4proto)
+static inline unsigned int *nf_ct_timeout_lookup(const struct nf_conn *ct)
{
+ unsigned int *timeouts = NULL;
#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
struct nf_conn_timeout *timeout_ext;
- unsigned int *timeouts;
timeout_ext = nf_ct_timeout_find(ct);
- if (timeout_ext) {
+ if (timeout_ext)
timeouts = nf_ct_timeout_data(timeout_ext);
- if (unlikely(!timeouts))
- timeouts = l4proto->get_timeouts(net);
- } else {
- timeouts = l4proto->get_timeouts(net);
- }
-
- return timeouts;
-#else
- return l4proto->get_timeouts(net);
#endif
+ return timeouts;
}
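
With ->get_timeouts() removed from struct nf_conntrack_l4proto (see the l4proto change above), protocol trackers call nf_ct_timeout_lookup() and fall back to their own per-net defaults when it returns NULL. Roughly, with the default-array helper being a stand-in:

static int example_packet(struct nf_conn *ct, const struct sk_buff *skb,
			  unsigned int dataoff, enum ip_conntrack_info ctinfo)
{
	unsigned int *timeouts = nf_ct_timeout_lookup(ct);

	if (!timeouts)
		timeouts = example_default_timeouts(nf_ct_net(ct));

	/* ... run the state machine, then refresh using timeouts[state] ... */
	return NF_ACCEPT;
}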
#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
int nf_conntrack_timeout_init(void);
void nf_conntrack_timeout_fini(void);
+void nf_ct_untimeout(struct net *net, struct nf_ct_timeout *timeout);
#else
static inline int nf_conntrack_timeout_init(void)
{
@@ -106,8 +101,8 @@ static inline void nf_conntrack_timeout_fini(void)
#endif /* CONFIG_NF_CONNTRACK_TIMEOUT */
#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
-extern struct ctnl_timeout *(*nf_ct_timeout_find_get_hook)(struct net *net, const char *name);
-extern void (*nf_ct_timeout_put_hook)(struct ctnl_timeout *timeout);
+extern struct nf_ct_timeout *(*nf_ct_timeout_find_get_hook)(struct net *net, const char *name);
+extern void (*nf_ct_timeout_put_hook)(struct nf_ct_timeout *timeout);
#endif
#endif /* _NF_CONNTRACK_TIMEOUT_H */
diff --git a/include/net/netfilter/nf_flow_table.h b/include/net/netfilter/nf_flow_table.h
index ba9fa4592f2b..0e355f4a3d76 100644
--- a/include/net/netfilter/nf_flow_table.h
+++ b/include/net/netfilter/nf_flow_table.h
@@ -4,7 +4,7 @@
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/netdevice.h>
-#include <linux/rhashtable.h>
+#include <linux/rhashtable-types.h>
#include <linux/rcupdate.h>
#include <linux/netfilter/nf_conntrack_tuple_common.h>
#include <net/dst.h>
diff --git a/include/net/netfilter/nf_log.h b/include/net/netfilter/nf_log.h
index e811ac07ea94..0d3920896d50 100644
--- a/include/net/netfilter/nf_log.h
+++ b/include/net/netfilter/nf_log.h
@@ -106,7 +106,8 @@ int nf_log_dump_udp_header(struct nf_log_buf *m, const struct sk_buff *skb,
int nf_log_dump_tcp_header(struct nf_log_buf *m, const struct sk_buff *skb,
u8 proto, int fragment, unsigned int offset,
unsigned int logflags);
-void nf_log_dump_sk_uid_gid(struct nf_log_buf *m, struct sock *sk);
+void nf_log_dump_sk_uid_gid(struct net *net, struct nf_log_buf *m,
+ struct sock *sk);
void nf_log_dump_packet_common(struct nf_log_buf *m, u_int8_t pf,
unsigned int hooknum, const struct sk_buff *skb,
const struct net_device *in,
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index 08c005ce56e9..0f39ac487012 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -150,6 +150,7 @@ static inline void nft_data_debug(const struct nft_data *data)
* @portid: netlink portID of the original message
* @seq: netlink sequence number
* @family: protocol family
+ * @level: depth of the chains
* @report: notify via unicast netlink message
*/
struct nft_ctx {
@@ -160,6 +161,7 @@ struct nft_ctx {
u32 portid;
u32 seq;
u8 family;
+ u8 level;
bool report;
};
@@ -272,7 +274,7 @@ enum nft_set_class {
* @space: memory class
*/
struct nft_set_estimate {
- unsigned int size;
+ u64 size;
enum nft_set_class lookup;
enum nft_set_class space;
};
@@ -334,7 +336,7 @@ struct nft_set_ops {
const struct nft_set_elem *elem,
unsigned int flags);
- unsigned int (*privsize)(const struct nlattr * const nla[],
+ u64 (*privsize)(const struct nlattr * const nla[],
const struct nft_set_desc *desc);
bool (*estimate)(const struct nft_set_desc *desc,
u32 features,
@@ -865,7 +867,6 @@ enum nft_chain_flags {
* @table: table that this chain belongs to
* @handle: chain handle
* @use: number of jump references to this chain
- * @level: length of longest path to this chain
* @flags: bitmask of enum nft_chain_flags
* @name: name of the chain
*/
@@ -878,7 +879,6 @@ struct nft_chain {
struct nft_table *table;
u64 handle;
u32 use;
- u16 level;
u8 flags:6,
genmask:2;
char *name;
@@ -1124,7 +1124,6 @@ struct nft_flowtable {
u32 genmask:2,
use:30;
u64 handle;
- char *dev_name[NFT_FLOWTABLE_DEVICE_MAX];
/* runtime data below here */
struct nf_hook_ops *ops ____cacheline_aligned;
struct nf_flowtable data;
@@ -1375,6 +1374,6 @@ struct nft_trans_flowtable {
(((struct nft_trans_flowtable *)trans->data)->flowtable)
int __init nft_chain_filter_init(void);
-void __exit nft_chain_filter_fini(void);
+void nft_chain_filter_fini(void);
#endif /* _NET_NF_TABLES_H */
diff --git a/include/net/netfilter/nf_tables_core.h b/include/net/netfilter/nf_tables_core.h
index e0c0c2558ec4..8da837d2aaf9 100644
--- a/include/net/netfilter/nf_tables_core.h
+++ b/include/net/netfilter/nf_tables_core.h
@@ -65,4 +65,17 @@ extern const struct nft_expr_ops nft_payload_fast_ops;
extern struct static_key_false nft_counters_enabled;
extern struct static_key_false nft_trace_enabled;
+extern struct nft_set_type nft_set_rhash_type;
+extern struct nft_set_type nft_set_hash_type;
+extern struct nft_set_type nft_set_hash_fast_type;
+extern struct nft_set_type nft_set_rbtree_type;
+extern struct nft_set_type nft_set_bitmap_type;
+
+struct nft_expr;
+struct nft_regs;
+struct nft_pktinfo;
+void nft_meta_get_eval(const struct nft_expr *expr,
+ struct nft_regs *regs, const struct nft_pktinfo *pkt);
+void nft_lookup_eval(const struct nft_expr *expr,
+ struct nft_regs *regs, const struct nft_pktinfo *pkt);
#endif /* _NET_NF_TABLES_CORE_H */
diff --git a/include/net/netfilter/nf_tproxy.h b/include/net/netfilter/nf_tproxy.h
index 9754a50ecde9..82d0e41b76f2 100644
--- a/include/net/netfilter/nf_tproxy.h
+++ b/include/net/netfilter/nf_tproxy.h
@@ -17,6 +17,14 @@ static inline bool nf_tproxy_sk_is_transparent(struct sock *sk)
return false;
}
+/* assign a socket to the skb -- consumes sk */
+static inline void nf_tproxy_assign_sock(struct sk_buff *skb, struct sock *sk)
+{
+ skb_orphan(skb);
+ skb->sk = sk;
+ skb->destructor = sock_edemux;
+}
+
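
nf_tproxy_assign_sock() consumes the socket reference obtained from the lookup helpers below, so a TPROXY target only needs to check transparency before stealing the skb. A minimal, hypothetical use (what happens to a non-transparent socket is left out):

static void example_tproxy_steal(struct sk_buff *skb, struct sock *sk)
{
	if (sk && nf_tproxy_sk_is_transparent(sk))
		nf_tproxy_assign_sock(skb, sk);	/* takes over the sk reference */
}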
__be32 nf_tproxy_laddr4(struct sk_buff *skb, __be32 user_laddr, __be32 daddr);
/**
@@ -64,7 +72,7 @@ nf_tproxy_handle_time_wait4(struct net *net, struct sk_buff *skb,
* belonging to established connections going through that one.
*/
struct sock *
-nf_tproxy_get_sock_v4(struct net *net, struct sk_buff *skb, void *hp,
+nf_tproxy_get_sock_v4(struct net *net, struct sk_buff *skb,
const u8 protocol,
const __be32 saddr, const __be32 daddr,
const __be16 sport, const __be16 dport,
@@ -103,7 +111,7 @@ nf_tproxy_handle_time_wait6(struct sk_buff *skb, int tproto, int thoff,
struct sock *sk);
struct sock *
-nf_tproxy_get_sock_v6(struct net *net, struct sk_buff *skb, int thoff, void *hp,
+nf_tproxy_get_sock_v6(struct net *net, struct sk_buff *skb, int thoff,
const u8 protocol,
const struct in6_addr *saddr, const struct in6_addr *daddr,
const __be16 sport, const __be16 dport,
diff --git a/include/net/netns/hash.h b/include/net/netns/hash.h
index 24c78183a4c2..16a842456189 100644
--- a/include/net/netns/hash.h
+++ b/include/net/netns/hash.h
@@ -9,12 +9,7 @@ struct net;
static inline u32 net_hash_mix(const struct net *net)
{
#ifdef CONFIG_NET_NS
- /*
- * shift this right to eliminate bits, that are
- * always zeroed
- */
-
- return (u32)(((unsigned long)net) >> L1_CACHE_SHIFT);
+ return (u32)(((unsigned long)net) >> ilog2(sizeof(*net)));
#else
return 0;
#endif
diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
index 661348f23ea5..e47503b4e4d1 100644
--- a/include/net/netns/ipv4.h
+++ b/include/net/netns/ipv4.h
@@ -98,6 +98,7 @@ struct netns_ipv4 {
int sysctl_ip_default_ttl;
int sysctl_ip_no_pmtu_disc;
int sysctl_ip_fwd_use_pmtu;
+ int sysctl_ip_fwd_update_priority;
int sysctl_ip_nonlocal_bind;
/* Shall we try to damage output packets if routing dev changes? */
int sysctl_ip_dynaddr;
diff --git a/include/net/netns/ipv6.h b/include/net/netns/ipv6.h
index c978a31b0f84..f0e396ab9bec 100644
--- a/include/net/netns/ipv6.h
+++ b/include/net/netns/ipv6.h
@@ -32,6 +32,7 @@ struct netns_sysctl_ipv6 {
int flowlabel_consistency;
int auto_flowlabels;
int icmpv6_time;
+ int icmpv6_echo_ignore_all;
int anycast_src_echo_reply;
int ip_nonlocal_bind;
int fwmark_reflect;
@@ -109,7 +110,6 @@ struct netns_ipv6 {
#if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
struct netns_nf_frag {
- struct netns_sysctl_ipv6 sysctl;
struct netns_frags frags;
};
#endif
diff --git a/include/net/netns/nftables.h b/include/net/netns/nftables.h
index 94767ea3a490..286fd960896f 100644
--- a/include/net/netns/nftables.h
+++ b/include/net/netns/nftables.h
@@ -7,6 +7,7 @@
struct netns_nftables {
struct list_head tables;
struct list_head commit_list;
+ struct mutex commit_mutex;
unsigned int base_seq;
u8 gencursor;
u8 validate_state;
diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h
index a3c1a2c47cd4..ef727f71336e 100644
--- a/include/net/pkt_cls.h
+++ b/include/net/pkt_cls.h
@@ -7,12 +7,16 @@
#include <net/sch_generic.h>
#include <net/act_api.h>
+/* TC action not accessible from user space */
+#define TC_ACT_REINSERT (TC_ACT_VALUE_MAX + 1)
+
/* Basic packet classifier frontend definitions. */
struct tcf_walker {
int stop;
int skip;
int count;
+ unsigned long cookie;
int (*fn)(struct tcf_proto *, void *node, struct tcf_walker *);
};
@@ -36,9 +40,9 @@ struct tcf_block_cb;
bool tcf_queue_work(struct rcu_work *rwork, work_func_t func);
#ifdef CONFIG_NET_CLS
-struct tcf_chain *tcf_chain_get(struct tcf_block *block, u32 chain_index,
- bool create);
-void tcf_chain_put(struct tcf_chain *chain);
+struct tcf_chain *tcf_chain_get_by_act(struct tcf_block *block,
+ u32 chain_index);
+void tcf_chain_put_by_act(struct tcf_chain *chain);
void tcf_block_netif_keep_dst(struct tcf_block *block);
int tcf_block_get(struct tcf_block **p_block,
struct tcf_proto __rcu **p_filter_chain, struct Qdisc *q,
@@ -73,11 +77,13 @@ void tcf_block_cb_incref(struct tcf_block_cb *block_cb);
unsigned int tcf_block_cb_decref(struct tcf_block_cb *block_cb);
struct tcf_block_cb *__tcf_block_cb_register(struct tcf_block *block,
tc_setup_cb_t *cb, void *cb_ident,
- void *cb_priv);
+ void *cb_priv,
+ struct netlink_ext_ack *extack);
int tcf_block_cb_register(struct tcf_block *block,
tc_setup_cb_t *cb, void *cb_ident,
- void *cb_priv);
-void __tcf_block_cb_unregister(struct tcf_block_cb *block_cb);
+ void *cb_priv, struct netlink_ext_ack *extack);
+void __tcf_block_cb_unregister(struct tcf_block *block,
+ struct tcf_block_cb *block_cb);
void tcf_block_cb_unregister(struct tcf_block *block,
tc_setup_cb_t *cb, void *cb_ident);
@@ -161,7 +167,8 @@ unsigned int tcf_block_cb_decref(struct tcf_block_cb *block_cb)
static inline
struct tcf_block_cb *__tcf_block_cb_register(struct tcf_block *block,
tc_setup_cb_t *cb, void *cb_ident,
- void *cb_priv)
+ void *cb_priv,
+ struct netlink_ext_ack *extack)
{
return NULL;
}
@@ -169,13 +176,14 @@ struct tcf_block_cb *__tcf_block_cb_register(struct tcf_block *block,
static inline
int tcf_block_cb_register(struct tcf_block *block,
tc_setup_cb_t *cb, void *cb_ident,
- void *cb_priv)
+ void *cb_priv, struct netlink_ext_ack *extack)
{
return 0;
}
static inline
-void __tcf_block_cb_unregister(struct tcf_block_cb *block_cb)
+void __tcf_block_cb_unregister(struct tcf_block *block,
+ struct tcf_block_cb *block_cb)
{
}
@@ -596,6 +604,7 @@ struct tc_block_offload {
enum tc_block_command command;
enum tcf_block_binder_type binder_type;
struct tcf_block *block;
+ struct netlink_ext_ack *extack;
};
struct tc_cls_common_offload {
@@ -715,6 +724,8 @@ enum tc_fl_command {
TC_CLSFLOWER_REPLACE,
TC_CLSFLOWER_DESTROY,
TC_CLSFLOWER_STATS,
+ TC_CLSFLOWER_TMPLT_CREATE,
+ TC_CLSFLOWER_TMPLT_DESTROY,
};
struct tc_cls_flower_offload {
@@ -771,6 +782,7 @@ struct tc_mqprio_qopt_offload {
struct tc_cookie {
u8 *data;
u32 len;
+ struct rcu_head rcu;
};
struct tc_qopt_offload_stats {
diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h
index 815b92a23936..7dc769e5452b 100644
--- a/include/net/pkt_sched.h
+++ b/include/net/pkt_sched.h
@@ -72,6 +72,8 @@ struct qdisc_watchdog {
struct Qdisc *qdisc;
};
+void qdisc_watchdog_init_clockid(struct qdisc_watchdog *wd, struct Qdisc *qdisc,
+ clockid_t clockid);
void qdisc_watchdog_init(struct qdisc_watchdog *wd, struct Qdisc *qdisc);
void qdisc_watchdog_schedule_ns(struct qdisc_watchdog *wd, u64 expires);
@@ -153,4 +155,9 @@ struct tc_cbs_qopt_offload {
s32 sendslope;
};
+struct tc_etf_qopt_offload {
+ u8 enable;
+ s32 queue;
+};
+
#endif
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index 6488daa32f82..a6d00093f35e 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -20,6 +20,9 @@ struct qdisc_walker;
struct tcf_walker;
struct module;
+typedef int tc_setup_cb_t(enum tc_setup_type type,
+ void *type_data, void *cb_priv);
+
struct qdisc_rate_table {
struct tc_ratespec rate;
u32 data[256];
@@ -232,9 +235,17 @@ struct tcf_result {
u32 classid;
};
const struct tcf_proto *goto_tp;
+
+ /* used by the TC_ACT_REINSERT action */
+ struct {
+ bool ingress;
+ struct gnet_stats_queue *qstats;
+ };
};
};
+struct tcf_chain;
+
struct tcf_proto_ops {
struct list_head head;
char kind[IFNAMSIZ];
@@ -256,11 +267,22 @@ struct tcf_proto_ops {
bool *last,
struct netlink_ext_ack *);
void (*walk)(struct tcf_proto*, struct tcf_walker *arg);
+ int (*reoffload)(struct tcf_proto *tp, bool add,
+ tc_setup_cb_t *cb, void *cb_priv,
+ struct netlink_ext_ack *extack);
void (*bind_class)(void *, u32, unsigned long);
+ void * (*tmplt_create)(struct net *net,
+ struct tcf_chain *chain,
+ struct nlattr **tca,
+ struct netlink_ext_ack *extack);
+ void (*tmplt_destroy)(void *tmplt_priv);
/* rtnetlink specific */
int (*dump)(struct net*, struct tcf_proto*, void *,
struct sk_buff *skb, struct tcmsg*);
+ int (*tmplt_dump)(struct sk_buff *skb,
+ struct net *net,
+ void *tmplt_priv);
struct module *owner;
};
@@ -269,6 +291,8 @@ struct tcf_proto {
/* Fast access part */
struct tcf_proto __rcu *next;
void __rcu *root;
+
+ /* called under RCU BH lock */
int (*classify)(struct sk_buff *,
const struct tcf_proto *,
struct tcf_result *);
@@ -294,11 +318,14 @@ typedef void tcf_chain_head_change_t(struct tcf_proto *tp_head, void *priv);
struct tcf_chain {
struct tcf_proto __rcu *filter_chain;
- struct list_head filter_chain_list;
struct list_head list;
struct tcf_block *block;
u32 index; /* chain index */
unsigned int refcnt;
+ unsigned int action_refcnt;
+ bool explicitly_created;
+ const struct tcf_proto_ops *tmplt_ops;
+ void *tmplt_priv;
};
struct tcf_block {
@@ -312,6 +339,10 @@ struct tcf_block {
bool keep_dst;
unsigned int offloadcnt; /* Number of offloaded filters */
unsigned int nooffloaddevcnt; /* Number of devs unable to do offload */
+ struct {
+ struct tcf_chain *chain;
+ struct list_head filter_chain_list;
+ } chain0;
};
static inline void tcf_block_offload_inc(struct tcf_block *block, u32 *flags)
@@ -330,6 +361,21 @@ static inline void tcf_block_offload_dec(struct tcf_block *block, u32 *flags)
block->offloadcnt--;
}
+static inline void
+tc_cls_offload_cnt_update(struct tcf_block *block, unsigned int *cnt,
+ u32 *flags, bool add)
+{
+ if (add) {
+ if (!*cnt)
+ tcf_block_offload_inc(block, flags);
+ (*cnt)++;
+ } else {
+ (*cnt)--;
+ if (!*cnt)
+ tcf_block_offload_dec(block, flags);
+ }
+}
+
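
tc_cls_offload_cnt_update() centralizes the per-filter bookkeeping that classifiers previously open-coded around tcf_block_offload_inc()/dec(). An illustrative caller, where the filter structure and its in-hardware counter are hypothetical:

struct example_filter {
	u32 flags;
	unsigned int in_hw_count;
};

static void example_hw_update(struct tcf_block *block,
			      struct example_filter *f, bool add)
{
	/* keeps the filter's flags and block->offloadcnt consistent */
	tc_cls_offload_cnt_update(block, &f->in_hw_count, &f->flags, add);
}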
static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz)
{
struct qdisc_skb_cb *qcb;
@@ -529,6 +575,15 @@ static inline void skb_reset_tc(struct sk_buff *skb)
#endif
}
+static inline bool skb_is_tc_redirected(const struct sk_buff *skb)
+{
+#ifdef CONFIG_NET_CLS_ACT
+ return skb->tc_redirected;
+#else
+ return false;
+#endif
+}
+
static inline bool skb_at_tc_ingress(const struct sk_buff *skb)
{
#ifdef CONFIG_NET_CLS_ACT
@@ -1068,4 +1123,17 @@ void mini_qdisc_pair_swap(struct mini_Qdisc_pair *miniqp,
void mini_qdisc_pair_init(struct mini_Qdisc_pair *miniqp, struct Qdisc *qdisc,
struct mini_Qdisc __rcu **p_miniq);
+static inline void skb_tc_reinsert(struct sk_buff *skb, struct tcf_result *res)
+{
+ struct gnet_stats_queue *stats = res->qstats;
+ int ret;
+
+ if (res->ingress)
+ ret = netif_receive_skb(skb);
+ else
+ ret = dev_queue_xmit(skb);
+ if (ret && stats)
+ qstats_overlimit_inc(res->qstats);
+}
+
#endif
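
The TC_ACT_REINSERT opcode added near the top of pkt_cls.h, the anonymous ingress/qstats pair in struct tcf_result and skb_tc_reinsert() work together: an action fills the result and returns the new opcode, and the caller re-injects the skb later. A hedged sketch of the action side, with the qstats pointer assumed to come from the action's own state:

static int example_act(struct sk_buff *skb, struct tcf_result *res,
		       struct gnet_stats_queue *qstats, bool ingress)
{
	res->ingress = ingress;	/* netif_receive_skb() vs dev_queue_xmit() */
	res->qstats = qstats;	/* bumped by skb_tc_reinsert() on failure */
	return TC_ACT_REINSERT;	/* core later calls skb_tc_reinsert(skb, res) */
}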
diff --git a/include/net/scm.h b/include/net/scm.h
index 903771c8d4e3..1ce365f4c256 100644
--- a/include/net/scm.h
+++ b/include/net/scm.h
@@ -8,6 +8,7 @@
#include <linux/security.h>
#include <linux/pid.h>
#include <linux/nsproxy.h>
+#include <linux/sched/signal.h>
/* Well, we should have at least one descriptor open
* to accept passed FDs 8)
diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
index 30b3e2fe240a..8c2caa370e0f 100644
--- a/include/net/sctp/sctp.h
+++ b/include/net/sctp/sctp.h
@@ -109,7 +109,8 @@ int sctp_backlog_rcv(struct sock *sk, struct sk_buff *skb);
int sctp_inet_listen(struct socket *sock, int backlog);
void sctp_write_space(struct sock *sk);
void sctp_data_ready(struct sock *sk);
-__poll_t sctp_poll_mask(struct socket *sock, __poll_t events);
+__poll_t sctp_poll(struct file *file, struct socket *sock,
+ poll_table *wait);
void sctp_sock_rfree(struct sk_buff *skb);
void sctp_copy_sock(struct sock *newsk, struct sock *sk,
struct sctp_association *asoc);
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index dbe1b911a24d..28a7c8e44636 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -48,7 +48,7 @@
#define __sctp_structs_h__
#include <linux/ktime.h>
-#include <linux/rhashtable.h>
+#include <linux/rhashtable-types.h>
#include <linux/socket.h> /* linux/in.h needs this!! */
#include <linux/in.h> /* We get struct sockaddr_in. */
#include <linux/in6.h> /* We get struct in6_addr */
@@ -57,6 +57,7 @@
#include <linux/atomic.h> /* This gets us atomic counters. */
#include <linux/skbuff.h> /* We need sk_buff_head. */
#include <linux/workqueue.h> /* We need tq_struct. */
+#include <linux/flex_array.h> /* We need flex_array. */
#include <linux/sctp.h> /* We need sctp* header structs. */
#include <net/sctp/auth.h> /* We need auth specific structs */
#include <net/ip.h> /* For inet_skb_parm */
@@ -193,6 +194,9 @@ struct sctp_sock {
/* This is the max_retrans value for new associations. */
__u16 pathmaxrxt;
+ __u32 flowlabel;
+ __u8 dscp;
+
/* The initial Path MTU to use for new associations. */
__u32 pathmtu;
@@ -220,6 +224,7 @@ struct sctp_sock {
__u32 adaptation_ind;
__u32 pd_point;
__u16 nodelay:1,
+ reuse:1,
disable_fragments:1,
v4mapped:1,
frag_interleave:1,
@@ -394,37 +399,35 @@ void sctp_stream_update(struct sctp_stream *stream, struct sctp_stream *new);
/* What is the current SSN number for this stream? */
#define sctp_ssn_peek(stream, type, sid) \
- ((stream)->type[sid].ssn)
+ (sctp_stream_##type((stream), (sid))->ssn)
/* Return the next SSN number for this stream. */
#define sctp_ssn_next(stream, type, sid) \
- ((stream)->type[sid].ssn++)
+ (sctp_stream_##type((stream), (sid))->ssn++)
/* Skip over this ssn and all below. */
#define sctp_ssn_skip(stream, type, sid, ssn) \
- ((stream)->type[sid].ssn = ssn + 1)
+ (sctp_stream_##type((stream), (sid))->ssn = ssn + 1)
/* What is the current MID number for this stream? */
#define sctp_mid_peek(stream, type, sid) \
- ((stream)->type[sid].mid)
+ (sctp_stream_##type((stream), (sid))->mid)
/* Return the next MID number for this stream. */
#define sctp_mid_next(stream, type, sid) \
- ((stream)->type[sid].mid++)
+ (sctp_stream_##type((stream), (sid))->mid++)
/* Skip over this mid and all below. */
#define sctp_mid_skip(stream, type, sid, mid) \
- ((stream)->type[sid].mid = mid + 1)
-
-#define sctp_stream_in(asoc, sid) (&(asoc)->stream.in[sid])
+ (sctp_stream_##type((stream), (sid))->mid = mid + 1)
/* What is the current MID_uo number for this stream? */
#define sctp_mid_uo_peek(stream, type, sid) \
- ((stream)->type[sid].mid_uo)
+ (sctp_stream_##type((stream), (sid))->mid_uo)
/* Return the next MID_uo number for this stream. */
#define sctp_mid_uo_next(stream, type, sid) \
- ((stream)->type[sid].mid_uo++)
+ (sctp_stream_##type((stream), (sid))->mid_uo++)
/*
* Pointers to address related SCTP functions.
@@ -894,6 +897,9 @@ struct sctp_transport {
*/
__u16 pathmaxrxt;
+ __u32 flowlabel;
+ __u8 dscp;
+
/* This is the partially failed retrans value for the transport
* and will be initialized from the assocs value. This can be changed
* using the SCTP_PEER_ADDR_THLDS socket option
@@ -1433,8 +1439,8 @@ struct sctp_stream_in {
};
struct sctp_stream {
- struct sctp_stream_out *out;
- struct sctp_stream_in *in;
+ struct flex_array *out;
+ struct flex_array *in;
__u16 outcnt;
__u16 incnt;
/* Current stream being sent, if any */
@@ -1456,6 +1462,23 @@ struct sctp_stream {
struct sctp_stream_interleave *si;
};
+static inline struct sctp_stream_out *sctp_stream_out(
+ const struct sctp_stream *stream,
+ __u16 sid)
+{
+ return flex_array_get(stream->out, sid);
+}
+
+static inline struct sctp_stream_in *sctp_stream_in(
+ const struct sctp_stream *stream,
+ __u16 sid)
+{
+ return flex_array_get(stream->in, sid);
+}
+
+#define SCTP_SO(s, i) sctp_stream_out((s), (i))
+#define SCTP_SI(s, i) sctp_stream_in((s), (i))
+
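
With the in/out stream arrays now flex_arrays, direct indexing such as stream->out[sid] no longer works; the sctp_stream_out()/sctp_stream_in() helpers (or the SCTP_SO()/SCTP_SI() shorthands) are the replacement, and the SSN/MID macros above already go through them. For instance, peeking a stream's SSN stays macro-based:

static __u16 example_peek_in_ssn(const struct sctp_stream *stream, __u16 sid)
{
	/* equivalent to the pre-flex_array stream->in[sid].ssn */
	return sctp_ssn_peek(stream, in, sid);
}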
#define SCTP_STREAM_CLOSED 0x00
#define SCTP_STREAM_OPEN 0x01
@@ -1771,6 +1794,9 @@ struct sctp_association {
*/
__u16 pathmaxrxt;
+ __u32 flowlabel;
+ __u8 dscp;
+
/* Flag that path mtu update is pending */
__u8 pmtu_pending;
diff --git a/include/net/seg6.h b/include/net/seg6.h
index e029e301faa5..2567941a2f32 100644
--- a/include/net/seg6.h
+++ b/include/net/seg6.h
@@ -18,7 +18,7 @@
#include <linux/ipv6.h>
#include <net/lwtunnel.h>
#include <linux/seg6.h>
-#include <linux/rhashtable.h>
+#include <linux/rhashtable-types.h>
static inline void update_csum_diff4(struct sk_buff *skb, __be32 from,
__be32 to)
diff --git a/include/net/seg6_hmac.h b/include/net/seg6_hmac.h
index 69c3a106056b..7fda469e2758 100644
--- a/include/net/seg6_hmac.h
+++ b/include/net/seg6_hmac.h
@@ -22,7 +22,7 @@
#include <linux/route.h>
#include <net/seg6.h>
#include <linux/seg6_hmac.h>
-#include <linux/rhashtable.h>
+#include <linux/rhashtable-types.h>
#define SEG6_HMAC_MAX_DIGESTSIZE 160
#define SEG6_HMAC_RING_SIZE 256
diff --git a/include/net/seg6_local.h b/include/net/seg6_local.h
index 661fd5b4d3e0..08359e2d8b35 100644
--- a/include/net/seg6_local.h
+++ b/include/net/seg6_local.h
@@ -21,10 +21,12 @@
extern int seg6_lookup_nexthop(struct sk_buff *skb, struct in6_addr *nhaddr,
u32 tbl_id);
+extern bool seg6_bpf_has_valid_srh(struct sk_buff *skb);
struct seg6_bpf_srh_state {
- bool valid;
+ struct ipv6_sr_hdr *srh;
u16 hdrlen;
+ bool valid;
};
DECLARE_PER_CPU(struct seg6_bpf_srh_state, seg6_bpf_srh_states);
diff --git a/include/net/smc.h b/include/net/smc.h
index 8381d163fefa..9ef49f8b1002 100644
--- a/include/net/smc.h
+++ b/include/net/smc.h
@@ -11,6 +11,8 @@
#ifndef _SMC_H
#define _SMC_H
+#define SMC_MAX_PNETID_LEN 16 /* Max. length of PNET id */
+
struct smc_hashinfo {
rwlock_t lock;
struct hlist_head ht;
@@ -18,4 +20,67 @@ struct smc_hashinfo {
int smc_hash_sk(struct sock *sk);
void smc_unhash_sk(struct sock *sk);
+
+/* SMCD/ISM device driver interface */
+struct smcd_dmb {
+ u64 dmb_tok;
+ u64 rgid;
+ u32 dmb_len;
+ u32 sba_idx;
+ u32 vlan_valid;
+ u32 vlan_id;
+ void *cpu_addr;
+ dma_addr_t dma_addr;
+};
+
+#define ISM_EVENT_DMB 0
+#define ISM_EVENT_GID 1
+#define ISM_EVENT_SWR 2
+
+struct smcd_event {
+ u32 type;
+ u32 code;
+ u64 tok;
+ u64 time;
+ u64 info;
+};
+
+struct smcd_dev;
+
+struct smcd_ops {
+ int (*query_remote_gid)(struct smcd_dev *dev, u64 rgid, u32 vid_valid,
+ u32 vid);
+ int (*register_dmb)(struct smcd_dev *dev, struct smcd_dmb *dmb);
+ int (*unregister_dmb)(struct smcd_dev *dev, struct smcd_dmb *dmb);
+ int (*add_vlan_id)(struct smcd_dev *dev, u64 vlan_id);
+ int (*del_vlan_id)(struct smcd_dev *dev, u64 vlan_id);
+ int (*set_vlan_required)(struct smcd_dev *dev);
+ int (*reset_vlan_required)(struct smcd_dev *dev);
+ int (*signal_event)(struct smcd_dev *dev, u64 rgid, u32 trigger_irq,
+ u32 event_code, u64 info);
+ int (*move_data)(struct smcd_dev *dev, u64 dmb_tok, unsigned int idx,
+ bool sf, unsigned int offset, void *data,
+ unsigned int size);
+};
+
+struct smcd_dev {
+ const struct smcd_ops *ops;
+ struct device dev;
+ void *priv;
+ u64 local_gid;
+ struct list_head list;
+ spinlock_t lock;
+ struct smc_connection **conn;
+ struct list_head vlan;
+ struct workqueue_struct *event_wq;
+ u8 pnetid[SMC_MAX_PNETID_LEN];
+};
+
+struct smcd_dev *smcd_alloc_dev(struct device *parent, const char *name,
+ const struct smcd_ops *ops, int max_dmbs);
+int smcd_register_dev(struct smcd_dev *smcd);
+void smcd_unregister_dev(struct smcd_dev *smcd);
+void smcd_free_dev(struct smcd_dev *smcd);
+void smcd_handle_event(struct smcd_dev *dev, struct smcd_event *event);
+void smcd_handle_irq(struct smcd_dev *dev, unsigned int bit);
#endif /* _SMC_H */
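
This block defines the whole SMCD/ISM driver-facing contract: a device driver fills struct smcd_ops, allocates and registers a struct smcd_dev, and forwards hardware events through smcd_handle_event()/smcd_handle_irq(). Schematically, with the callbacks and EXAMPLE_MAX_DMBS assumed to exist elsewhere in the driver:

static const struct smcd_ops example_ism_ops = {
	.query_remote_gid	= example_query_remote_gid,
	.register_dmb		= example_register_dmb,
	.unregister_dmb		= example_unregister_dmb,
	.add_vlan_id		= example_add_vlan_id,
	.del_vlan_id		= example_del_vlan_id,
	.set_vlan_required	= example_set_vlan_required,
	.reset_vlan_required	= example_reset_vlan_required,
	.signal_event		= example_signal_event,
	.move_data		= example_move_data,
};

static int example_ism_probe(struct device *parent)
{
	struct smcd_dev *smcd;

	smcd = smcd_alloc_dev(parent, "example_ism", &example_ism_ops,
			      EXAMPLE_MAX_DMBS);
	if (!smcd)
		return -ENOMEM;

	return smcd_register_dev(smcd);
}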
diff --git a/include/net/sock.h b/include/net/sock.h
index b3b75419eafe..433f45fc2d68 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -139,6 +139,7 @@ typedef __u64 __bitwise __addrpair;
* @skc_node: main hash linkage for various protocol lookup tables
* @skc_nulls_node: main hash linkage for TCP/UDP/UDP-Lite protocol
* @skc_tx_queue_mapping: tx queue number for this connection
+ * @skc_rx_queue_mapping: rx queue number for this connection
* @skc_flags: place holder for sk_flags
* %SO_LINGER (l_onoff), %SO_BROADCAST, %SO_KEEPALIVE,
* %SO_OOBINLINE settings, %SO_TIMESTAMPING settings
@@ -214,7 +215,10 @@ struct sock_common {
struct hlist_node skc_node;
struct hlist_nulls_node skc_nulls_node;
};
- int skc_tx_queue_mapping;
+ unsigned short skc_tx_queue_mapping;
+#ifdef CONFIG_XPS
+ unsigned short skc_rx_queue_mapping;
+#endif
union {
int skc_incoming_cpu;
u32 skc_rcv_wnd;
@@ -315,6 +319,9 @@ struct sock_common {
* @sk_destruct: called at sock freeing time, i.e. when all refcnt == 0
* @sk_reuseport_cb: reuseport group container
* @sk_rcu: used during RCU grace period
+ * @sk_clockid: clockid used by time-based scheduling (SO_TXTIME)
+ * @sk_txtime_deadline_mode: set deadline mode for SO_TXTIME
+ * @sk_txtime_unused: unused txtime flags
*/
struct sock {
/*
@@ -326,6 +333,9 @@ struct sock {
#define sk_nulls_node __sk_common.skc_nulls_node
#define sk_refcnt __sk_common.skc_refcnt
#define sk_tx_queue_mapping __sk_common.skc_tx_queue_mapping
+#ifdef CONFIG_XPS
+#define sk_rx_queue_mapping __sk_common.skc_rx_queue_mapping
+#endif
#define sk_dontcopy_begin __sk_common.skc_dontcopy_begin
#define sk_dontcopy_end __sk_common.skc_dontcopy_end
@@ -468,6 +478,12 @@ struct sock {
u8 sk_shutdown;
u32 sk_tskey;
atomic_t sk_zckey;
+
+ u8 sk_clockid;
+ u8 sk_txtime_deadline_mode : 1,
+ sk_txtime_report_errors : 1,
+ sk_txtime_unused : 6;
+
struct socket *sk_socket;
void *sk_user_data;
#ifdef CONFIG_SECURITY
@@ -783,6 +799,7 @@ enum sock_flags {
SOCK_FILTER_LOCKED, /* Filter cannot be changed anymore */
SOCK_SELECT_ERR_QUEUE, /* Wake select on error queue */
SOCK_RCU_FREE, /* wait rcu grace period in sk_destruct() */
+ SOCK_TXTIME,
};
#define SK_FLAGS_TIMESTAMP ((1UL << SOCK_TIMESTAMP) | (1UL << SOCK_TIMESTAMPING_RX_SOFTWARE))
@@ -1578,10 +1595,17 @@ void sock_kzfree_s(struct sock *sk, void *mem, int size);
void sk_send_sigurg(struct sock *sk);
struct sockcm_cookie {
+ u64 transmit_time;
u32 mark;
u16 tsflags;
};
+static inline void sockcm_init(struct sockcm_cookie *sockc,
+ const struct sock *sk)
+{
+ *sockc = (struct sockcm_cookie) { .tsflags = sk->sk_tsflags };
+}
+
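
sockcm_init() gives every protocol the same starting point for the control-message cookie, which now also carries the SO_TXTIME transmit_time. A trimmed sendmsg-side sketch:

static int example_sendmsg_cookie(struct sock *sk, struct msghdr *msg,
				  struct sockcm_cookie *sockc)
{
	sockcm_init(sockc, sk);	/* inherits sk->sk_tsflags, zeroes the rest */

	if (msg->msg_controllen)
		return sock_cmsg_send(sk, msg, sockc);

	return 0;
}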
int __sock_cmsg_send(struct sock *sk, struct msghdr *msg, struct cmsghdr *cmsg,
struct sockcm_cookie *sockc);
int sock_cmsg_send(struct sock *sk, struct msghdr *msg,
@@ -1681,19 +1705,58 @@ static inline int sk_receive_skb(struct sock *sk, struct sk_buff *skb,
static inline void sk_tx_queue_set(struct sock *sk, int tx_queue)
{
+ /* sk_tx_queue_mapping accepts only up to a 16-bit value */
+ if (WARN_ON_ONCE((unsigned short)tx_queue >= USHRT_MAX))
+ return;
sk->sk_tx_queue_mapping = tx_queue;
}
+#define NO_QUEUE_MAPPING USHRT_MAX
+
static inline void sk_tx_queue_clear(struct sock *sk)
{
- sk->sk_tx_queue_mapping = -1;
+ sk->sk_tx_queue_mapping = NO_QUEUE_MAPPING;
}
static inline int sk_tx_queue_get(const struct sock *sk)
{
- return sk ? sk->sk_tx_queue_mapping : -1;
+ if (sk && sk->sk_tx_queue_mapping != NO_QUEUE_MAPPING)
+ return sk->sk_tx_queue_mapping;
+
+ return -1;
}
+static inline void sk_rx_queue_set(struct sock *sk, const struct sk_buff *skb)
+{
+#ifdef CONFIG_XPS
+ if (skb_rx_queue_recorded(skb)) {
+ u16 rx_queue = skb_get_rx_queue(skb);
+
+ if (WARN_ON_ONCE(rx_queue == NO_QUEUE_MAPPING))
+ return;
+
+ sk->sk_rx_queue_mapping = rx_queue;
+ }
+#endif
+}
+
+static inline void sk_rx_queue_clear(struct sock *sk)
+{
+#ifdef CONFIG_XPS
+ sk->sk_rx_queue_mapping = NO_QUEUE_MAPPING;
+#endif
+}
+
+#ifdef CONFIG_XPS
+static inline int sk_rx_queue_get(const struct sock *sk)
+{
+ if (sk && sk->sk_rx_queue_mapping != NO_QUEUE_MAPPING)
+ return sk->sk_rx_queue_mapping;
+
+ return -1;
+}
+#endif
+
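
sk_rx_queue_set()/sk_rx_queue_get() record which queue a socket's traffic arrived on so that, under CONFIG_XPS, transmit-queue selection can prefer the paired queue. A hypothetical consumer:

static u16 example_pick_tx_queue(const struct sock *sk, u16 fallback)
{
#ifdef CONFIG_XPS
	int rxq = sk_rx_queue_get(sk);

	if (rxq >= 0)
		return (u16)rxq;
#endif
	return fallback;
}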
static inline void sk_set_socket(struct sock *sk, struct socket *sock)
{
sk_tx_queue_clear(sk);
@@ -1725,7 +1788,7 @@ static inline void sock_graft(struct sock *sk, struct socket *parent)
{
WARN_ON(parent->sk);
write_lock_bh(&sk->sk_callback_lock);
- sk->sk_wq = parent->wq;
+ rcu_assign_pointer(sk->sk_wq, parent->wq);
parent->sk = sk;
sk_set_socket(sk, parent);
sk->sk_uid = SOCK_INODE(parent)->i_uid;
@@ -1994,16 +2057,16 @@ static inline bool skwq_has_sleeper(struct socket_wq *wq)
/**
* sock_poll_wait - place memory barrier behind the poll_wait call.
* @filp: file
- * @wait_address: socket wait queue
* @p: poll_table
*
* See the comments in the wq_has_sleeper function.
*/
-static inline void sock_poll_wait(struct file *filp,
- wait_queue_head_t *wait_address, poll_table *p)
+static inline void sock_poll_wait(struct file *filp, poll_table *p)
{
- if (!poll_does_not_wait(p) && wait_address) {
- poll_wait(filp, wait_address, p);
+ struct socket *sock = filp->private_data;
+
+ if (!poll_does_not_wait(p)) {
+ poll_wait(filp, &sock->wq->wait, p);
/* We need to be sure we are in sync with the
* socket flags modification.
*
diff --git a/include/net/sock_reuseport.h b/include/net/sock_reuseport.h
index 0054b3a9b923..8a5f70c7cdf2 100644
--- a/include/net/sock_reuseport.h
+++ b/include/net/sock_reuseport.h
@@ -5,25 +5,36 @@
#include <linux/filter.h>
#include <linux/skbuff.h>
#include <linux/types.h>
+#include <linux/spinlock.h>
#include <net/sock.h>
+extern spinlock_t reuseport_lock;
+
struct sock_reuseport {
struct rcu_head rcu;
u16 max_socks; /* length of socks */
u16 num_socks; /* elements in socks */
+ /* The last synq overflow event timestamp of this
+ * reuse->socks[] group.
+ */
+ unsigned int synq_overflow_ts;
+ /* ID stays the same even after the size of socks[] grows. */
+ unsigned int reuseport_id;
+ bool bind_inany;
struct bpf_prog __rcu *prog; /* optional BPF sock selector */
struct sock *socks[0]; /* array of sock pointers */
};
-extern int reuseport_alloc(struct sock *sk);
-extern int reuseport_add_sock(struct sock *sk, struct sock *sk2);
+extern int reuseport_alloc(struct sock *sk, bool bind_inany);
+extern int reuseport_add_sock(struct sock *sk, struct sock *sk2,
+ bool bind_inany);
extern void reuseport_detach_sock(struct sock *sk);
extern struct sock *reuseport_select_sock(struct sock *sk,
u32 hash,
struct sk_buff *skb,
int hdr_len);
-extern struct bpf_prog *reuseport_attach_prog(struct sock *sk,
- struct bpf_prog *prog);
+extern int reuseport_attach_prog(struct sock *sk, struct bpf_prog *prog);
+int reuseport_get_id(struct sock_reuseport *reuse);
#endif /* _SOCK_REUSEPORT_H */
diff --git a/include/net/tc_act/tc_csum.h b/include/net/tc_act/tc_csum.h
index 9470fd7e4350..32d2454c0479 100644
--- a/include/net/tc_act/tc_csum.h
+++ b/include/net/tc_act/tc_csum.h
@@ -7,7 +7,6 @@
#include <linux/tc_act/tc_csum.h>
struct tcf_csum_params {
- int action;
u32 update_flags;
struct rcu_head rcu;
};
diff --git a/include/net/tc_act/tc_pedit.h b/include/net/tc_act/tc_pedit.h
index 227a6f1d02f4..fac3ad4a86de 100644
--- a/include/net/tc_act/tc_pedit.h
+++ b/include/net/tc_act/tc_pedit.h
@@ -17,6 +17,7 @@ struct tcf_pedit {
struct tc_pedit_key *tcfp_keys;
struct tcf_pedit_key_ex *tcfp_keys_ex;
};
+
#define to_pedit(a) ((struct tcf_pedit *)a)
static inline bool is_tcf_pedit(const struct tc_action *a)
diff --git a/include/net/tc_act/tc_skbedit.h b/include/net/tc_act/tc_skbedit.h
index 19cd3d345804..911bbac838a2 100644
--- a/include/net/tc_act/tc_skbedit.h
+++ b/include/net/tc_act/tc_skbedit.h
@@ -22,14 +22,19 @@
#include <net/act_api.h>
#include <linux/tc_act/tc_skbedit.h>
+struct tcf_skbedit_params {
+ u32 flags;
+ u32 priority;
+ u32 mark;
+ u32 mask;
+ u16 queue_mapping;
+ u16 ptype;
+ struct rcu_head rcu;
+};
+
struct tcf_skbedit {
- struct tc_action common;
- u32 flags;
- u32 priority;
- u32 mark;
- u32 mask;
- u16 queue_mapping;
- u16 ptype;
+ struct tc_action common;
+ struct tcf_skbedit_params __rcu *params;
};
#define to_skbedit(a) ((struct tcf_skbedit *)a)
@@ -37,15 +42,27 @@ struct tcf_skbedit {
static inline bool is_tcf_skbedit_mark(const struct tc_action *a)
{
#ifdef CONFIG_NET_CLS_ACT
- if (a->ops && a->ops->type == TCA_ACT_SKBEDIT)
- return to_skbedit(a)->flags == SKBEDIT_F_MARK;
+ u32 flags;
+
+ if (a->ops && a->ops->type == TCA_ACT_SKBEDIT) {
+ rcu_read_lock();
+ flags = rcu_dereference(to_skbedit(a)->params)->flags;
+ rcu_read_unlock();
+ return flags == SKBEDIT_F_MARK;
+ }
#endif
return false;
}
static inline u32 tcf_skbedit_mark(const struct tc_action *a)
{
- return to_skbedit(a)->mark;
+ u32 mark;
+
+ rcu_read_lock();
+ mark = rcu_dereference(to_skbedit(a)->params)->mark;
+ rcu_read_unlock();
+
+ return mark;
}
#endif /* __NET_TC_SKBEDIT_H */
diff --git a/include/net/tc_act/tc_tunnel_key.h b/include/net/tc_act/tc_tunnel_key.h
index efef0b4b1b2b..46b8c7f1c8d5 100644
--- a/include/net/tc_act/tc_tunnel_key.h
+++ b/include/net/tc_act/tc_tunnel_key.h
@@ -18,7 +18,6 @@
struct tcf_tunnel_key_params {
struct rcu_head rcu;
int tcft_action;
- int action;
struct metadata_dst *tcft_enc_metadata;
};
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 0448e7c5d2b4..770917d0caa7 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -36,6 +36,7 @@
#include <net/inet_hashtables.h>
#include <net/checksum.h>
#include <net/request_sock.h>
+#include <net/sock_reuseport.h>
#include <net/sock.h>
#include <net/snmp.h>
#include <net/ip.h>
@@ -342,6 +343,7 @@ ssize_t tcp_splice_read(struct socket *sk, loff_t *ppos,
struct pipe_inode_info *pipe, size_t len,
unsigned int flags);
+void tcp_enter_quickack_mode(struct sock *sk, unsigned int max_quickacks);
static inline void tcp_dec_quickack_mode(struct sock *sk,
const unsigned int pkts)
{
@@ -388,7 +390,8 @@ bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst);
void tcp_close(struct sock *sk, long timeout);
void tcp_init_sock(struct sock *sk);
void tcp_init_transfer(struct sock *sk, int bpf_op);
-__poll_t tcp_poll_mask(struct socket *sock, __poll_t events);
+__poll_t tcp_poll(struct file *file, struct socket *sock,
+ struct poll_table_struct *wait);
int tcp_getsockopt(struct sock *sk, int level, int optname,
char __user *optval, int __user *optlen);
int tcp_setsockopt(struct sock *sk, int level, int optname,
@@ -471,19 +474,45 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb);
*/
static inline void tcp_synq_overflow(const struct sock *sk)
{
- unsigned long last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp;
- unsigned long now = jiffies;
+ unsigned int last_overflow;
+ unsigned int now = jiffies;
- if (time_after(now, last_overflow + HZ))
+ if (sk->sk_reuseport) {
+ struct sock_reuseport *reuse;
+
+ reuse = rcu_dereference(sk->sk_reuseport_cb);
+ if (likely(reuse)) {
+ last_overflow = READ_ONCE(reuse->synq_overflow_ts);
+ if (time_after32(now, last_overflow + HZ))
+ WRITE_ONCE(reuse->synq_overflow_ts, now);
+ return;
+ }
+ }
+
+ last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp;
+ if (time_after32(now, last_overflow + HZ))
tcp_sk(sk)->rx_opt.ts_recent_stamp = now;
}
/* syncookies: no recent synqueue overflow on this listening socket? */
static inline bool tcp_synq_no_recent_overflow(const struct sock *sk)
{
- unsigned long last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp;
+ unsigned int last_overflow;
+ unsigned int now = jiffies;
+
+ if (sk->sk_reuseport) {
+ struct sock_reuseport *reuse;
- return time_after(jiffies, last_overflow + TCP_SYNCOOKIE_VALID);
+ reuse = rcu_dereference(sk->sk_reuseport_cb);
+ if (likely(reuse)) {
+ last_overflow = READ_ONCE(reuse->synq_overflow_ts);
+ return time_after32(now, last_overflow +
+ TCP_SYNCOOKIE_VALID);
+ }
+ }
+
+ last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp;
+ return time_after32(now, last_overflow + TCP_SYNCOOKIE_VALID);
}
static inline u32 tcp_cookie_time(void)
@@ -538,6 +567,7 @@ void tcp_send_fin(struct sock *sk);
void tcp_send_active_reset(struct sock *sk, gfp_t priority);
int tcp_send_synack(struct sock *);
void tcp_push_one(struct sock *, unsigned int mss_now);
+void __tcp_send_ack(struct sock *sk, u32 rcv_nxt);
void tcp_send_ack(struct sock *sk);
void tcp_send_delayed_ack(struct sock *sk);
void tcp_send_loss_probe(struct sock *sk);
@@ -827,6 +857,10 @@ struct tcp_skb_cb {
#define TCP_SKB_CB(__skb) ((struct tcp_skb_cb *)&((__skb)->cb[0]))
+static inline void bpf_compute_data_end_sk_skb(struct sk_buff *skb)
+{
+ TCP_SKB_CB(skb)->bpf.data_end = skb->data + skb_headlen(skb);
+}
#if IS_ENABLED(CONFIG_IPV6)
/* This is the variant of inet6_iif() that must be used by TCP,
@@ -834,6 +868,11 @@ struct tcp_skb_cb {
*/
static inline int tcp_v6_iif(const struct sk_buff *skb)
{
+ return TCP_SKB_CB(skb)->header.h6.iif;
+}
+
+static inline int tcp_v6_iif_l3_slave(const struct sk_buff *skb)
+{
bool l3_slave = ipv6_l3mdev_skb(TCP_SKB_CB(skb)->header.h6.flags);
return l3_slave ? skb->skb_iif : TCP_SKB_CB(skb)->header.h6.iif;
@@ -907,8 +946,6 @@ enum tcp_ca_event {
CA_EVENT_LOSS, /* loss timeout */
CA_EVENT_ECN_NO_CE, /* ECT set, but not CE marked */
CA_EVENT_ECN_IS_CE, /* received CE marked IP packet */
- CA_EVENT_DELAYED_ACK, /* Delayed ack is sent */
- CA_EVENT_NON_DELAYED_ACK,
};
/* Information about inbound ACK, passed to cong_ops->in_ack_event() */
@@ -953,6 +990,8 @@ struct rate_sample {
u32 prior_delivered; /* tp->delivered at "prior_mstamp" */
s32 delivered; /* number of packets delivered over interval */
long interval_us; /* time for tp->delivered to incr "delivered" */
+ u32 snd_interval_us; /* snd interval for delivered packets */
+ u32 rcv_interval_us; /* rcv interval for delivered packets */
long rtt_us; /* RTT of last (S)ACKed packet (or -1) */
int losses; /* number of packets marked lost upon ACK */
u32 acked_sacked; /* number of packets newly (S)ACKed upon ACK */
@@ -1184,6 +1223,17 @@ static inline bool tcp_is_cwnd_limited(const struct sock *sk)
return tp->is_cwnd_limited;
}
+/* BBR congestion control needs pacing.
+ * Same remark for SO_MAX_PACING_RATE.
+ * sch_fq packet scheduler is efficiently handling pacing,
+ * but is not always installed/used.
+ * Return true if TCP stack should pace packets itself.
+ */
+static inline bool tcp_needs_internal_pacing(const struct sock *sk)
+{
+ return smp_load_acquire(&sk->sk_pacing_status) == SK_PACING_NEEDED;
+}
+
/* Something is really bad, we could not queue an additional packet,
* because qdisc is full or receiver sent a 0 window.
* We do not want to add fuel to the fire, or abort too early,
@@ -1361,7 +1411,8 @@ static inline bool tcp_paws_check(const struct tcp_options_received *rx_opt,
{
if ((s32)(rx_opt->ts_recent - rx_opt->rcv_tsval) <= paws_win)
return true;
- if (unlikely(get_seconds() >= rx_opt->ts_recent_stamp + TCP_PAWS_24DAYS))
+ if (unlikely(!time_before32(ktime_get_seconds(),
+ rx_opt->ts_recent_stamp + TCP_PAWS_24DAYS)))
return true;
/*
* Some OSes send SYN and SYNACK messages with tsval=0 tsecr=0,
@@ -1391,7 +1442,8 @@ static inline bool tcp_paws_reject(const struct tcp_options_received *rx_opt,
However, we can relax time bounds for RST segments to MSL.
*/
- if (rst && get_seconds() >= rx_opt->ts_recent_stamp + TCP_PAWS_MSL)
+ if (rst && !time_before32(ktime_get_seconds(),
+ rx_opt->ts_recent_stamp + TCP_PAWS_MSL))
return false;
return true;
}
@@ -1777,7 +1829,7 @@ void tcp_v4_destroy_sock(struct sock *sk);
struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
netdev_features_t features);
-struct sk_buff **tcp_gro_receive(struct sk_buff **head, struct sk_buff *skb);
+struct sk_buff *tcp_gro_receive(struct list_head *head, struct sk_buff *skb);
int tcp_gro_complete(struct sk_buff *skb);
void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr);
@@ -2013,6 +2065,10 @@ int tcp_set_ulp_id(struct sock *sk, const int ulp);
void tcp_get_available_ulp(char *buf, size_t len);
void tcp_cleanup_ulp(struct sock *sk);
+#define MODULE_ALIAS_TCP_ULP(name) \
+ __MODULE_INFO(alias, alias_userspace, name); \
+ __MODULE_INFO(alias, alias_tcp_ulp, "tcp-ulp-" name)
+
/* Call BPF_SOCK_OPS program that returns an int. If the return value
* is < 0, then the BPF op failed (for example if the loaded BPF
* program does not support the chosen operation or there is no BPF
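
An illustrative sketch, not part of the patch: how a hypothetical TCP ULP module could pair tcp_register_ulp() with the new MODULE_ALIAS_TCP_ULP() macro, so that setsockopt(SOL_TCP, TCP_ULP, "demo") can auto-load it through the "tcp-ulp-demo" module alias. The "demo" name and the near-empty ops are made up.

#include <linux/module.h>
#include <net/tcp.h>

static int demo_ulp_init_sk(struct sock *sk)
{
	return 0;	/* a real ULP would take over sk->sk_prot here */
}

static struct tcp_ulp_ops demo_ulp_ops = {
	.name	= "demo",
	.owner	= THIS_MODULE,
	.init	= demo_ulp_init_sk,
};

static int __init demo_ulp_init(void)
{
	return tcp_register_ulp(&demo_ulp_ops);
}

static void __exit demo_ulp_exit(void)
{
	tcp_unregister_ulp(&demo_ulp_ops);
}

module_init(demo_ulp_init);
module_exit(demo_ulp_exit);
MODULE_LICENSE("GPL");
/* Lets tcp_set_ulp() request_module("tcp-ulp-demo") on demand. */
MODULE_ALIAS_TCP_ULP("demo");
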
diff --git a/include/net/tls.h b/include/net/tls.h
index 7f84ea3e217c..d5c683e8bb22 100644
--- a/include/net/tls.h
+++ b/include/net/tls.h
@@ -83,6 +83,16 @@ struct tls_device {
void (*unhash)(struct tls_device *device, struct sock *sk);
};
+enum {
+ TLS_BASE,
+ TLS_SW,
+#ifdef CONFIG_TLS_DEVICE
+ TLS_HW,
+#endif
+ TLS_HW_RECORD,
+ TLS_NUM_CONFIG,
+};
+
struct tls_sw_context_tx {
struct crypto_aead *aead_send;
struct crypto_wait async_wait;
@@ -109,14 +119,11 @@ struct tls_sw_context_rx {
struct strparser strp;
void (*saved_data_ready)(struct sock *sk);
- __poll_t (*sk_poll_mask)(struct socket *sock, __poll_t events);
+ unsigned int (*sk_poll)(struct file *file, struct socket *sock,
+ struct poll_table_struct *wait);
struct sk_buff *recv_pkt;
u8 control;
bool decrypted;
-
- char rx_aad_ciphertext[TLS_AAD_SPACE_SIZE];
- char rx_aad_plaintext[TLS_AAD_SPACE_SIZE];
-
};
struct tls_record_info {
@@ -127,7 +134,7 @@ struct tls_record_info {
skb_frag_t frags[MAX_SKB_FRAGS];
};
-struct tls_offload_context {
+struct tls_offload_context_tx {
struct crypto_aead *aead_send;
spinlock_t lock; /* protects records list */
struct list_head records_list;
@@ -146,8 +153,8 @@ struct tls_offload_context {
#define TLS_DRIVER_STATE_SIZE (max_t(size_t, 8, sizeof(void *)))
};
-#define TLS_OFFLOAD_CONTEXT_SIZE \
- (ALIGN(sizeof(struct tls_offload_context), sizeof(void *)) + \
+#define TLS_OFFLOAD_CONTEXT_SIZE_TX \
+ (ALIGN(sizeof(struct tls_offload_context_tx), sizeof(void *)) + \
TLS_DRIVER_STATE_SIZE)
enum {
@@ -196,6 +203,7 @@ struct tls_context {
int (*push_pending_record)(struct sock *sk, int flags);
void (*sk_write_space)(struct sock *sk);
+ void (*sk_destruct)(struct sock *sk);
void (*sk_proto_close)(struct sock *sk, long timeout);
int (*setsockopt)(struct sock *sk, int level,
@@ -208,13 +216,27 @@ struct tls_context {
void (*unhash)(struct sock *sk);
};
+struct tls_offload_context_rx {
+ /* sw must be the first member of tls_offload_context_rx */
+ struct tls_sw_context_rx sw;
+ atomic64_t resync_req;
+ u8 driver_state[];
+ /* The TLS layer reserves room for driver specific state
+ * Currently the belief is that there is not enough
+ * driver specific state to justify another layer of indirection
+ */
+};
+
+#define TLS_OFFLOAD_CONTEXT_SIZE_RX \
+ (ALIGN(sizeof(struct tls_offload_context_rx), sizeof(void *)) + \
+ TLS_DRIVER_STATE_SIZE)
+
int wait_on_pending_writer(struct sock *sk, long *timeo);
int tls_sk_query(struct sock *sk, int optname, char __user *optval,
int __user *optlen);
int tls_sk_attach(struct sock *sk, int optname, char __user *optval,
unsigned int optlen);
-
int tls_set_sw_offload(struct sock *sk, struct tls_context *ctx, int tx);
int tls_sw_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
int tls_sw_sendpage(struct sock *sk, struct page *page,
@@ -222,9 +244,11 @@ int tls_sw_sendpage(struct sock *sk, struct page *page,
void tls_sw_close(struct sock *sk, long timeout);
void tls_sw_free_resources_tx(struct sock *sk);
void tls_sw_free_resources_rx(struct sock *sk);
+void tls_sw_release_resources_rx(struct sock *sk);
int tls_sw_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
int nonblock, int flags, int *addr_len);
-__poll_t tls_sw_poll_mask(struct socket *sock, __poll_t events);
+unsigned int tls_sw_poll(struct file *file, struct socket *sock,
+ struct poll_table_struct *wait);
ssize_t tls_sw_splice_read(struct socket *sock, loff_t *ppos,
struct pipe_inode_info *pipe,
size_t len, unsigned int flags);
@@ -237,7 +261,7 @@ void tls_device_sk_destruct(struct sock *sk);
void tls_device_init(void);
void tls_device_cleanup(void);
-struct tls_record_info *tls_get_record(struct tls_offload_context *context,
+struct tls_record_info *tls_get_record(struct tls_offload_context_tx *context,
u32 seq, u64 *p_record_sn);
static inline bool tls_record_is_start_marker(struct tls_record_info *rec)
@@ -287,11 +311,19 @@ static inline bool tls_is_pending_open_record(struct tls_context *tls_ctx)
return tls_ctx->pending_open_record_frags;
}
+struct sk_buff *
+tls_validate_xmit_skb(struct sock *sk, struct net_device *dev,
+ struct sk_buff *skb);
+
static inline bool tls_is_sk_tx_device_offloaded(struct sock *sk)
{
- return sk_fullsock(sk) &&
- /* matches smp_store_release in tls_set_device_offload */
- smp_load_acquire(&sk->sk_destruct) == &tls_device_sk_destruct;
+#ifdef CONFIG_SOCK_VALIDATE_XMIT
+ return sk_fullsock(sk) &&
+ (smp_load_acquire(&sk->sk_validate_xmit_skb) ==
+ &tls_validate_xmit_skb);
+#else
+ return false;
+#endif
}
static inline void tls_err_abort(struct sock *sk, int err)
@@ -378,23 +410,47 @@ static inline struct tls_sw_context_tx *tls_sw_ctx_tx(
return (struct tls_sw_context_tx *)tls_ctx->priv_ctx_tx;
}
-static inline struct tls_offload_context *tls_offload_ctx(
- const struct tls_context *tls_ctx)
+static inline struct tls_offload_context_tx *
+tls_offload_ctx_tx(const struct tls_context *tls_ctx)
+{
+ return (struct tls_offload_context_tx *)tls_ctx->priv_ctx_tx;
+}
+
+static inline struct tls_offload_context_rx *
+tls_offload_ctx_rx(const struct tls_context *tls_ctx)
{
- return (struct tls_offload_context *)tls_ctx->priv_ctx_tx;
+ return (struct tls_offload_context_rx *)tls_ctx->priv_ctx_rx;
}
+/* The TLS context is valid until sk_destruct is called */
+static inline void tls_offload_rx_resync_request(struct sock *sk, __be32 seq)
+{
+ struct tls_context *tls_ctx = tls_get_ctx(sk);
+ struct tls_offload_context_rx *rx_ctx = tls_offload_ctx_rx(tls_ctx);
+
+ atomic64_set(&rx_ctx->resync_req, ((((uint64_t)seq) << 32) | 1));
+}
+
+
int tls_proccess_cmsg(struct sock *sk, struct msghdr *msg,
unsigned char *record_type);
void tls_register_device(struct tls_device *device);
void tls_unregister_device(struct tls_device *device);
+int tls_device_decrypted(struct sock *sk, struct sk_buff *skb);
+int decrypt_skb(struct sock *sk, struct sk_buff *skb,
+ struct scatterlist *sgout);
struct sk_buff *tls_validate_xmit_skb(struct sock *sk,
struct net_device *dev,
struct sk_buff *skb);
int tls_sw_fallback_init(struct sock *sk,
- struct tls_offload_context *offload_ctx,
+ struct tls_offload_context_tx *offload_ctx,
struct tls_crypto_info *crypto_info);
+int tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx);
+
+void tls_device_offload_cleanup_rx(struct sock *sk);
+void handle_device_resync(struct sock *sk, u32 seq, u64 rcd_sn);
+
#endif /* _TLS_OFFLOAD_H */
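
An illustrative sketch, not part of the patch: how a NIC driver doing TLS RX offload might use the new tls_offload_ctx_rx() and tls_offload_rx_resync_request() helpers when its hardware loses track of record boundaries on a connection. The demo_* names and the driver-private state layout are hypothetical; the driver state must fit within TLS_DRIVER_STATE_SIZE.

#include <net/tls.h>

struct demo_rx_state {
	bool pending_resync;	/* driver-defined, lives in driver_state[] */
};

/* Called from the driver's RX path with the socket owning the offloaded
 * TLS RX context and the TCP sequence where synchronization was lost.
 */
static void demo_tls_rx_lost_sync(struct sock *sk, __be32 tcp_seq)
{
	struct tls_context *tls_ctx = tls_get_ctx(sk);
	struct tls_offload_context_rx *rx_ctx = tls_offload_ctx_rx(tls_ctx);
	struct demo_rx_state *st = (struct demo_rx_state *)rx_ctx->driver_state;

	st->pending_resync = true;
	/* Stores (seq << 32) | 1 in resync_req; the TLS layer later hands
	 * the matching record sequence number back to the driver.
	 */
	tls_offload_rx_resync_request(sk, tcp_seq);
}
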
diff --git a/include/net/transp_v6.h b/include/net/transp_v6.h
index f6a3543e5247..a8f6020f1196 100644
--- a/include/net/transp_v6.h
+++ b/include/net/transp_v6.h
@@ -42,8 +42,7 @@ void ip6_datagram_recv_specific_ctl(struct sock *sk, struct msghdr *msg,
struct sk_buff *skb);
int ip6_datagram_send_ctl(struct net *net, struct sock *sk, struct msghdr *msg,
- struct flowi6 *fl6, struct ipcm6_cookie *ipc6,
- struct sockcm_cookie *sockc);
+ struct flowi6 *fl6, struct ipcm6_cookie *ipc6);
void __ip6_dgram_sock_seq_show(struct seq_file *seq, struct sock *sp,
__u16 srcp, __u16 destp, int rqueue, int bucket);
diff --git a/include/net/udp.h b/include/net/udp.h
index b1ea8b0f5e6a..8482a990b0bb 100644
--- a/include/net/udp.h
+++ b/include/net/udp.h
@@ -170,8 +170,8 @@ static inline void udp_csum_pull_header(struct sk_buff *skb)
typedef struct sock *(*udp_lookup_t)(struct sk_buff *skb, __be16 sport,
__be16 dport);
-struct sk_buff **udp_gro_receive(struct sk_buff **head, struct sk_buff *skb,
- struct udphdr *uh, udp_lookup_t lookup);
+struct sk_buff *udp_gro_receive(struct list_head *head, struct sk_buff *skb,
+ struct udphdr *uh, udp_lookup_t lookup);
int udp_gro_complete(struct sk_buff *skb, int nhoff, udp_lookup_t lookup);
struct sk_buff *__udp_gso_segment(struct sk_buff *gso_skb,
@@ -285,7 +285,7 @@ int udp_init_sock(struct sock *sk);
int udp_pre_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
int __udp_disconnect(struct sock *sk, int flags);
int udp_disconnect(struct sock *sk, int flags);
-__poll_t udp_poll_mask(struct socket *sock, __poll_t events);
+__poll_t udp_poll(struct file *file, struct socket *sock, poll_table *wait);
struct sk_buff *skb_udp_tunnel_segment(struct sk_buff *skb,
netdev_features_t features,
bool is_ipv6);
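
An illustrative sketch, not part of the patch, of the callback shape after GRO moved from sk_buff **head chains to list_head buckets: a protocol or tunnel gro_receive handler now receives a struct list_head and returns the skb to flush (or NULL). The demo_lookup() stub is made up and a real handler would also set the GRO flush flag on errors.

#include <linux/skbuff.h>
#include <net/udp.h>

static struct sock *demo_lookup(struct sk_buff *skb, __be16 sport,
				__be16 dport)
{
	return NULL;		/* a real handler resolves the UDP socket here */
}

static struct sk_buff *demo_gro_receive(struct list_head *head,
					struct sk_buff *skb)
{
	struct udphdr *uh = udp_gro_udphdr(skb);

	if (unlikely(!uh))
		return NULL;

	/* udp_gro_receive() now walks the list_head bucket directly. */
	return udp_gro_receive(head, skb, uh, demo_lookup);
}
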
diff --git a/include/net/udp_tunnel.h b/include/net/udp_tunnel.h
index b95a6927c718..fe680ab6b15a 100644
--- a/include/net/udp_tunnel.h
+++ b/include/net/udp_tunnel.h
@@ -65,9 +65,9 @@ static inline int udp_sock_create(struct net *net,
typedef int (*udp_tunnel_encap_rcv_t)(struct sock *sk, struct sk_buff *skb);
typedef void (*udp_tunnel_encap_destroy_t)(struct sock *sk);
-typedef struct sk_buff **(*udp_tunnel_gro_receive_t)(struct sock *sk,
- struct sk_buff **head,
- struct sk_buff *skb);
+typedef struct sk_buff *(*udp_tunnel_gro_receive_t)(struct sock *sk,
+ struct list_head *head,
+ struct sk_buff *skb);
typedef int (*udp_tunnel_gro_complete_t)(struct sock *sk, struct sk_buff *skb,
int nhoff);
diff --git a/include/net/xdp.h b/include/net/xdp.h
index 2deea7166a34..76b95256c266 100644
--- a/include/net/xdp.h
+++ b/include/net/xdp.h
@@ -84,6 +84,13 @@ struct xdp_frame {
struct net_device *dev_rx; /* used by cpumap */
};
+/* Clear kernel pointers in xdp_frame */
+static inline void xdp_scrub_frame(struct xdp_frame *frame)
+{
+ frame->data = NULL;
+ frame->dev_rx = NULL;
+}
+
/* Convert xdp_buff to xdp_frame */
static inline
struct xdp_frame *convert_to_xdp_frame(struct xdp_buff *xdp)
@@ -144,4 +151,17 @@ xdp_data_meta_unsupported(const struct xdp_buff *xdp)
return unlikely(xdp->data_meta > xdp->data);
}
+struct xdp_attachment_info {
+ struct bpf_prog *prog;
+ u32 flags;
+};
+
+struct netdev_bpf;
+int xdp_attachment_query(struct xdp_attachment_info *info,
+ struct netdev_bpf *bpf);
+bool xdp_attachment_flags_ok(struct xdp_attachment_info *info,
+ struct netdev_bpf *bpf);
+void xdp_attachment_setup(struct xdp_attachment_info *info,
+ struct netdev_bpf *bpf);
+
#endif /* __LINUX_NET_XDP_H__ */
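
An illustrative sketch, not part of the patch: how a driver's ndo_bpf() handler might use the new xdp_attachment_* helpers to track its attached XDP program and answer queries consistently. The demo_priv structure and the omitted hardware programming step are hypothetical.

#include <linux/netdevice.h>
#include <net/xdp.h>

struct demo_priv {
	struct xdp_attachment_info xdp;
};

static int demo_ndo_bpf(struct net_device *dev, struct netdev_bpf *bpf)
{
	struct demo_priv *priv = netdev_priv(dev);

	switch (bpf->command) {
	case XDP_SETUP_PROG:
		/* Reject flag changes that conflict with the current attachment. */
		if (!xdp_attachment_flags_ok(&priv->xdp, bpf))
			return -EBUSY;
		/* ... reprogram the datapath for bpf->prog here ... */
		xdp_attachment_setup(&priv->xdp, bpf);
		return 0;
	case XDP_QUERY_PROG:
		return xdp_attachment_query(&priv->xdp, bpf);
	default:
		return -EINVAL;
	}
}
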
diff --git a/include/net/xdp_sock.h b/include/net/xdp_sock.h
index 9fe472f2ac95..7161856bcf9c 100644
--- a/include/net/xdp_sock.h
+++ b/include/net/xdp_sock.h
@@ -60,6 +60,10 @@ struct xdp_sock {
bool zc;
/* Protects multiple processes in the control path */
struct mutex mutex;
+ /* Mutual exclusion of NAPI TX thread and sendmsg error paths
+ * in the SKB destructor callback.
+ */
+ spinlock_t tx_completion_lock;
u64 rx_dropped;
};
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index 557122846e0e..0eb390c205af 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -23,6 +23,7 @@
#include <net/ipv6.h>
#include <net/ip6_fib.h>
#include <net/flow.h>
+#include <net/gro_cells.h>
#include <linux/interrupt.h>
@@ -147,6 +148,7 @@ struct xfrm_state {
struct xfrm_id id;
struct xfrm_selector sel;
struct xfrm_mark mark;
+ u32 if_id;
u32 tfcpad;
u32 genid;
@@ -166,7 +168,7 @@ struct xfrm_state {
int header_len;
int trailer_len;
u32 extra_flags;
- u32 output_mark;
+ struct xfrm_mark smark;
} props;
struct xfrm_lifetime_cfg lft;
@@ -225,7 +227,7 @@ struct xfrm_state {
long saved_tmo;
/* Last used time */
- unsigned long lastused;
+ time64_t lastused;
struct page_frag xfrag;
@@ -292,6 +294,13 @@ struct xfrm_replay {
int (*overflow)(struct xfrm_state *x, struct sk_buff *skb);
};
+struct xfrm_if_cb {
+ struct xfrm_if *(*decode_session)(struct sk_buff *skb);
+};
+
+void xfrm_if_register_cb(const struct xfrm_if_cb *ifcb);
+void xfrm_if_unregister_cb(void);
+
struct net_device;
struct xfrm_type;
struct xfrm_dst;
@@ -323,7 +332,6 @@ int xfrm_policy_register_afinfo(const struct xfrm_policy_afinfo *afinfo, int fam
void xfrm_policy_unregister_afinfo(const struct xfrm_policy_afinfo *afinfo);
void km_policy_notify(struct xfrm_policy *xp, int dir,
const struct km_event *c);
-void xfrm_policy_cache_flush(void);
void km_state_notify(struct xfrm_state *x, const struct km_event *c);
struct xfrm_tmpl;
@@ -574,6 +582,7 @@ struct xfrm_policy {
atomic_t genid;
u32 priority;
u32 index;
+ u32 if_id;
struct xfrm_mark mark;
struct xfrm_selector selector;
struct xfrm_lifetime_cfg lft;
@@ -735,7 +744,7 @@ static inline struct audit_buffer *xfrm_audit_start(const char *op)
{
struct audit_buffer *audit_buf = NULL;
- if (audit_enabled == 0)
+ if (audit_enabled == AUDIT_OFF)
return NULL;
audit_buf = audit_log_start(audit_context(), GFP_ATOMIC,
AUDIT_MAC_IPSEC_EVENT);
@@ -1037,6 +1046,22 @@ static inline void xfrm_dst_destroy(struct xfrm_dst *xdst)
void xfrm_dst_ifdown(struct dst_entry *dst, struct net_device *dev);
+struct xfrm_if_parms {
+ char name[IFNAMSIZ]; /* name of XFRM device */
+ int link; /* ifindex of underlying L2 interface */
+ u32 if_id; /* interface identifier */
+};
+
+struct xfrm_if {
+ struct xfrm_if __rcu *next; /* next interface in list */
+ struct net_device *dev; /* virtual device associated with interface */
+ struct net_device *phydev; /* physical device */
+ struct net *net; /* netns for packet i/o */
+ struct xfrm_if_parms p; /* interface parms */
+
+ struct gro_cells gro_cells;
+};
+
struct xfrm_offload {
/* Output sequence number for replay protection on offloading. */
struct {
@@ -1532,8 +1557,8 @@ struct xfrm_state *xfrm_state_find(const xfrm_address_t *daddr,
const struct flowi *fl,
struct xfrm_tmpl *tmpl,
struct xfrm_policy *pol, int *err,
- unsigned short family);
-struct xfrm_state *xfrm_stateonly_find(struct net *net, u32 mark,
+ unsigned short family, u32 if_id);
+struct xfrm_state *xfrm_stateonly_find(struct net *net, u32 mark, u32 if_id,
xfrm_address_t *daddr,
xfrm_address_t *saddr,
unsigned short family,
@@ -1690,20 +1715,20 @@ int xfrm_policy_walk(struct net *net, struct xfrm_policy_walk *walk,
void *);
void xfrm_policy_walk_done(struct xfrm_policy_walk *walk, struct net *net);
int xfrm_policy_insert(int dir, struct xfrm_policy *policy, int excl);
-struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u32 mark,
+struct xfrm_policy *xfrm_policy_bysel_ctx(struct net *net, u32 mark, u32 if_id,
u8 type, int dir,
struct xfrm_selector *sel,
struct xfrm_sec_ctx *ctx, int delete,
int *err);
-struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u8, int dir,
- u32 id, int delete, int *err);
+struct xfrm_policy *xfrm_policy_byid(struct net *net, u32 mark, u32 if_id, u8,
+ int dir, u32 id, int delete, int *err);
int xfrm_policy_flush(struct net *net, u8 type, bool task_valid);
void xfrm_policy_hash_rebuild(struct net *net);
u32 xfrm_get_acqseq(void);
int verify_spi_info(u8 proto, u32 min, u32 max);
int xfrm_alloc_spi(struct xfrm_state *x, u32 minspi, u32 maxspi);
struct xfrm_state *xfrm_find_acq(struct net *net, const struct xfrm_mark *mark,
- u8 mode, u32 reqid, u8 proto,
+ u8 mode, u32 reqid, u32 if_id, u8 proto,
const xfrm_address_t *daddr,
const xfrm_address_t *saddr, int create,
unsigned short family);
@@ -2012,6 +2037,22 @@ static inline int xfrm_mark_put(struct sk_buff *skb, const struct xfrm_mark *m)
return ret;
}
+static inline __u32 xfrm_smark_get(__u32 mark, struct xfrm_state *x)
+{
+ struct xfrm_mark *m = &x->props.smark;
+
+ return (m->v & m->m) | (mark & ~m->m);
+}
+
+static inline int xfrm_if_id_put(struct sk_buff *skb, __u32 if_id)
+{
+ int ret = 0;
+
+ if (if_id)
+ ret = nla_put_u32(skb, XFRMA_IF_ID, if_id);
+ return ret;
+}
+
static inline int xfrm_tunnel_check(struct sk_buff *skb, struct xfrm_state *x,
unsigned int family)
{
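
An illustrative sketch, not part of the patch: how output and netlink code might use the new smark and if_id plumbing, combining the packet mark with the state's smark via xfrm_smark_get() and emitting the optional XFRMA_IF_ID attribute with xfrm_if_id_put(). The demo_* wrappers are hypothetical.

#include <net/xfrm.h>

static u32 demo_output_mark(const struct sk_buff *skb, struct xfrm_state *x)
{
	/* Bits covered by x->props.smark.m come from the state,
	 * the remaining bits from the packet mark.
	 */
	return xfrm_smark_get(skb->mark, x);
}

static int demo_fill_if_id(struct sk_buff *msg, const struct xfrm_state *x)
{
	/* Emits XFRMA_IF_ID only when a non-zero interface id is set. */
	return xfrm_if_id_put(msg, x->if_id);
}
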
diff --git a/include/pcmcia/ds.h b/include/pcmcia/ds.h
index 2d56e428506c..3037157855f0 100644
--- a/include/pcmcia/ds.h
+++ b/include/pcmcia/ds.h
@@ -206,16 +206,6 @@ int pcmcia_write_config_byte(struct pcmcia_device *p_dev, off_t where, u8 val);
/* device configuration */
int pcmcia_request_io(struct pcmcia_device *p_dev);
-int __must_check
-__pcmcia_request_exclusive_irq(struct pcmcia_device *p_dev,
- irq_handler_t handler);
-static inline __must_check __deprecated int
-pcmcia_request_exclusive_irq(struct pcmcia_device *p_dev,
- irq_handler_t handler)
-{
- return __pcmcia_request_exclusive_irq(p_dev, handler);
-}
-
int __must_check pcmcia_request_irq(struct pcmcia_device *p_dev,
irq_handler_t handler);
diff --git a/include/rdma/ib.h b/include/rdma/ib.h
index 66dbed0c146d..4f385ec54f80 100644
--- a/include/rdma/ib.h
+++ b/include/rdma/ib.h
@@ -53,12 +53,12 @@ struct ib_addr {
#define sib_interface_id ib_u.uib_addr64[1]
};
-static inline int ib_addr_any(const struct ib_addr *a)
+static inline bool ib_addr_any(const struct ib_addr *a)
{
return ((a->sib_addr64[0] | a->sib_addr64[1]) == 0);
}
-static inline int ib_addr_loopback(const struct ib_addr *a)
+static inline bool ib_addr_loopback(const struct ib_addr *a)
{
return ((a->sib_addr32[0] | a->sib_addr32[1] |
a->sib_addr32[2] | (a->sib_addr32[3] ^ htonl(1))) == 0);
diff --git a/include/rdma/ib_addr.h b/include/rdma/ib_addr.h
index c2c8b1fdeead..77c7908b7d73 100644
--- a/include/rdma/ib_addr.h
+++ b/include/rdma/ib_addr.h
@@ -58,6 +58,7 @@
* @bound_dev_if: An optional device interface index.
* @transport: The transport type used.
* @net: Network namespace containing the bound_dev_if net_dev.
+ * @sgid_attr: GID attribute to use for identified SGID
*/
struct rdma_dev_addr {
unsigned char src_dev_addr[MAX_ADDR_LEN];
@@ -67,6 +68,7 @@ struct rdma_dev_addr {
int bound_dev_if;
enum rdma_transport_type transport;
struct net *net;
+ const struct ib_gid_attr *sgid_attr;
enum rdma_network_type network;
int hoplimit;
};
@@ -95,7 +97,7 @@ int rdma_translate_ip(const struct sockaddr *addr,
* or been canceled. A status of 0 indicates success.
* @context: User-specified context associated with the call.
*/
-int rdma_resolve_ip(struct sockaddr *src_addr, struct sockaddr *dst_addr,
+int rdma_resolve_ip(struct sockaddr *src_addr, const struct sockaddr *dst_addr,
struct rdma_dev_addr *addr, int timeout_ms,
void (*callback)(int status, struct sockaddr *src_addr,
struct rdma_dev_addr *addr, void *context),
@@ -107,7 +109,7 @@ void rdma_copy_addr(struct rdma_dev_addr *dev_addr,
const struct net_device *dev,
const unsigned char *dst_dev_addr);
-int rdma_addr_size(struct sockaddr *addr);
+int rdma_addr_size(const struct sockaddr *addr);
int rdma_addr_size_in6(struct sockaddr_in6 *addr);
int rdma_addr_size_kss(struct __kernel_sockaddr_storage *addr);
diff --git a/include/rdma/ib_cache.h b/include/rdma/ib_cache.h
index a5f249828115..62e990b620aa 100644
--- a/include/rdma/ib_cache.h
+++ b/include/rdma/ib_cache.h
@@ -37,45 +37,23 @@
#include <rdma/ib_verbs.h>
-/**
- * ib_get_cached_gid - Returns a cached GID table entry
- * @device: The device to query.
- * @port_num: The port number of the device to query.
- * @index: The index into the cached GID table to query.
- * @gid: The GID value found at the specified index.
- * @attr: The GID attribute found at the specified index (only in RoCE).
- * NULL means ignore (output parameter).
- *
- * ib_get_cached_gid() fetches the specified GID table entry stored in
- * the local software cache.
- */
-int ib_get_cached_gid(struct ib_device *device,
- u8 port_num,
- int index,
- union ib_gid *gid,
- struct ib_gid_attr *attr);
-
-int ib_find_cached_gid(struct ib_device *device,
- const union ib_gid *gid,
- enum ib_gid_type gid_type,
- struct net_device *ndev,
- u8 *port_num,
- u16 *index);
+int rdma_query_gid(struct ib_device *device, u8 port_num, int index,
+ union ib_gid *gid);
+const struct ib_gid_attr *rdma_find_gid(struct ib_device *device,
+ const union ib_gid *gid,
+ enum ib_gid_type gid_type,
+ struct net_device *ndev);
+const struct ib_gid_attr *rdma_find_gid_by_port(struct ib_device *ib_dev,
+ const union ib_gid *gid,
+ enum ib_gid_type gid_type,
+ u8 port,
+ struct net_device *ndev);
+const struct ib_gid_attr *rdma_find_gid_by_filter(
+ struct ib_device *device, const union ib_gid *gid, u8 port_num,
+ bool (*filter)(const union ib_gid *gid, const struct ib_gid_attr *,
+ void *),
+ void *context);
-int ib_find_cached_gid_by_port(struct ib_device *device,
- const union ib_gid *gid,
- enum ib_gid_type gid_type,
- u8 port_num,
- struct net_device *ndev,
- u16 *index);
-
-int ib_find_gid_by_filter(struct ib_device *device,
- const union ib_gid *gid,
- u8 port_num,
- bool (*filter)(const union ib_gid *gid,
- const struct ib_gid_attr *,
- void *),
- void *context, u16 *index);
/**
* ib_get_cached_pkey - Returns a cached PKey table entry
* @device: The device to query.
@@ -150,4 +128,9 @@ int ib_get_cached_port_state(struct ib_device *device,
enum ib_port_state *port_active);
bool rdma_is_zero_gid(const union ib_gid *gid);
+const struct ib_gid_attr *rdma_get_gid_attr(struct ib_device *device,
+ u8 port_num, int index);
+void rdma_put_gid_attr(const struct ib_gid_attr *attr);
+void rdma_hold_gid_attr(const struct ib_gid_attr *attr);
+
#endif /* _IB_CACHE_H */
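
An illustrative sketch, not part of the patch, of the reference-counted pattern the new GID attribute API implies: callers receive a const struct ib_gid_attr * with a reference held and must release it with rdma_put_gid_attr(). The demo_ function and the specific check are made up.

#include <rdma/ib_cache.h>

static int demo_check_gid_type(struct ib_device *device, u8 port_num,
			       int index)
{
	const struct ib_gid_attr *attr;
	int ret = 0;

	attr = rdma_get_gid_attr(device, port_num, index);
	if (IS_ERR(attr))
		return PTR_ERR(attr);

	if (attr->gid_type != IB_GID_TYPE_ROCE_UDP_ENCAP)
		ret = -EINVAL;

	/* Drop the reference taken by rdma_get_gid_attr(). */
	rdma_put_gid_attr(attr);
	return ret;
}
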
diff --git a/include/rdma/ib_cm.h b/include/rdma/ib_cm.h
index 7979cb04f529..c10f4b5ea8ab 100644
--- a/include/rdma/ib_cm.h
+++ b/include/rdma/ib_cm.h
@@ -120,6 +120,13 @@ struct ib_cm_req_event_param {
struct sa_path_rec *primary_path;
struct sa_path_rec *alternate_path;
+ /*
+ * SGID attribute of the primary path. Currently only
+ * useful for RoCE. Alternate path GID attributes
+ * are not yet supported.
+ */
+ const struct ib_gid_attr *ppath_sgid_attr;
+
__be64 remote_ca_guid;
u32 remote_qkey;
u32 remote_qpn;
@@ -226,6 +233,12 @@ struct ib_cm_apr_event_param {
struct ib_cm_sidr_req_event_param {
struct ib_cm_id *listen_id;
__be64 service_id;
+
+ /*
+ * SGID attribute of the request. Currently only
+ * useful for RoCE.
+ */
+ const struct ib_gid_attr *sgid_attr;
/* P_Key that was used by the GMP's BTH header */
u16 bth_pkey;
u8 port;
@@ -246,6 +259,7 @@ struct ib_cm_sidr_rep_event_param {
u32 qkey;
u32 qpn;
void *info;
+ const struct ib_gid_attr *sgid_attr;
u8 info_len;
};
@@ -297,7 +311,7 @@ struct ib_cm_event {
* destroy the @cm_id after the callback completes.
*/
typedef int (*ib_cm_handler)(struct ib_cm_id *cm_id,
- struct ib_cm_event *event);
+ const struct ib_cm_event *event);
struct ib_cm_id {
ib_cm_handler cm_handler;
@@ -365,6 +379,7 @@ struct ib_cm_id *ib_cm_insert_listen(struct ib_device *device,
struct ib_cm_req_param {
struct sa_path_rec *primary_path;
struct sa_path_rec *alternate_path;
+ const struct ib_gid_attr *ppath_sgid_attr;
__be64 service_id;
u32 qp_num;
enum ib_qp_type qp_type;
@@ -566,6 +581,7 @@ int ib_send_cm_apr(struct ib_cm_id *cm_id,
struct ib_cm_sidr_req_param {
struct sa_path_rec *path;
+ const struct ib_gid_attr *sgid_attr;
__be64 service_id;
int timeout_ms;
const void *private_data;
diff --git a/include/rdma/ib_mad.h b/include/rdma/ib_mad.h
index 2f4f1768ded4..f6ba366051c7 100644
--- a/include/rdma/ib_mad.h
+++ b/include/rdma/ib_mad.h
@@ -262,6 +262,39 @@ struct ib_class_port_info {
__be32 trap_qkey;
};
+/* PortInfo CapabilityMask */
+enum ib_port_capability_mask_bits {
+ IB_PORT_SM = 1 << 1,
+ IB_PORT_NOTICE_SUP = 1 << 2,
+ IB_PORT_TRAP_SUP = 1 << 3,
+ IB_PORT_OPT_IPD_SUP = 1 << 4,
+ IB_PORT_AUTO_MIGR_SUP = 1 << 5,
+ IB_PORT_SL_MAP_SUP = 1 << 6,
+ IB_PORT_MKEY_NVRAM = 1 << 7,
+ IB_PORT_PKEY_NVRAM = 1 << 8,
+ IB_PORT_LED_INFO_SUP = 1 << 9,
+ IB_PORT_SM_DISABLED = 1 << 10,
+ IB_PORT_SYS_IMAGE_GUID_SUP = 1 << 11,
+ IB_PORT_PKEY_SW_EXT_PORT_TRAP_SUP = 1 << 12,
+ IB_PORT_EXTENDED_SPEEDS_SUP = 1 << 14,
+ IB_PORT_CM_SUP = 1 << 16,
+ IB_PORT_SNMP_TUNNEL_SUP = 1 << 17,
+ IB_PORT_REINIT_SUP = 1 << 18,
+ IB_PORT_DEVICE_MGMT_SUP = 1 << 19,
+ IB_PORT_VENDOR_CLASS_SUP = 1 << 20,
+ IB_PORT_DR_NOTICE_SUP = 1 << 21,
+ IB_PORT_CAP_MASK_NOTICE_SUP = 1 << 22,
+ IB_PORT_BOOT_MGMT_SUP = 1 << 23,
+ IB_PORT_LINK_LATENCY_SUP = 1 << 24,
+ IB_PORT_CLIENT_REG_SUP = 1 << 25,
+ IB_PORT_OTHER_LOCAL_CHANGES_SUP = 1 << 26,
+ IB_PORT_LINK_SPEED_WIDTH_TABLE_SUP = 1 << 27,
+ IB_PORT_VENDOR_SPECIFIC_MADS_TABLE_SUP = 1 << 28,
+ IB_PORT_MCAST_PKEY_TRAP_SUPPRESSION_SUP = 1 << 29,
+ IB_PORT_MCAST_FDB_TOP_SUP = 1 << 30,
+ IB_PORT_HIERARCHY_INFO_SUP = 1ULL << 31,
+};
+
#define OPA_CLASS_PORT_INFO_PR_SUPPORT BIT(26)
struct opa_class_port_info {
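
An illustrative sketch, not part of the patch: with the CapabilityMask bits relocated to ib_mad.h and ib_port_attr.port_cap_flags now carrying the IBA-defined PortInfo mask directly, a caller can test a capability bit as below. The demo_ helper is hypothetical.

#include <rdma/ib_verbs.h>
#include <rdma/ib_mad.h>

static bool demo_port_supports_cm(struct ib_device *device, u8 port_num)
{
	struct ib_port_attr attr;

	if (ib_query_port(device, port_num, &attr))
		return false;

	return !!(attr.port_cap_flags & IB_PORT_CM_SUP);
}
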
diff --git a/include/rdma/ib_sa.h b/include/rdma/ib_sa.h
index bacb144f7780..b6ddf2a1b9d8 100644
--- a/include/rdma/ib_sa.h
+++ b/include/rdma/ib_sa.h
@@ -172,12 +172,7 @@ struct sa_path_rec_ib {
*/
struct sa_path_rec_roce {
bool route_resolved;
- u8 dmac[ETH_ALEN];
- /* ignored in IB */
- int ifindex;
- /* ignored in IB */
- struct net *net;
-
+ u8 dmac[ETH_ALEN];
};
struct sa_path_rec_opa {
@@ -556,13 +551,10 @@ int ib_init_ah_from_mcmember(struct ib_device *device, u8 port_num,
enum ib_gid_type gid_type,
struct rdma_ah_attr *ah_attr);
-/**
- * ib_init_ah_attr_from_path - Initialize address handle attributes based on
- * an SA path record.
- */
int ib_init_ah_attr_from_path(struct ib_device *device, u8 port_num,
struct sa_path_rec *rec,
- struct rdma_ah_attr *ah_attr);
+ struct rdma_ah_attr *ah_attr,
+ const struct ib_gid_attr *sgid_attr);
/**
* ib_sa_pack_path - Convert a path record from struct ib_sa_path_rec
@@ -667,45 +659,10 @@ static inline void sa_path_set_dmac_zero(struct sa_path_rec *rec)
eth_zero_addr(rec->roce.dmac);
}
-static inline void sa_path_set_ifindex(struct sa_path_rec *rec, int ifindex)
-{
- if (sa_path_is_roce(rec))
- rec->roce.ifindex = ifindex;
-}
-
-static inline void sa_path_set_ndev(struct sa_path_rec *rec, struct net *net)
-{
- if (sa_path_is_roce(rec))
- rec->roce.net = net;
-}
-
static inline u8 *sa_path_get_dmac(struct sa_path_rec *rec)
{
if (sa_path_is_roce(rec))
return rec->roce.dmac;
return NULL;
}
-
-static inline int sa_path_get_ifindex(struct sa_path_rec *rec)
-{
- if (sa_path_is_roce(rec))
- return rec->roce.ifindex;
- return 0;
-}
-
-static inline struct net *sa_path_get_ndev(struct sa_path_rec *rec)
-{
- if (sa_path_is_roce(rec))
- return rec->roce.net;
- return NULL;
-}
-
-static inline struct net_device *ib_get_ndev_from_path(struct sa_path_rec *rec)
-{
- return sa_path_get_ndev(rec) ?
- dev_get_by_index(sa_path_get_ndev(rec),
- sa_path_get_ifindex(rec))
- : NULL;
-}
-
#endif /* IB_SA_H */
diff --git a/include/rdma/ib_umem_odp.h b/include/rdma/ib_umem_odp.h
index 6a17f856f841..381cdf5a9bd1 100644
--- a/include/rdma/ib_umem_odp.h
+++ b/include/rdma/ib_umem_odp.h
@@ -119,7 +119,8 @@ typedef int (*umem_call_back)(struct ib_umem *item, u64 start, u64 end,
*/
int rbt_ib_umem_for_each_in_range(struct rb_root_cached *root,
u64 start, u64 end,
- umem_call_back cb, void *cookie);
+ umem_call_back cb,
+ bool blockable, void *cookie);
/*
* Find first region intersecting with address range.
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 4c6241bc2039..e950c2a68f06 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -94,6 +94,7 @@ enum ib_gid_type {
struct ib_gid_attr {
struct net_device *ndev;
struct ib_device *device;
+ union ib_gid gid;
enum ib_gid_type gid_type;
u16 index;
u8 port_num;
@@ -148,13 +149,13 @@ static inline enum ib_gid_type ib_network_to_gid_type(enum rdma_network_type net
return IB_GID_TYPE_IB;
}
-static inline enum rdma_network_type ib_gid_to_network_type(enum ib_gid_type gid_type,
- union ib_gid *gid)
+static inline enum rdma_network_type
+rdma_gid_attr_network_type(const struct ib_gid_attr *attr)
{
- if (gid_type == IB_GID_TYPE_IB)
+ if (attr->gid_type == IB_GID_TYPE_IB)
return RDMA_NETWORK_IB;
- if (ipv6_addr_v4mapped((struct in6_addr *)gid))
+ if (ipv6_addr_v4mapped((struct in6_addr *)&attr->gid))
return RDMA_NETWORK_IPV4;
else
return RDMA_NETWORK_IPV6;
@@ -344,7 +345,8 @@ struct ib_device_attr {
int max_qp;
int max_qp_wr;
u64 device_cap_flags;
- int max_sge;
+ int max_send_sge;
+ int max_recv_sge;
int max_sge_rd;
int max_cq;
int max_cqe;
@@ -430,33 +432,6 @@ enum ib_port_state {
IB_PORT_ACTIVE_DEFER = 5
};
-enum ib_port_cap_flags {
- IB_PORT_SM = 1 << 1,
- IB_PORT_NOTICE_SUP = 1 << 2,
- IB_PORT_TRAP_SUP = 1 << 3,
- IB_PORT_OPT_IPD_SUP = 1 << 4,
- IB_PORT_AUTO_MIGR_SUP = 1 << 5,
- IB_PORT_SL_MAP_SUP = 1 << 6,
- IB_PORT_MKEY_NVRAM = 1 << 7,
- IB_PORT_PKEY_NVRAM = 1 << 8,
- IB_PORT_LED_INFO_SUP = 1 << 9,
- IB_PORT_SM_DISABLED = 1 << 10,
- IB_PORT_SYS_IMAGE_GUID_SUP = 1 << 11,
- IB_PORT_PKEY_SW_EXT_PORT_TRAP_SUP = 1 << 12,
- IB_PORT_EXTENDED_SPEEDS_SUP = 1 << 14,
- IB_PORT_CM_SUP = 1 << 16,
- IB_PORT_SNMP_TUNNEL_SUP = 1 << 17,
- IB_PORT_REINIT_SUP = 1 << 18,
- IB_PORT_DEVICE_MGMT_SUP = 1 << 19,
- IB_PORT_VENDOR_CLASS_SUP = 1 << 20,
- IB_PORT_DR_NOTICE_SUP = 1 << 21,
- IB_PORT_CAP_MASK_NOTICE_SUP = 1 << 22,
- IB_PORT_BOOT_MGMT_SUP = 1 << 23,
- IB_PORT_LINK_LATENCY_SUP = 1 << 24,
- IB_PORT_CLIENT_REG_SUP = 1 << 25,
- IB_PORT_IP_BASED_GIDS = 1 << 26,
-};
-
enum ib_port_width {
IB_WIDTH_1X = 1,
IB_WIDTH_4X = 2,
@@ -554,6 +529,7 @@ static inline struct rdma_hw_stats *rdma_alloc_hw_stats_struct(
#define RDMA_CORE_CAP_AF_IB 0x00001000
#define RDMA_CORE_CAP_ETH_AH 0x00002000
#define RDMA_CORE_CAP_OPA_AH 0x00004000
+#define RDMA_CORE_CAP_IB_GRH_REQUIRED 0x00008000
/* Protocol 0xFFF00000 */
#define RDMA_CORE_CAP_PROT_IB 0x00100000
@@ -563,6 +539,10 @@ static inline struct rdma_hw_stats *rdma_alloc_hw_stats_struct(
#define RDMA_CORE_CAP_PROT_RAW_PACKET 0x01000000
#define RDMA_CORE_CAP_PROT_USNIC 0x02000000
+#define RDMA_CORE_PORT_IB_GRH_REQUIRED (RDMA_CORE_CAP_IB_GRH_REQUIRED \
+ | RDMA_CORE_CAP_PROT_ROCE \
+ | RDMA_CORE_CAP_PROT_ROCE_UDP_ENCAP)
+
#define RDMA_CORE_PORT_IBA_IB (RDMA_CORE_CAP_PROT_IB \
| RDMA_CORE_CAP_IB_MAD \
| RDMA_CORE_CAP_IB_SMI \
@@ -595,6 +575,8 @@ struct ib_port_attr {
enum ib_mtu max_mtu;
enum ib_mtu active_mtu;
int gid_tbl_len;
+ unsigned int ip_gids:1;
+ /* This is the value from PortInfo CapabilityMask, defined by IBA */
u32 port_cap_flags;
u32 max_msg_sz;
u32 bad_pkey_cntr;
@@ -610,7 +592,6 @@ struct ib_port_attr {
u8 active_width;
u8 active_speed;
u8 phys_state;
- bool grh_required;
};
enum ib_device_modify_flags {
@@ -689,6 +670,7 @@ struct ib_event_handler {
} while (0)
struct ib_global_route {
+ const struct ib_gid_attr *sgid_attr;
union ib_gid dgid;
u32 flow_label;
u8 sgid_index;
@@ -1370,7 +1352,7 @@ struct ib_rdma_wr {
u32 rkey;
};
-static inline struct ib_rdma_wr *rdma_wr(struct ib_send_wr *wr)
+static inline const struct ib_rdma_wr *rdma_wr(const struct ib_send_wr *wr)
{
return container_of(wr, struct ib_rdma_wr, wr);
}
@@ -1385,7 +1367,7 @@ struct ib_atomic_wr {
u32 rkey;
};
-static inline struct ib_atomic_wr *atomic_wr(struct ib_send_wr *wr)
+static inline const struct ib_atomic_wr *atomic_wr(const struct ib_send_wr *wr)
{
return container_of(wr, struct ib_atomic_wr, wr);
}
@@ -1402,7 +1384,7 @@ struct ib_ud_wr {
u8 port_num; /* valid for DR SMPs on switch only */
};
-static inline struct ib_ud_wr *ud_wr(struct ib_send_wr *wr)
+static inline const struct ib_ud_wr *ud_wr(const struct ib_send_wr *wr)
{
return container_of(wr, struct ib_ud_wr, wr);
}
@@ -1414,7 +1396,7 @@ struct ib_reg_wr {
int access;
};
-static inline struct ib_reg_wr *reg_wr(struct ib_send_wr *wr)
+static inline const struct ib_reg_wr *reg_wr(const struct ib_send_wr *wr)
{
return container_of(wr, struct ib_reg_wr, wr);
}
@@ -1427,7 +1409,8 @@ struct ib_sig_handover_wr {
struct ib_sge *prot;
};
-static inline struct ib_sig_handover_wr *sig_handover_wr(struct ib_send_wr *wr)
+static inline const struct ib_sig_handover_wr *
+sig_handover_wr(const struct ib_send_wr *wr)
{
return container_of(wr, struct ib_sig_handover_wr, wr);
}
@@ -1443,14 +1426,16 @@ struct ib_recv_wr {
};
enum ib_access_flags {
- IB_ACCESS_LOCAL_WRITE = 1,
- IB_ACCESS_REMOTE_WRITE = (1<<1),
- IB_ACCESS_REMOTE_READ = (1<<2),
- IB_ACCESS_REMOTE_ATOMIC = (1<<3),
- IB_ACCESS_MW_BIND = (1<<4),
- IB_ZERO_BASED = (1<<5),
- IB_ACCESS_ON_DEMAND = (1<<6),
- IB_ACCESS_HUGETLB = (1<<7),
+ IB_ACCESS_LOCAL_WRITE = IB_UVERBS_ACCESS_LOCAL_WRITE,
+ IB_ACCESS_REMOTE_WRITE = IB_UVERBS_ACCESS_REMOTE_WRITE,
+ IB_ACCESS_REMOTE_READ = IB_UVERBS_ACCESS_REMOTE_READ,
+ IB_ACCESS_REMOTE_ATOMIC = IB_UVERBS_ACCESS_REMOTE_ATOMIC,
+ IB_ACCESS_MW_BIND = IB_UVERBS_ACCESS_MW_BIND,
+ IB_ZERO_BASED = IB_UVERBS_ACCESS_ZERO_BASED,
+ IB_ACCESS_ON_DEMAND = IB_UVERBS_ACCESS_ON_DEMAND,
+ IB_ACCESS_HUGETLB = IB_UVERBS_ACCESS_HUGETLB,
+
+ IB_ACCESS_SUPPORTED = ((IB_ACCESS_HUGETLB << 1) - 1)
};
/*
@@ -1473,14 +1458,17 @@ struct ib_fmr_attr {
struct ib_umem;
enum rdma_remove_reason {
- /* Userspace requested uobject deletion. Call could fail */
+ /*
+ * Userspace requested uobject deletion or initial try
+ * to remove uobject via cleanup. Call could fail
+ */
RDMA_REMOVE_DESTROY,
/* Context deletion. This call should delete the actual object itself */
RDMA_REMOVE_CLOSE,
/* Driver is being hot-unplugged. This call should delete the actual object itself */
RDMA_REMOVE_DRIVER_REMOVE,
- /* Context is being cleaned-up, but commit was just completed */
- RDMA_REMOVE_DURING_CLEANUP,
+ /* uobj is being cleaned-up before being committed */
+ RDMA_REMOVE_ABORT,
};
struct ib_rdmacg_object {
@@ -1492,14 +1480,14 @@ struct ib_rdmacg_object {
struct ib_ucontext {
struct ib_device *device;
struct ib_uverbs_file *ufile;
+ /*
+ * 'closing' can be read by the driver only during a destroy callback,
+ * it is set when we are closing the file descriptor and indicates
+ * that mm_sem may be locked.
+ */
int closing;
- /* locking the uobjects_list */
- struct mutex uobjects_lock;
- struct list_head uobjects;
- /* protects cleanup process from other actions */
- struct rw_semaphore cleanup_rwsem;
- enum rdma_remove_reason cleanup_reason;
+ bool cleanup_retryable;
struct pid *tgid;
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
@@ -1524,6 +1512,9 @@ struct ib_ucontext {
struct ib_uobject {
u64 user_handle; /* handle given to us by userspace */
+ /* ufile & ucontext owning this object */
+ struct ib_uverbs_file *ufile;
+ /* FIXME, save memory: ufile->context == context */
struct ib_ucontext *context; /* associated user context */
void *object; /* containing object */
struct list_head list; /* link to context's list */
@@ -1533,13 +1524,7 @@ struct ib_uobject {
atomic_t usecnt; /* protects exclusive access */
struct rcu_head rcu; /* kfree_rcu() overhead */
- const struct uverbs_obj_type *type;
-};
-
-struct ib_uobject_file {
- struct ib_uobject uobj;
- /* ufile contains the lock between context release and file close */
- struct ib_uverbs_file *ufile;
+ const struct uverbs_api_object *uapi_object;
};
struct ib_udata {
@@ -1578,6 +1563,7 @@ struct ib_ah {
struct ib_device *device;
struct ib_pd *pd;
struct ib_uobject *uobject;
+ const struct ib_gid_attr *sgid_attr;
enum rdma_ah_attr_type type;
};
@@ -1776,6 +1762,9 @@ struct ib_qp {
struct ib_uobject *uobject;
void (*event_handler)(struct ib_event *, void *);
void *qp_context;
+ /* sgid_attrs associated with the AV's */
+ const struct ib_gid_attr *av_sgid_attr;
+ const struct ib_gid_attr *alt_path_sgid_attr;
u32 qp_num;
u32 max_write_sge;
u32 max_read_sge;
@@ -2098,6 +2087,7 @@ struct ib_flow_attr {
struct ib_flow {
struct ib_qp *qp;
+ struct ib_device *device;
struct ib_uobject *uobject;
};
@@ -2213,7 +2203,11 @@ struct rdma_netdev {
struct ib_device *hca;
u8 port_num;
- /* cleanup function must be specified */
+ /*
+ * cleanup function must be specified.
+ * FIXME: This is only used for OPA_VNIC and that usage should be
+ * removed too.
+ */
void (*free_rdma_netdev)(struct net_device *netdev);
/* control functions */
@@ -2242,11 +2236,6 @@ struct ib_counters {
atomic_t usecnt;
};
-enum ib_read_counters_flags {
- /* prefer read values from driver cache */
- IB_READ_COUNTERS_ATTR_PREFER_CACHED = 1 << 0,
-};
-
struct ib_counters_read_attr {
u64 *counters_buff;
u32 ncounters;
@@ -2341,8 +2330,7 @@ struct ib_device {
* concurrently for different ports. This function is only called when
* roce_gid_table is used.
*/
- int (*add_gid)(const union ib_gid *gid,
- const struct ib_gid_attr *attr,
+ int (*add_gid)(const struct ib_gid_attr *attr,
void **context);
/* When calling del_gid, the HW vendor's driver should delete the
* gid of device @device at gid index gid_index of port port_num
@@ -2389,8 +2377,8 @@ struct ib_device {
struct ib_srq_attr *srq_attr);
int (*destroy_srq)(struct ib_srq *srq);
int (*post_srq_recv)(struct ib_srq *srq,
- struct ib_recv_wr *recv_wr,
- struct ib_recv_wr **bad_recv_wr);
+ const struct ib_recv_wr *recv_wr,
+ const struct ib_recv_wr **bad_recv_wr);
struct ib_qp * (*create_qp)(struct ib_pd *pd,
struct ib_qp_init_attr *qp_init_attr,
struct ib_udata *udata);
@@ -2404,11 +2392,11 @@ struct ib_device {
struct ib_qp_init_attr *qp_init_attr);
int (*destroy_qp)(struct ib_qp *qp);
int (*post_send)(struct ib_qp *qp,
- struct ib_send_wr *send_wr,
- struct ib_send_wr **bad_send_wr);
+ const struct ib_send_wr *send_wr,
+ const struct ib_send_wr **bad_send_wr);
int (*post_recv)(struct ib_qp *qp,
- struct ib_recv_wr *recv_wr,
- struct ib_recv_wr **bad_recv_wr);
+ const struct ib_recv_wr *recv_wr,
+ const struct ib_recv_wr **bad_recv_wr);
struct ib_cq * (*create_cq)(struct ib_device *device,
const struct ib_cq_init_attr *attr,
struct ib_ucontext *context,
@@ -2592,7 +2580,7 @@ struct ib_device {
const struct cpumask *(*get_vector_affinity)(struct ib_device *ibdev,
int comp_vector);
- struct uverbs_root_spec *specs_root;
+ const struct uverbs_object_tree_def *const *driver_specs;
enum rdma_driver_id driver_id;
};
@@ -2679,6 +2667,46 @@ static inline bool ib_is_udata_cleared(struct ib_udata *udata,
}
/**
+ * ib_is_destroy_retryable - Check whether the uobject destruction
+ * is retryable.
+ * @ret: The initial destruction return code
+ * @why: remove reason
+ * @uobj: The uobject that is destroyed
+ *
+ * This function is a helper function that IB layer and low-level drivers
+ * can use to consider whether the destruction of the given uobject is
+ * retry-able.
+ * It checks the original return code, if it wasn't success the destruction
+ * is retryable according to the ucontext state (i.e. cleanup_retryable) and
+ * the remove reason. (i.e. why).
+ * Must be called with the object locked for destroy.
+ */
+static inline bool ib_is_destroy_retryable(int ret, enum rdma_remove_reason why,
+ struct ib_uobject *uobj)
+{
+ return ret && (why == RDMA_REMOVE_DESTROY ||
+ uobj->context->cleanup_retryable);
+}
+
+/**
+ * ib_destroy_usecnt - Called during destruction to check the usecnt
+ * @usecnt: The usecnt atomic
+ * @why: remove reason
+ * @uobj: The uobject that is destroyed
+ *
+ * Non-zero usecnts will block destruction unless destruction was triggered by
+ * a ucontext cleanup.
+ */
+static inline int ib_destroy_usecnt(atomic_t *usecnt,
+ enum rdma_remove_reason why,
+ struct ib_uobject *uobj)
+{
+ if (atomic_read(usecnt) && ib_is_destroy_retryable(-EBUSY, why, uobj))
+ return -EBUSY;
+ return 0;
+}
+
+/**
* ib_modify_qp_is_ok - Check that the supplied attribute mask
* contains all required attributes and no attributes not allowed for
* the given QP state transition.
@@ -2755,6 +2783,13 @@ static inline int rdma_is_port_valid(const struct ib_device *device,
port <= rdma_end_port(device));
}
+static inline bool rdma_is_grh_required(const struct ib_device *device,
+ u8 port_num)
+{
+ return device->port_immutable[port_num].core_cap_flags &
+ RDMA_CORE_PORT_IB_GRH_REQUIRED;
+}
+
static inline bool rdma_protocol_ib(const struct ib_device *device, u8 port_num)
{
return device->port_immutable[port_num].core_cap_flags & RDMA_CORE_CAP_PROT_IB;
@@ -3046,10 +3081,6 @@ static inline bool rdma_cap_read_inv(struct ib_device *dev, u32 port_num)
return rdma_protocol_iwarp(dev, port_num);
}
-int ib_query_gid(struct ib_device *device,
- u8 port_num, int index, union ib_gid *gid,
- struct ib_gid_attr *attr);
-
int ib_set_vf_link_state(struct ib_device *device, int vf, u8 port,
int state);
int ib_get_vf_config(struct ib_device *device, int vf, u8 port,
@@ -3148,6 +3179,13 @@ int ib_get_rdma_header_version(const union rdma_network_hdr *hdr);
* ignored unless the work completion indicates that the GRH is valid.
* @ah_attr: Returned attributes that can be used when creating an address
* handle for replying to the message.
+ * When ib_init_ah_attr_from_wc() returns success,
+ * (a) for IB link layer it optionally contains a reference to SGID attribute
+ * when GRH is present for IB link layer.
+ * (b) for RoCE link layer it contains a reference to SGID attribute.
+ * User must invoke rdma_cleanup_ah_attr_gid_attr() to release reference to SGID
+ * attributes which are initialized using ib_init_ah_attr_from_wc().
+ *
*/
int ib_init_ah_attr_from_wc(struct ib_device *device, u8 port_num,
const struct ib_wc *wc, const struct ib_grh *grh,
@@ -3247,10 +3285,12 @@ int ib_destroy_srq(struct ib_srq *srq);
* the work request that failed to be posted on the QP.
*/
static inline int ib_post_srq_recv(struct ib_srq *srq,
- struct ib_recv_wr *recv_wr,
- struct ib_recv_wr **bad_recv_wr)
+ const struct ib_recv_wr *recv_wr,
+ const struct ib_recv_wr **bad_recv_wr)
{
- return srq->device->post_srq_recv(srq, recv_wr, bad_recv_wr);
+ const struct ib_recv_wr *dummy;
+
+ return srq->device->post_srq_recv(srq, recv_wr, bad_recv_wr ? : &dummy);
}
/**
@@ -3348,10 +3388,12 @@ int ib_close_qp(struct ib_qp *qp);
* earlier work requests in the list.
*/
static inline int ib_post_send(struct ib_qp *qp,
- struct ib_send_wr *send_wr,
- struct ib_send_wr **bad_send_wr)
+ const struct ib_send_wr *send_wr,
+ const struct ib_send_wr **bad_send_wr)
{
- return qp->device->post_send(qp, send_wr, bad_send_wr);
+ const struct ib_send_wr *dummy;
+
+ return qp->device->post_send(qp, send_wr, bad_send_wr ? : &dummy);
}
/**
@@ -3363,10 +3405,12 @@ static inline int ib_post_send(struct ib_qp *qp,
* the work request that failed to be posted on the QP.
*/
static inline int ib_post_recv(struct ib_qp *qp,
- struct ib_recv_wr *recv_wr,
- struct ib_recv_wr **bad_recv_wr)
+ const struct ib_recv_wr *recv_wr,
+ const struct ib_recv_wr **bad_recv_wr)
{
- return qp->device->post_recv(qp, recv_wr, bad_recv_wr);
+ const struct ib_recv_wr *dummy;
+
+ return qp->device->post_recv(qp, recv_wr, bad_recv_wr ? : &dummy);
}
struct ib_cq *__ib_alloc_cq(struct ib_device *dev, void *private,
@@ -3391,11 +3435,14 @@ int ib_process_cq_direct(struct ib_cq *cq, int budget);
*
* Users can examine the cq structure to determine the actual CQ size.
*/
-struct ib_cq *ib_create_cq(struct ib_device *device,
- ib_comp_handler comp_handler,
- void (*event_handler)(struct ib_event *, void *),
- void *cq_context,
- const struct ib_cq_init_attr *cq_attr);
+struct ib_cq *__ib_create_cq(struct ib_device *device,
+ ib_comp_handler comp_handler,
+ void (*event_handler)(struct ib_event *, void *),
+ void *cq_context,
+ const struct ib_cq_init_attr *cq_attr,
+ const char *caller);
+#define ib_create_cq(device, cmp_hndlr, evt_hndlr, cq_ctxt, cq_attr) \
+ __ib_create_cq((device), (cmp_hndlr), (evt_hndlr), (cq_ctxt), (cq_attr), KBUILD_MODNAME)
/**
* ib_resize_cq - Modifies the capacity of the CQ.
@@ -3798,10 +3845,6 @@ struct ib_xrcd *__ib_alloc_xrcd(struct ib_device *device, const char *caller);
*/
int ib_dealloc_xrcd(struct ib_xrcd *xrcd);
-struct ib_flow *ib_create_flow(struct ib_qp *qp,
- struct ib_flow_attr *flow_attr, int domain);
-int ib_destroy_flow(struct ib_flow *flow_id);
-
static inline int ib_check_mr_access(int flags)
{
/*
@@ -4030,8 +4073,19 @@ static inline void rdma_ah_set_grh(struct rdma_ah_attr *attr,
grh->sgid_index = sgid_index;
grh->hop_limit = hop_limit;
grh->traffic_class = traffic_class;
+ grh->sgid_attr = NULL;
}
+void rdma_destroy_ah_attr(struct rdma_ah_attr *ah_attr);
+void rdma_move_grh_sgid_attr(struct rdma_ah_attr *attr, union ib_gid *dgid,
+ u32 flow_label, u8 hop_limit, u8 traffic_class,
+ const struct ib_gid_attr *sgid_attr);
+void rdma_copy_ah_attr(struct rdma_ah_attr *dest,
+ const struct rdma_ah_attr *src);
+void rdma_replace_ah_attr(struct rdma_ah_attr *old,
+ const struct rdma_ah_attr *new);
+void rdma_move_ah_attr(struct rdma_ah_attr *dest, struct rdma_ah_attr *src);
+
/**
* rdma_ah_find_type - Return address handle type.
*
@@ -4099,6 +4153,20 @@ ib_get_vector_affinity(struct ib_device *device, int comp_vector)
}
+static inline void ib_set_flow(struct ib_uobject *uobj, struct ib_flow *ibflow,
+ struct ib_qp *qp, struct ib_device *device)
+{
+ uobj->object = ibflow;
+ ibflow->uobject = uobj;
+
+ if (qp) {
+ atomic_inc(&qp->usecnt);
+ ibflow->qp = qp;
+ }
+
+ ibflow->device = device;
+}
+
/**
* rdma_roce_rescan_device - Rescan all of the network devices in the system
* and add their gids, as needed, to the relevant RoCE devices.
@@ -4107,4 +4175,8 @@ ib_get_vector_affinity(struct ib_device *device, int comp_vector)
*/
void rdma_roce_rescan_device(struct ib_device *ibdev);
+struct ib_ucontext *ib_uverbs_get_ucontext(struct ib_uverbs_file *ufile);
+
+int uverbs_destroy_def_handler(struct ib_uverbs_file *file,
+ struct uverbs_attr_bundle *attrs);
#endif /* IB_VERBS_H */
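
An illustrative sketch, not part of the patch: posting a send work request against the const-ified ib_post_send(). The inline wrapper now substitutes a dummy pointer when bad_send_wr is NULL, so drivers always see a valid output pointer. The demo_ function and its parameters are hypothetical; QP setup and completion handling are omitted.

#include <rdma/ib_verbs.h>

static int demo_post_one_send(struct ib_qp *qp, u64 addr, u32 length,
			      u32 lkey)
{
	struct ib_sge sge = {
		.addr	= addr,
		.length	= length,
		.lkey	= lkey,
	};
	struct ib_send_wr wr = {
		.sg_list	= &sge,
		.num_sge	= 1,
		.opcode		= IB_WR_SEND,
		.send_flags	= IB_SEND_SIGNALED,
	};
	const struct ib_send_wr *bad_wr;

	/* bad_wr may also be passed as NULL now; the wrapper provides a
	 * dummy before calling into the driver.
	 */
	return ib_post_send(qp, &wr, &bad_wr);
}
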
diff --git a/include/rdma/opa_addr.h b/include/rdma/opa_addr.h
index 2bbb7a67e643..66d4393d339c 100644
--- a/include/rdma/opa_addr.h
+++ b/include/rdma/opa_addr.h
@@ -120,7 +120,7 @@ static inline bool rdma_is_valid_unicast_lid(struct rdma_ah_attr *attr)
if (attr->type == RDMA_AH_ATTR_TYPE_IB) {
if (!rdma_ah_get_dlid(attr) ||
rdma_ah_get_dlid(attr) >=
- be32_to_cpu(IB_MULTICAST_LID_BASE))
+ be16_to_cpu(IB_MULTICAST_LID_BASE))
return false;
} else if (attr->type == RDMA_AH_ATTR_TYPE_OPA) {
if (!rdma_ah_get_dlid(attr) ||
diff --git a/include/rdma/rdma_cm.h b/include/rdma/rdma_cm.h
index c5c1435c129a..5d71a7f51a9f 100644
--- a/include/rdma/rdma_cm.h
+++ b/include/rdma/rdma_cm.h
@@ -192,7 +192,7 @@ int rdma_bind_addr(struct rdma_cm_id *id, struct sockaddr *addr);
* @timeout_ms: Time to wait for resolution to complete.
*/
int rdma_resolve_addr(struct rdma_cm_id *id, struct sockaddr *src_addr,
- struct sockaddr *dst_addr, int timeout_ms);
+ const struct sockaddr *dst_addr, int timeout_ms);
/**
* rdma_resolve_route - Resolve the RDMA address bound to the RDMA identifier
diff --git a/include/rdma/rdmavt_qp.h b/include/rdma/rdmavt_qp.h
index 1145a4c154b2..927f6d5b6d0f 100644
--- a/include/rdma/rdmavt_qp.h
+++ b/include/rdma/rdmavt_qp.h
@@ -2,7 +2,7 @@
#define DEF_RDMAVT_INCQP_H
/*
- * Copyright(c) 2016, 2017 Intel Corporation.
+ * Copyright(c) 2016 - 2018 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -91,6 +91,7 @@
* RVT_S_WAIT_ACK - waiting for an ACK packet before sending more requests
* RVT_S_SEND_ONE - send one packet, request ACK, then wait for ACK
* RVT_S_ECN - a BECN was queued to the send engine
+ * RVT_S_MAX_BIT_MASK - The max bit that can be used by rdmavt
*/
#define RVT_S_SIGNAL_REQ_WR 0x0001
#define RVT_S_BUSY 0x0002
@@ -103,23 +104,26 @@
#define RVT_S_WAIT_SSN_CREDIT 0x0100
#define RVT_S_WAIT_DMA 0x0200
#define RVT_S_WAIT_PIO 0x0400
-#define RVT_S_WAIT_PIO_DRAIN 0x0800
-#define RVT_S_WAIT_TX 0x1000
-#define RVT_S_WAIT_DMA_DESC 0x2000
-#define RVT_S_WAIT_KMEM 0x4000
-#define RVT_S_WAIT_PSN 0x8000
-#define RVT_S_WAIT_ACK 0x10000
-#define RVT_S_SEND_ONE 0x20000
-#define RVT_S_UNLIMITED_CREDIT 0x40000
-#define RVT_S_AHG_VALID 0x80000
-#define RVT_S_AHG_CLEAR 0x100000
-#define RVT_S_ECN 0x200000
+#define RVT_S_WAIT_TX 0x0800
+#define RVT_S_WAIT_DMA_DESC 0x1000
+#define RVT_S_WAIT_KMEM 0x2000
+#define RVT_S_WAIT_PSN 0x4000
+#define RVT_S_WAIT_ACK 0x8000
+#define RVT_S_SEND_ONE 0x10000
+#define RVT_S_UNLIMITED_CREDIT 0x20000
+#define RVT_S_ECN 0x40000
+#define RVT_S_MAX_BIT_MASK 0x800000
+
+/*
+ * Drivers should use s_flags starting with bit 31 down to the bit next to
+ * RVT_S_MAX_BIT_MASK
+ */
/*
* Wait flags that would prevent any packet type from being sent.
*/
#define RVT_S_ANY_WAIT_IO \
- (RVT_S_WAIT_PIO | RVT_S_WAIT_PIO_DRAIN | RVT_S_WAIT_TX | \
+ (RVT_S_WAIT_PIO | RVT_S_WAIT_TX | \
RVT_S_WAIT_DMA_DESC | RVT_S_WAIT_KMEM)
/*
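
An illustrative sketch, not part of the patch: how a driver built on rdmavt might define its own private s_flags bits after this renumbering, growing down from bit 31 and staying above RVT_S_MAX_BIT_MASK as the new comment requires. The flag names are hypothetical.

#include <linux/bits.h>
#include <rdma/rdmavt_qp.h>

#define DEMO_S_WAIT_HALT	BIT(31)
#define DEMO_S_WAIT_TID_SPACE	BIT(30)

static inline bool demo_qp_is_halted(struct rvt_qp *qp)
{
	return !!(qp->s_flags & DEMO_S_WAIT_HALT);
}
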
diff --git a/include/rdma/uverbs_ioctl.h b/include/rdma/uverbs_ioctl.h
index bd6bba3a6e04..9e997c3c2f04 100644
--- a/include/rdma/uverbs_ioctl.h
+++ b/include/rdma/uverbs_ioctl.h
@@ -61,103 +61,195 @@ enum uverbs_obj_access {
UVERBS_ACCESS_DESTROY
};
-enum {
- UVERBS_ATTR_SPEC_F_MANDATORY = 1U << 0,
- /* Support extending attributes by length, validate all unknown size == zero */
- UVERBS_ATTR_SPEC_F_MIN_SZ_OR_ZERO = 1U << 1,
-};
-
/* Specification of a single attribute inside the ioctl message */
+/* good size 16 */
struct uverbs_attr_spec {
+ u8 type;
+
+ /*
+ * Support extending attributes by length. Allow the user to provide
+ * more bytes than ptr.len, but check that everything after is zero'd
+ * by the user.
+ */
+ u8 zero_trailing:1;
+ /*
+ * Valid only for PTR_IN. Allocate and copy the data inside
+ * the parser
+ */
+ u8 alloc_and_copy:1;
+ u8 mandatory:1;
+
union {
- /* Header shared by all following union members - to reduce space. */
- struct {
- enum uverbs_attr_type type;
- /* Combination of bits from enum UVERBS_ATTR_SPEC_F_XXXX */
- u8 flags;
- };
struct {
- enum uverbs_attr_type type;
- /* Combination of bits from enum UVERBS_ATTR_SPEC_F_XXXX */
- u8 flags;
/* Current known size to kernel */
- u16 len;
+ u16 len;
/* User isn't allowed to provide something < min_len */
- u16 min_len;
+ u16 min_len;
} ptr;
+
struct {
- enum uverbs_attr_type type;
- /* Combination of bits from enum UVERBS_ATTR_SPEC_F_XXXX */
- u8 flags;
/*
* higher bits mean the namespace and lower bits mean
* the type id within the namespace.
*/
- u16 obj_type;
- u8 access;
+ u16 obj_type;
+ u8 access;
} obj;
+
+ struct {
+ u8 num_elems;
+ } enum_def;
+ } u;
+
+ /* This weird split of the enum lets us remove some padding */
+ union {
struct {
- enum uverbs_attr_type type;
- /* Combination of bits from enum UVERBS_ATTR_SPEC_F_XXXX */
- u8 flags;
- u8 num_elems;
/*
* The enum attribute can select one of the attributes
* contained in the ids array. Currently only PTR_IN
* attributes are supported in the ids array.
*/
- const struct uverbs_attr_spec *ids;
+ const struct uverbs_attr_spec *ids;
} enum_def;
- };
+ } u2;
};
-struct uverbs_attr_spec_hash {
- size_t num_attrs;
- unsigned long *mandatory_attrs_bitmask;
- struct uverbs_attr_spec attrs[0];
+/*
+ * Information about the API is loaded into a radix tree. For IOCTL we start
+ * with a tuple of:
+ * object_id, attr_id, method_id
+ *
+ * Which is a 48 bit value, with most of the bits guaranteed to be zero. Based
+ * on the current kernel support this is compressed into 16 bit key for the
+ * radix tree. Since this compression is entirely internal to the kernel the
+ * below limits can be revised if the kernel gains additional data.
+ *
+ * With 64 leafs per node this is a 3 level radix tree.
+ *
+ * The tree encodes multiple types, and uses a scheme where OBJ_ID,0,0 returns
+ * the object slot, and OBJ_ID,METH_ID,0 and returns the method slot.
+ */
+enum uapi_radix_data {
+ UVERBS_API_NS_FLAG = 1U << UVERBS_ID_NS_SHIFT,
+
+ UVERBS_API_ATTR_KEY_BITS = 6,
+ UVERBS_API_ATTR_KEY_MASK = GENMASK(UVERBS_API_ATTR_KEY_BITS - 1, 0),
+ UVERBS_API_ATTR_BKEY_LEN = (1 << UVERBS_API_ATTR_KEY_BITS) - 1,
+
+ UVERBS_API_METHOD_KEY_BITS = 5,
+ UVERBS_API_METHOD_KEY_SHIFT = UVERBS_API_ATTR_KEY_BITS,
+ UVERBS_API_METHOD_KEY_NUM_CORE = 24,
+ UVERBS_API_METHOD_KEY_NUM_DRIVER = (1 << UVERBS_API_METHOD_KEY_BITS) -
+ UVERBS_API_METHOD_KEY_NUM_CORE,
+ UVERBS_API_METHOD_KEY_MASK = GENMASK(
+ UVERBS_API_METHOD_KEY_BITS + UVERBS_API_METHOD_KEY_SHIFT - 1,
+ UVERBS_API_METHOD_KEY_SHIFT),
+
+ UVERBS_API_OBJ_KEY_BITS = 5,
+ UVERBS_API_OBJ_KEY_SHIFT =
+ UVERBS_API_METHOD_KEY_BITS + UVERBS_API_METHOD_KEY_SHIFT,
+ UVERBS_API_OBJ_KEY_NUM_CORE = 24,
+ UVERBS_API_OBJ_KEY_NUM_DRIVER =
+ (1 << UVERBS_API_OBJ_KEY_BITS) - UVERBS_API_OBJ_KEY_NUM_CORE,
+ UVERBS_API_OBJ_KEY_MASK = GENMASK(31, UVERBS_API_OBJ_KEY_SHIFT),
+
+ /* This id guaranteed to not exist in the radix tree */
+ UVERBS_API_KEY_ERR = 0xFFFFFFFF,
};
-struct uverbs_attr_bundle;
-struct ib_uverbs_file;
+static inline __attribute_const__ u32 uapi_key_obj(u32 id)
+{
+ if (id & UVERBS_API_NS_FLAG) {
+ id &= ~UVERBS_API_NS_FLAG;
+ if (id >= UVERBS_API_OBJ_KEY_NUM_DRIVER)
+ return UVERBS_API_KEY_ERR;
+ id = id + UVERBS_API_OBJ_KEY_NUM_CORE;
+ } else {
+ if (id >= UVERBS_API_OBJ_KEY_NUM_CORE)
+ return UVERBS_API_KEY_ERR;
+ }
-enum {
- /*
- * Action marked with this flag creates a context (or root for all
- * objects).
- */
- UVERBS_ACTION_FLAG_CREATE_ROOT = 1U << 0,
-};
+ return id << UVERBS_API_OBJ_KEY_SHIFT;
+}
-struct uverbs_method_spec {
- /* Combination of bits from enum UVERBS_ACTION_FLAG_XXXX */
- u32 flags;
- size_t num_buckets;
- size_t num_child_attrs;
- int (*handler)(struct ib_device *ib_dev, struct ib_uverbs_file *ufile,
- struct uverbs_attr_bundle *ctx);
- struct uverbs_attr_spec_hash *attr_buckets[0];
-};
+static inline __attribute_const__ bool uapi_key_is_object(u32 key)
+{
+ return (key & ~UVERBS_API_OBJ_KEY_MASK) == 0;
+}
-struct uverbs_method_spec_hash {
- size_t num_methods;
- struct uverbs_method_spec *methods[0];
-};
+static inline __attribute_const__ u32 uapi_key_ioctl_method(u32 id)
+{
+ if (id & UVERBS_API_NS_FLAG) {
+ id &= ~UVERBS_API_NS_FLAG;
+ if (id >= UVERBS_API_METHOD_KEY_NUM_DRIVER)
+ return UVERBS_API_KEY_ERR;
+ id = id + UVERBS_API_METHOD_KEY_NUM_CORE;
+ } else {
+ id++;
+ if (id >= UVERBS_API_METHOD_KEY_NUM_CORE)
+ return UVERBS_API_KEY_ERR;
+ }
-struct uverbs_object_spec {
- const struct uverbs_obj_type *type_attrs;
- size_t num_buckets;
- struct uverbs_method_spec_hash *method_buckets[0];
-};
+ return id << UVERBS_API_METHOD_KEY_SHIFT;
+}
-struct uverbs_object_spec_hash {
- size_t num_objects;
- struct uverbs_object_spec *objects[0];
-};
+static inline __attribute_const__ u32 uapi_key_attr_to_method(u32 attr_key)
+{
+ return attr_key &
+ (UVERBS_API_OBJ_KEY_MASK | UVERBS_API_METHOD_KEY_MASK);
+}
-struct uverbs_root_spec {
- size_t num_buckets;
- struct uverbs_object_spec_hash *object_buckets[0];
-};
+static inline __attribute_const__ bool uapi_key_is_ioctl_method(u32 key)
+{
+ return (key & UVERBS_API_METHOD_KEY_MASK) != 0 &&
+ (key & UVERBS_API_ATTR_KEY_MASK) == 0;
+}
+
+static inline __attribute_const__ u32 uapi_key_attrs_start(u32 ioctl_method_key)
+{
+ /* 0 is the method slot itself */
+ return ioctl_method_key + 1;
+}
+
+static inline __attribute_const__ u32 uapi_key_attr(u32 id)
+{
+ /*
+ * The attr is designed to fit in the typical single radix tree node
+ * of 64 entries. Since almost all methods have driver attributes we
+ * organize things so that the driver and core attributes interleave to
+ * reduce the length of the attributes array in typical cases.
+ */
+ if (id & UVERBS_API_NS_FLAG) {
+ id &= ~UVERBS_API_NS_FLAG;
+ id++;
+ if (id >= 1 << (UVERBS_API_ATTR_KEY_BITS - 1))
+ return UVERBS_API_KEY_ERR;
+ id = (id << 1) | 0;
+ } else {
+ if (id >= 1 << (UVERBS_API_ATTR_KEY_BITS - 1))
+ return UVERBS_API_KEY_ERR;
+ id = (id << 1) | 1;
+ }
+
+ return id;
+}
+
+static inline __attribute_const__ bool uapi_key_is_attr(u32 key)
+{
+ return (key & UVERBS_API_METHOD_KEY_MASK) != 0 &&
+ (key & UVERBS_API_ATTR_KEY_MASK) != 0;
+}
+
+/*
+ * This returns a value in the range [0 to UVERBS_API_ATTR_BKEY_LEN),
+ * basically it undoes the reservation of 0 in the ID numbering. attr_key
+ * must already be masked with UVERBS_API_ATTR_KEY_MASK, or be the output of
+ * uapi_key_attr().
+ */
+static inline __attribute_const__ u32 uapi_bkey_attr(u32 attr_key)
+{
+ return attr_key - 1;
+}
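
An illustrative sketch, not part of the patch: how a full radix-tree key for one attribute is composed from the object, method and attribute ids under the new scheme, by OR-ing the shifted sub-keys. The ids are made up and, for brevity, only the object sub-key is checked for UVERBS_API_KEY_ERR; a real user would check each part.

#include <rdma/uverbs_ioctl.h>

static u32 demo_attr_key(u16 obj_id, u16 method_id, u16 attr_id)
{
	u32 key = uapi_key_obj(obj_id);

	if (key == UVERBS_API_KEY_ERR)
		return UVERBS_API_KEY_ERR;

	/* OBJ_ID,0,0 names the object slot, OBJ_ID,METH_ID,0 the method
	 * slot; adding the attr bits names the attribute slot.
	 */
	key |= uapi_key_ioctl_method(method_id);
	key |= uapi_key_attr(attr_id);
	return key;
}
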
/*
* =======================================
@@ -176,7 +268,7 @@ struct uverbs_method_def {
u32 flags;
size_t num_attrs;
const struct uverbs_attr_def * const (*attrs)[];
- int (*handler)(struct ib_device *ib_dev, struct ib_uverbs_file *ufile,
+ int (*handler)(struct ib_uverbs_file *ufile,
struct uverbs_attr_bundle *ctx);
};
@@ -192,196 +284,171 @@ struct uverbs_object_tree_def {
const struct uverbs_object_def * const (*objects)[];
};
-#define UA_FLAGS(_flags) .flags = _flags
-#define __UVERBS_ATTR0(_id, _type, _fld, _attr, ...) \
- ((const struct uverbs_attr_def) \
- {.id = _id, .attr = {{._fld = {.type = _type, _attr, .flags = 0, } }, } })
-#define __UVERBS_ATTR1(_id, _type, _fld, _attr, _extra1, ...) \
- ((const struct uverbs_attr_def) \
- {.id = _id, .attr = {{._fld = {.type = _type, _attr, _extra1 } },} })
-#define __UVERBS_ATTR2(_id, _type, _fld, _attr, _extra1, _extra2) \
- ((const struct uverbs_attr_def) \
- {.id = _id, .attr = {{._fld = {.type = _type, _attr, _extra1, _extra2 } },} })
-#define __UVERBS_ATTR(_id, _type, _fld, _attr, _extra1, _extra2, _n, ...) \
- __UVERBS_ATTR##_n(_id, _type, _fld, _attr, _extra1, _extra2)
+/*
+ * =======================================
+ * Attribute Specifications
+ * =======================================
+ */
-#define UVERBS_ATTR_TYPE(_type) \
- .min_len = sizeof(_type), .len = sizeof(_type)
-#define UVERBS_ATTR_STRUCT(_type, _last) \
- .min_len = ((uintptr_t)(&((_type *)0)->_last + 1)), .len = sizeof(_type)
#define UVERBS_ATTR_SIZE(_min_len, _len) \
- .min_len = _min_len, .len = _len
+ .u.ptr.min_len = _min_len, .u.ptr.len = _len
+
+#define UVERBS_ATTR_NO_DATA() UVERBS_ATTR_SIZE(0, 0)
/*
- * In new compiler, UVERBS_ATTR could be simplified by declaring it as
- * [_id] = {.type = _type, .len = _len, ##__VA_ARGS__}
- * But since we support older compilers too, we need the more complex code.
+ * Specifies a uapi structure that cannot be extended. The user must always
+ * supply the whole structure and nothing more. The structure must be declared
+ * in a header under include/uapi/rdma.
*/
-#define UVERBS_ATTR(_id, _type, _fld, _attr, ...) \
- __UVERBS_ATTR(_id, _type, _fld, _attr, ##__VA_ARGS__, 2, 1, 0)
-#define UVERBS_ATTR_PTR_IN_SZ(_id, _len, ...) \
- UVERBS_ATTR(_id, UVERBS_ATTR_TYPE_PTR_IN, ptr, _len, ##__VA_ARGS__)
-/* If sizeof(_type) <= sizeof(u64), this will be inlined rather than a pointer */
-#define UVERBS_ATTR_PTR_IN(_id, _type, ...) \
- UVERBS_ATTR_PTR_IN_SZ(_id, _type, ##__VA_ARGS__)
-#define UVERBS_ATTR_PTR_OUT_SZ(_id, _len, ...) \
- UVERBS_ATTR(_id, UVERBS_ATTR_TYPE_PTR_OUT, ptr, _len, ##__VA_ARGS__)
-#define UVERBS_ATTR_PTR_OUT(_id, _type, ...) \
- UVERBS_ATTR_PTR_OUT_SZ(_id, _type, ##__VA_ARGS__)
-#define UVERBS_ATTR_ENUM_IN(_id, _enum_arr, ...) \
- UVERBS_ATTR(_id, UVERBS_ATTR_TYPE_ENUM_IN, enum_def, \
- .ids = (_enum_arr), \
- .num_elems = ARRAY_SIZE(_enum_arr), ##__VA_ARGS__)
+#define UVERBS_ATTR_TYPE(_type) \
+ .u.ptr.min_len = sizeof(_type), .u.ptr.len = sizeof(_type)
+/*
+ * Specifies a uapi structure where the user must provide at least up to
+ * member 'last'. Anything after last and up until the end of the structure
+ * can be non-zero, anything longer than the end of the structure must be
+ * zero. The structure must be declared in a header under include/uapi/rdma.
+ */
+#define UVERBS_ATTR_STRUCT(_type, _last) \
+ .zero_trailing = 1, \
+ UVERBS_ATTR_SIZE(((uintptr_t)(&((_type *)0)->_last + 1)), \
+ sizeof(_type))
+/*
+ * Specifies at least min_len bytes must be passed in, but the amount can be
+ * larger, up to the protocol maximum size. No check for zeroing is done.
+ */
+#define UVERBS_ATTR_MIN_SIZE(_min_len) UVERBS_ATTR_SIZE(_min_len, USHRT_MAX)
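
To make the three size specs above concrete, consider a hypothetical response layout (illustration only; struct my_resp is not a real uapi structure):

/*
 * struct my_resp { __u32 handle; __u32 comp_mask; __u64 reserved; };  (16 bytes)
 *
 * UVERBS_ATTR_TYPE(struct my_resp)              -> min_len = 16, len = 16
 * UVERBS_ATTR_STRUCT(struct my_resp, comp_mask) -> min_len = 8, len = 16,
 *                                                  zero_trailing = 1
 * UVERBS_ATTR_MIN_SIZE(8)                       -> min_len = 8, len = USHRT_MAX
 */
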
+
+/* Must be used in the '...' of any UVERBS_ATTR */
+#define UA_ALLOC_AND_COPY .alloc_and_copy = 1
+#define UA_MANDATORY .mandatory = 1
+#define UA_OPTIONAL .mandatory = 0
+
+#define UVERBS_ATTR_IDR(_attr_id, _idr_type, _access, ...) \
+ (&(const struct uverbs_attr_def){ \
+ .id = _attr_id, \
+ .attr = { .type = UVERBS_ATTR_TYPE_IDR, \
+ .u.obj.obj_type = _idr_type, \
+ .u.obj.access = _access, \
+ __VA_ARGS__ } })
+
+#define UVERBS_ATTR_FD(_attr_id, _fd_type, _access, ...) \
+ (&(const struct uverbs_attr_def){ \
+ .id = (_attr_id) + \
+ BUILD_BUG_ON_ZERO((_access) != UVERBS_ACCESS_NEW && \
+ (_access) != UVERBS_ACCESS_READ), \
+ .attr = { .type = UVERBS_ATTR_TYPE_FD, \
+ .u.obj.obj_type = _fd_type, \
+ .u.obj.access = _access, \
+ __VA_ARGS__ } })
+
+#define UVERBS_ATTR_PTR_IN(_attr_id, _type, ...) \
+ (&(const struct uverbs_attr_def){ \
+ .id = _attr_id, \
+ .attr = { .type = UVERBS_ATTR_TYPE_PTR_IN, \
+ _type, \
+ __VA_ARGS__ } })
+
+#define UVERBS_ATTR_PTR_OUT(_attr_id, _type, ...) \
+ (&(const struct uverbs_attr_def){ \
+ .id = _attr_id, \
+ .attr = { .type = UVERBS_ATTR_TYPE_PTR_OUT, \
+ _type, \
+ __VA_ARGS__ } })
+
+/* _enum_arr should be a 'static const union uverbs_attr_spec[]' */
+#define UVERBS_ATTR_ENUM_IN(_attr_id, _enum_arr, ...) \
+ (&(const struct uverbs_attr_def){ \
+ .id = _attr_id, \
+ .attr = { .type = UVERBS_ATTR_TYPE_ENUM_IN, \
+ .u2.enum_def.ids = _enum_arr, \
+ .u.enum_def.num_elems = ARRAY_SIZE(_enum_arr), \
+ __VA_ARGS__ }, \
+ })
/*
- * In new compiler, UVERBS_ATTR_IDR (and FD) could be simplified by declaring
- * it as
- * {.id = _id, \
- * .attr {.type = __obj_class, \
- * .obj = {.obj_type = _idr_type, \
- * .access = _access \
- * }, ##__VA_ARGS__ } }
- * But since we support older compilers too, we need the more complex code.
+ * An input value that is a bitwise combination of values of _enum_type.
+ * This permits the flag value to be passed as either a u32 or u64; it must
+ * be retrieved via uverbs_get_flags32() or uverbs_get_flags64().
*/
-#define ___UVERBS_ATTR_OBJ0(_id, _obj_class, _obj_type, _access, ...)\
- ((const struct uverbs_attr_def) \
- {.id = _id, \
- .attr = { {.obj = {.type = _obj_class, .obj_type = _obj_type, \
- .access = _access, .flags = 0 } }, } })
-#define ___UVERBS_ATTR_OBJ1(_id, _obj_class, _obj_type, _access, _flags)\
- ((const struct uverbs_attr_def) \
- {.id = _id, \
- .attr = { {.obj = {.type = _obj_class, .obj_type = _obj_type, \
- .access = _access, _flags} }, } })
-#define ___UVERBS_ATTR_OBJ(_id, _obj_class, _obj_type, _access, _flags, \
- _n, ...) \
- ___UVERBS_ATTR_OBJ##_n(_id, _obj_class, _obj_type, _access, _flags)
-#define __UVERBS_ATTR_OBJ(_id, _obj_class, _obj_type, _access, ...) \
- ___UVERBS_ATTR_OBJ(_id, _obj_class, _obj_type, _access, \
- ##__VA_ARGS__, 1, 0)
-#define UVERBS_ATTR_IDR(_id, _idr_type, _access, ...) \
- __UVERBS_ATTR_OBJ(_id, UVERBS_ATTR_TYPE_IDR, _idr_type, _access,\
- ##__VA_ARGS__)
-#define UVERBS_ATTR_FD(_id, _fd_type, _access, ...) \
- __UVERBS_ATTR_OBJ(_id, UVERBS_ATTR_TYPE_FD, _fd_type, \
- (_access) + BUILD_BUG_ON_ZERO( \
- (_access) != UVERBS_ACCESS_NEW && \
- (_access) != UVERBS_ACCESS_READ), \
- ##__VA_ARGS__)
-#define DECLARE_UVERBS_ATTR_SPEC(_name, ...) \
- const struct uverbs_attr_def _name = __VA_ARGS__
-
-#define DECLARE_UVERBS_ENUM(_name, ...) \
- const struct uverbs_enum_spec _name = { \
- .len = ARRAY_SIZE(((struct uverbs_attr_spec[]){__VA_ARGS__})),\
- .ids = {__VA_ARGS__}, \
+#define UVERBS_ATTR_FLAGS_IN(_attr_id, _enum_type, ...) \
+ UVERBS_ATTR_PTR_IN( \
+ _attr_id, \
+ UVERBS_ATTR_SIZE(sizeof(u32) + BUILD_BUG_ON_ZERO( \
+ !sizeof(_enum_type *)), \
+ sizeof(u64)), \
+ __VA_ARGS__)
+
+/*
+ * This spec is used in order to pass information to the hardware driver in a
+ * legacy way. Every verb that could get driver specific data should get this
+ * spec.
+ */
+#define UVERBS_ATTR_UHW() \
+ UVERBS_ATTR_PTR_IN(UVERBS_ATTR_UHW_IN, \
+ UVERBS_ATTR_MIN_SIZE(0), \
+ UA_OPTIONAL), \
+ UVERBS_ATTR_PTR_OUT(UVERBS_ATTR_UHW_OUT, \
+ UVERBS_ATTR_MIN_SIZE(0), \
+ UA_OPTIONAL)
+
+/*
+ * =======================================
+ * Declaration helpers
+ * =======================================
+ */
+
+#define DECLARE_UVERBS_OBJECT_TREE(_name, ...) \
+ static const struct uverbs_object_def *const _name##_ptr[] = { \
+ __VA_ARGS__, \
+ }; \
+ static const struct uverbs_object_tree_def _name = { \
+ .num_objects = ARRAY_SIZE(_name##_ptr), \
+ .objects = &_name##_ptr, \
}
-#define _UVERBS_METHOD_ATTRS_SZ(...) \
- (sizeof((const struct uverbs_attr_def * const []){__VA_ARGS__}) /\
- sizeof(const struct uverbs_attr_def *))
-#define _UVERBS_METHOD(_id, _handler, _flags, ...) \
- ((const struct uverbs_method_def) { \
- .id = _id, \
- .flags = _flags, \
- .handler = _handler, \
- .num_attrs = _UVERBS_METHOD_ATTRS_SZ(__VA_ARGS__), \
- .attrs = &(const struct uverbs_attr_def * const []){__VA_ARGS__} })
-#define DECLARE_UVERBS_METHOD(_name, _id, _handler, ...) \
- const struct uverbs_method_def _name = \
- _UVERBS_METHOD(_id, _handler, 0, ##__VA_ARGS__)
-#define DECLARE_UVERBS_CTX_METHOD(_name, _id, _handler, _flags, ...) \
- const struct uverbs_method_def _name = \
- _UVERBS_METHOD(_id, _handler, \
- UVERBS_ACTION_FLAG_CREATE_ROOT, \
- ##__VA_ARGS__)
-#define _UVERBS_OBJECT_METHODS_SZ(...) \
- (sizeof((const struct uverbs_method_def * const []){__VA_ARGS__}) / \
- sizeof(const struct uverbs_method_def *))
-#define _UVERBS_OBJECT(_id, _type_attrs, ...) \
- ((const struct uverbs_object_def) { \
- .id = _id, \
- .type_attrs = _type_attrs, \
- .num_methods = _UVERBS_OBJECT_METHODS_SZ(__VA_ARGS__), \
- .methods = &(const struct uverbs_method_def * const []){__VA_ARGS__} })
-#define DECLARE_UVERBS_OBJECT(_name, _id, _type_attrs, ...) \
- const struct uverbs_object_def _name = \
- _UVERBS_OBJECT(_id, _type_attrs, ##__VA_ARGS__)
-#define _UVERBS_TREE_OBJECTS_SZ(...) \
- (sizeof((const struct uverbs_object_def * const []){__VA_ARGS__}) / \
- sizeof(const struct uverbs_object_def *))
-#define _UVERBS_OBJECT_TREE(...) \
- ((const struct uverbs_object_tree_def) { \
- .num_objects = _UVERBS_TREE_OBJECTS_SZ(__VA_ARGS__), \
- .objects = &(const struct uverbs_object_def * const []){__VA_ARGS__} })
-#define DECLARE_UVERBS_OBJECT_TREE(_name, ...) \
- const struct uverbs_object_tree_def _name = \
- _UVERBS_OBJECT_TREE(__VA_ARGS__)
/* =================================================
* Parsing infrastructure
* =================================================
*/
+
struct uverbs_ptr_attr {
- u64 data;
+ /*
+ * If the spec has .alloc_and_copy set (see UA_ALLOC_AND_COPY) then
+ * 'ptr' is used.
+ */
+ union {
+ void *ptr;
+ u64 data;
+ };
u16 len;
- /* Combination of bits from enum UVERBS_ATTR_F_XXXX */
- u16 flags;
+ u16 uattr_idx;
u8 enum_id;
};
struct uverbs_obj_attr {
- /* pointer to the kernel descriptor -> type, access, etc */
- const struct uverbs_obj_type *type;
struct ib_uobject *uobject;
- /* fd or id in idr of this object */
- int id;
+ const struct uverbs_api_attr *attr_elm;
};
struct uverbs_attr {
- /*
- * pointer to the user-space given attribute, in order to write the
- * new uobject's id or update flags.
- */
- struct ib_uverbs_attr __user *uattr;
union {
struct uverbs_ptr_attr ptr_attr;
struct uverbs_obj_attr obj_attr;
};
};
-struct uverbs_attr_bundle_hash {
- /* if bit i is set, it means attrs[i] contains valid information */
- unsigned long *valid_bitmap;
- size_t num_attrs;
- /*
- * arrays of attributes, each element corresponds to the specification
- * of the attribute in the same index.
- */
- struct uverbs_attr *attrs;
-};
-
struct uverbs_attr_bundle {
- size_t num_buckets;
- struct uverbs_attr_bundle_hash hash[];
+ struct ib_uverbs_file *ufile;
+ DECLARE_BITMAP(attr_present, UVERBS_API_ATTR_BKEY_LEN);
+ struct uverbs_attr attrs[];
};
-static inline bool uverbs_attr_is_valid_in_hash(const struct uverbs_attr_bundle_hash *attrs_hash,
- unsigned int idx)
-{
- return test_bit(idx, attrs_hash->valid_bitmap);
-}
-
static inline bool uverbs_attr_is_valid(const struct uverbs_attr_bundle *attrs_bundle,
unsigned int idx)
{
- u16 idx_bucket = idx >> UVERBS_ID_NS_SHIFT;
-
- if (attrs_bundle->num_buckets <= idx_bucket)
- return false;
-
- return uverbs_attr_is_valid_in_hash(&attrs_bundle->hash[idx_bucket],
- idx & ~UVERBS_ID_NS_MASK);
+ return test_bit(uapi_bkey_attr(uapi_key_attr(idx)),
+ attrs_bundle->attr_present);
}
#define IS_UVERBS_COPY_ERR(_ret) ((_ret) && (_ret) != -ENOENT)
@@ -389,12 +456,10 @@ static inline bool uverbs_attr_is_valid(const struct uverbs_attr_bundle *attrs_b
static inline const struct uverbs_attr *uverbs_attr_get(const struct uverbs_attr_bundle *attrs_bundle,
u16 idx)
{
- u16 idx_bucket = idx >> UVERBS_ID_NS_SHIFT;
-
if (!uverbs_attr_is_valid(attrs_bundle, idx))
return ERR_PTR(-ENOENT);
- return &attrs_bundle->hash[idx_bucket].attrs[idx & ~UVERBS_ID_NS_MASK];
+ return &attrs_bundle->attrs[uapi_bkey_attr(uapi_key_attr(idx))];
}
static inline int uverbs_attr_get_enum_id(const struct uverbs_attr_bundle *attrs_bundle,
@@ -431,25 +496,15 @@ static inline struct ib_uobject *uverbs_attr_get_uobject(const struct uverbs_att
return attr->obj_attr.uobject;
}
-static inline int uverbs_copy_to(const struct uverbs_attr_bundle *attrs_bundle,
- size_t idx, const void *from, size_t size)
+static inline int
+uverbs_attr_get_len(const struct uverbs_attr_bundle *attrs_bundle, u16 idx)
{
const struct uverbs_attr *attr = uverbs_attr_get(attrs_bundle, idx);
- u16 flags;
- size_t min_size;
if (IS_ERR(attr))
return PTR_ERR(attr);
- min_size = min_t(size_t, attr->ptr_attr.len, size);
- if (copy_to_user(u64_to_user_ptr(attr->ptr_attr.data), from, min_size))
- return -EFAULT;
-
- flags = attr->ptr_attr.flags | UVERBS_ATTR_F_VALID_OUTPUT;
- if (put_user(flags, &attr->uattr->flags))
- return -EFAULT;
-
- return 0;
+ return attr->ptr_attr.len;
}
static inline bool uverbs_attr_ptr_is_inline(const struct uverbs_attr *attr)
@@ -457,6 +512,18 @@ static inline bool uverbs_attr_ptr_is_inline(const struct uverbs_attr *attr)
return attr->ptr_attr.len <= sizeof(attr->ptr_attr.data);
}
+static inline void *uverbs_attr_get_alloced_ptr(
+ const struct uverbs_attr_bundle *attrs_bundle, u16 idx)
+{
+ const struct uverbs_attr *attr = uverbs_attr_get(attrs_bundle, idx);
+
+ if (IS_ERR(attr))
+ return (void *)attr;
+
+ return uverbs_attr_ptr_is_inline(attr) ? (void *)&attr->ptr_attr.data :
+ attr->ptr_attr.ptr;
+}
+
static inline int _uverbs_copy_from(void *to,
const struct uverbs_attr_bundle *attrs_bundle,
size_t idx,
@@ -515,54 +582,54 @@ static inline int _uverbs_copy_from_or_zero(void *to,
#define uverbs_copy_from_or_zero(to, attrs_bundle, idx) \
_uverbs_copy_from_or_zero(to, attrs_bundle, idx, sizeof(*to))
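
A minimal handler sketch using the accessors above (MY_ATTR_*, struct my_cmd and struct my_resp are invented for illustration; error handling is kept to the essentials):

static int my_method_handler(struct ib_uverbs_file *ufile,
                             struct uverbs_attr_bundle *attrs)
{
        struct my_cmd cmd;                      /* hypothetical uapi input */
        struct my_resp resp = {};               /* hypothetical uapi output */
        int ret;

        if (!uverbs_attr_is_valid(attrs, MY_ATTR_CMD))
                return -EINVAL;

        ret = uverbs_copy_from_or_zero(&cmd, attrs, MY_ATTR_CMD);
        if (ret)
                return ret;

        /* ... act on cmd, fill resp ... */

        return uverbs_copy_to(attrs, MY_ATTR_RESP, &resp, sizeof(resp));
}
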
-/* =================================================
- * Definitions -> Specs infrastructure
- * =================================================
- */
-
-/*
- * uverbs_alloc_spec_tree - Merges different common and driver specific feature
- * into one parsing tree that every uverbs command will be parsed upon.
- *
- * @num_trees: Number of trees in the array @trees.
- * @trees: Array of pointers to tree root definitions to merge. Each such tree
- * possibly contains objects, methods and attributes definitions.
- *
- * Returns:
- * uverbs_root_spec *: The root of the merged parsing tree.
- * On error, we return an error code. Error is checked via IS_ERR.
- *
- * The following merges could take place:
- * a. Two trees representing the same method with different handler
- * -> We take the handler of the tree that its handler != NULL
- * and its index in the trees array is greater. The incentive for that
- * is that developers are expected to first merge common trees and then
- * merge trees that gives specialized the behaviour.
- * b. Two trees representing the same object with different
- * type_attrs (struct uverbs_obj_type):
- * -> We take the type_attrs of the tree that its type_attr != NULL
- * and its index in the trees array is greater. This could be used
- * in order to override the free function, allocation size, etc.
- * c. Two trees representing the same method attribute (same id but possibly
- * different attributes):
- * -> ERROR (-ENOENT), we believe that's not the programmer's intent.
- *
- * An object without any methods is considered invalid and will abort the
- * function with -ENOENT error.
- */
#if IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS)
-struct uverbs_root_spec *uverbs_alloc_spec_tree(unsigned int num_trees,
- const struct uverbs_object_tree_def **trees);
-void uverbs_free_spec_tree(struct uverbs_root_spec *root);
-#else
-static inline struct uverbs_root_spec *uverbs_alloc_spec_tree(unsigned int num_trees,
- const struct uverbs_object_tree_def **trees)
+int uverbs_get_flags64(u64 *to, const struct uverbs_attr_bundle *attrs_bundle,
+ size_t idx, u64 allowed_bits);
+int uverbs_get_flags32(u32 *to, const struct uverbs_attr_bundle *attrs_bundle,
+ size_t idx, u64 allowed_bits);
+int uverbs_copy_to(const struct uverbs_attr_bundle *attrs_bundle, size_t idx,
+ const void *from, size_t size);
+__malloc void *_uverbs_alloc(struct uverbs_attr_bundle *bundle, size_t size,
+ gfp_t flags);
+
+static inline __malloc void *uverbs_alloc(struct uverbs_attr_bundle *bundle,
+ size_t size)
{
- return NULL;
+ return _uverbs_alloc(bundle, size, GFP_KERNEL);
}
-static inline void uverbs_free_spec_tree(struct uverbs_root_spec *root)
+static inline __malloc void *uverbs_zalloc(struct uverbs_attr_bundle *bundle,
+ size_t size)
+{
+ return _uverbs_alloc(bundle, size, GFP_KERNEL | __GFP_ZERO);
+}
+#else
+static inline int
+uverbs_get_flags64(u64 *to, const struct uverbs_attr_bundle *attrs_bundle,
+ size_t idx, u64 allowed_bits)
+{
+ return -EINVAL;
+}
+static inline int
+uverbs_get_flags32(u32 *to, const struct uverbs_attr_bundle *attrs_bundle,
+ size_t idx, u64 allowed_bits)
+{
+ return -EINVAL;
+}
+static inline int uverbs_copy_to(const struct uverbs_attr_bundle *attrs_bundle,
+ size_t idx, const void *from, size_t size)
+{
+ return -EINVAL;
+}
+static inline __malloc void *uverbs_alloc(struct uverbs_attr_bundle *bundle,
+ size_t size)
+{
+ return ERR_PTR(-EINVAL);
+}
+static inline __malloc void *uverbs_zalloc(struct uverbs_attr_bundle *bundle,
+ size_t size)
{
+ return ERR_PTR(-EINVAL);
}
#endif
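
A short sketch of the flag and allocation helpers declared above (MY_ATTR_FLAGS and the MY_FLAG_* bits are placeholders; that the allocation is released together with the bundle is the intended model behind _uverbs_alloc(), not something spelled out in this header):

static int my_handler_fragment(struct uverbs_attr_bundle *attrs)
{
        u32 flags;
        void *scratch;
        int ret;

        ret = uverbs_get_flags32(&flags, attrs, MY_ATTR_FLAGS,
                                 MY_FLAG_A | MY_FLAG_B);
        if (ret)
                return ret;

        scratch = uverbs_zalloc(attrs, 512);
        if (IS_ERR(scratch))
                return PTR_ERR(scratch);

        /* no explicit free: the memory is tied to the bundle lifetime */
        return 0;
}
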
diff --git a/include/rdma/uverbs_named_ioctl.h b/include/rdma/uverbs_named_ioctl.h
index c5bb4ebdb0b0..b3b21733cc55 100644
--- a/include/rdma/uverbs_named_ioctl.h
+++ b/include/rdma/uverbs_named_ioctl.h
@@ -43,48 +43,89 @@
#define _UVERBS_NAME(x, y) _UVERBS_PASTE(x, y)
#define UVERBS_METHOD(id) _UVERBS_NAME(UVERBS_MODULE_NAME, _method_##id)
#define UVERBS_HANDLER(id) _UVERBS_NAME(UVERBS_MODULE_NAME, _handler_##id)
+#define UVERBS_OBJECT(id) _UVERBS_NAME(UVERBS_MODULE_NAME, _object_##id)
-#define DECLARE_UVERBS_NAMED_METHOD(id, ...) \
- DECLARE_UVERBS_METHOD(UVERBS_METHOD(id), id, UVERBS_HANDLER(id), ##__VA_ARGS__)
+/* These are static so they do not need to be qualified */
+#define UVERBS_METHOD_ATTRS(method_id) _method_attrs_##method_id
+#define UVERBS_OBJECT_METHODS(object_id) _object_methods_##object_id
-#define DECLARE_UVERBS_NAMED_METHOD_WITH_HANDLER(id, handler, ...) \
- DECLARE_UVERBS_METHOD(UVERBS_METHOD(id), id, handler, ##__VA_ARGS__)
+#define DECLARE_UVERBS_NAMED_METHOD(_method_id, ...) \
+ static const struct uverbs_attr_def *const UVERBS_METHOD_ATTRS( \
+ _method_id)[] = { __VA_ARGS__ }; \
+ static const struct uverbs_method_def UVERBS_METHOD(_method_id) = { \
+ .id = _method_id, \
+ .handler = UVERBS_HANDLER(_method_id), \
+ .num_attrs = ARRAY_SIZE(UVERBS_METHOD_ATTRS(_method_id)), \
+ .attrs = &UVERBS_METHOD_ATTRS(_method_id), \
+ }
-#define DECLARE_UVERBS_NAMED_METHOD_NO_OVERRIDE(id, handler, ...) \
- DECLARE_UVERBS_METHOD(UVERBS_METHOD(id), id, NULL, ##__VA_ARGS__)
-
-#define DECLARE_UVERBS_NAMED_OBJECT(id, ...) \
- DECLARE_UVERBS_OBJECT(UVERBS_OBJECT(id), id, ##__VA_ARGS__)
+/* Create a standard destroy method using the default handler. The handle_attr
+ * argument must be the attribute specifying the handle to destroy; the
+ * default handler does not support any other attributes.
+ */
+#define DECLARE_UVERBS_NAMED_METHOD_DESTROY(_method_id, _handle_attr) \
+ static const struct uverbs_attr_def *const UVERBS_METHOD_ATTRS( \
+ _method_id)[] = { _handle_attr }; \
+ static const struct uverbs_method_def UVERBS_METHOD(_method_id) = { \
+ .id = _method_id, \
+ .handler = uverbs_destroy_def_handler, \
+ .num_attrs = ARRAY_SIZE(UVERBS_METHOD_ATTRS(_method_id)), \
+ .attrs = &UVERBS_METHOD_ATTRS(_method_id), \
+ }
-#define _UVERBS_COMP_NAME(x, y, z) _UVERBS_NAME(_UVERBS_NAME(x, y), z)
+#define DECLARE_UVERBS_NAMED_OBJECT(_object_id, _type_attrs, ...) \
+ static const struct uverbs_method_def *const UVERBS_OBJECT_METHODS( \
+ _object_id)[] = { __VA_ARGS__ }; \
+ const struct uverbs_object_def UVERBS_OBJECT(_object_id) = { \
+ .id = _object_id, \
+ .type_attrs = &_type_attrs, \
+ .num_methods = ARRAY_SIZE(UVERBS_OBJECT_METHODS(_object_id)), \
+ .methods = &UVERBS_OBJECT_METHODS(_object_id) \
+ }
-#define UVERBS_NO_OVERRIDE NULL
+/*
+ * Declare global methods. These still have a unique object_id because we
+ * identify all uapi methods with an (object, method) tuple. However, they have
+ * no type pointer.
+ */
+#define DECLARE_UVERBS_GLOBAL_METHODS(_object_id, ...) \
+ static const struct uverbs_method_def *const UVERBS_OBJECT_METHODS( \
+ _object_id)[] = { __VA_ARGS__ }; \
+ const struct uverbs_object_def UVERBS_OBJECT(_object_id) = { \
+ .id = _object_id, \
+ .num_methods = ARRAY_SIZE(UVERBS_OBJECT_METHODS(_object_id)), \
+ .methods = &UVERBS_OBJECT_METHODS(_object_id) \
+ }
-/* This declares a parsing tree with one object and one method. This is usually
- * used for merging driver attributes to the common attributes. The driver has
- * a chance to override the handler and type attrs of the original object.
- * The __VA_ARGS__ just contains a list of attributes.
+/* Used by drivers to declare a complete parsing tree for new methods
*/
-#define ADD_UVERBS_ATTRIBUTES(_name, _object, _method, _type_attrs, _handler, ...) \
-static DECLARE_UVERBS_METHOD(_UVERBS_COMP_NAME(UVERBS_MODULE_NAME, \
- _method_, _name), \
- _method, _handler, ##__VA_ARGS__); \
- \
-static DECLARE_UVERBS_OBJECT(_UVERBS_COMP_NAME(UVERBS_MODULE_NAME, \
- _object_, _name), \
- _object, _type_attrs, \
- &_UVERBS_COMP_NAME(UVERBS_MODULE_NAME, \
- _method_, _name)); \
- \
-static DECLARE_UVERBS_OBJECT_TREE(_name, \
- &_UVERBS_COMP_NAME(UVERBS_MODULE_NAME, \
- _object_, _name))
+#define ADD_UVERBS_METHODS(_name, _object_id, ...) \
+ static const struct uverbs_method_def *const UVERBS_OBJECT_METHODS( \
+ _object_id)[] = { __VA_ARGS__ }; \
+ static const struct uverbs_object_def _name##_struct = { \
+ .id = _object_id, \
+ .num_methods = ARRAY_SIZE(UVERBS_OBJECT_METHODS(_object_id)), \
+ .methods = &UVERBS_OBJECT_METHODS(_object_id) \
+ }; \
+ static const struct uverbs_object_def *const _name##_ptrs[] = { \
+ &_name##_struct, \
+ }; \
+ static const struct uverbs_object_tree_def _name = { \
+ .num_objects = 1, \
+ .objects = &_name##_ptrs, \
+ }
-/* A very common use case is that the driver doesn't override the handler and
- * type_attrs. Therefore, we provide a simplified macro for this common case.
+/* Used by drivers to declare a complete parsing tree for a single method that
+ * differs only in having additional driver specific attributes.
*/
-#define ADD_UVERBS_ATTRIBUTES_SIMPLE(_name, _object, _method, ...) \
- ADD_UVERBS_ATTRIBUTES(_name, _object, _method, UVERBS_NO_OVERRIDE, \
- UVERBS_NO_OVERRIDE, ##__VA_ARGS__)
+#define ADD_UVERBS_ATTRIBUTES_SIMPLE(_name, _object_id, _method_id, ...) \
+ static const struct uverbs_attr_def *const UVERBS_METHOD_ATTRS( \
+ _method_id)[] = { __VA_ARGS__ }; \
+ static const struct uverbs_method_def UVERBS_METHOD(_method_id) = { \
+ .id = _method_id, \
+ .num_attrs = ARRAY_SIZE(UVERBS_METHOD_ATTRS(_method_id)), \
+ .attrs = &UVERBS_METHOD_ATTRS(_method_id), \
+ }; \
+ ADD_UVERBS_METHODS(_name, _object_id, &UVERBS_METHOD(_method_id))
#endif
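
A rough sketch of how the declaration macros above fit together (every MY_* name is invented, UVERBS_MODULE_NAME is assumed to be defined, and the handler UVERBS_HANDLER(MY_METHOD_CREATE) plus the destroy callback my_free_thing() are assumed to exist elsewhere):

DECLARE_UVERBS_NAMED_METHOD(
        MY_METHOD_CREATE,
        UVERBS_ATTR_IDR(MY_ATTR_CREATE_HANDLE, MY_OBJECT_THING,
                        UVERBS_ACCESS_NEW, UA_MANDATORY),
        UVERBS_ATTR_PTR_IN(MY_ATTR_CREATE_CMD,
                           UVERBS_ATTR_TYPE(struct my_create_cmd),
                           UA_MANDATORY));

DECLARE_UVERBS_NAMED_METHOD_DESTROY(
        MY_METHOD_DESTROY,
        UVERBS_ATTR_IDR(MY_ATTR_DESTROY_HANDLE, MY_OBJECT_THING,
                        UVERBS_ACCESS_DESTROY, UA_MANDATORY));

DECLARE_UVERBS_NAMED_OBJECT(
        MY_OBJECT_THING,
        UVERBS_TYPE_ALLOC_IDR(my_free_thing),
        &UVERBS_METHOD(MY_METHOD_CREATE),
        &UVERBS_METHOD(MY_METHOD_DESTROY));
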
diff --git a/include/rdma/uverbs_std_types.h b/include/rdma/uverbs_std_types.h
index 9d56cdb84655..3b00231cc084 100644
--- a/include/rdma/uverbs_std_types.h
+++ b/include/rdma/uverbs_std_types.h
@@ -37,8 +37,6 @@
#include <rdma/uverbs_ioctl.h>
#include <rdma/ib_user_ioctl_verbs.h>
-#define UVERBS_OBJECT(id) uverbs_object_##id
-
#if IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS)
const struct uverbs_object_tree_def *uverbs_default_get_objects(void);
#else
@@ -48,34 +46,61 @@ static inline const struct uverbs_object_tree_def *uverbs_default_get_objects(vo
}
#endif
-static inline struct ib_uobject *__uobj_get(const struct uverbs_obj_type *type,
- bool write,
- struct ib_ucontext *ucontext,
- int id)
+/* Returns _id, or causes a compile error if _id is not a u32.
+ *
+ * The uobj APIs should only be used with the write based uAPI to access
+ * object IDs. The write API must use a u32 for the object handle, which is
+ * checked by this macro.
+ */
+#define _uobj_check_id(_id) ((_id) * typecheck(u32, _id))
+
+#define uobj_get_type(_ufile, _object) \
+ uapi_get_object((_ufile)->device->uapi, _object)
+
+#define uobj_get_read(_type, _id, _ufile) \
+ rdma_lookup_get_uobject(uobj_get_type(_ufile, _type), _ufile, \
+ _uobj_check_id(_id), UVERBS_LOOKUP_READ)
+
+#define ufd_get_read(_type, _fdnum, _ufile) \
+ rdma_lookup_get_uobject(uobj_get_type(_ufile, _type), _ufile, \
+ (_fdnum)*typecheck(s32, _fdnum), \
+ UVERBS_LOOKUP_READ)
+
+static inline void *_uobj_get_obj_read(struct ib_uobject *uobj)
{
- return rdma_lookup_get_uobject(type, ucontext, id, write);
+ if (IS_ERR(uobj))
+ return NULL;
+ return uobj->object;
}
+#define uobj_get_obj_read(_object, _type, _id, _ufile) \
+ ((struct ib_##_object *)_uobj_get_obj_read( \
+ uobj_get_read(_type, _id, _ufile)))
-#define uobj_get_type(_object) UVERBS_OBJECT(_object).type_attrs
+#define uobj_get_write(_type, _id, _ufile) \
+ rdma_lookup_get_uobject(uobj_get_type(_ufile, _type), _ufile, \
+ _uobj_check_id(_id), UVERBS_LOOKUP_WRITE)
-#define uobj_get_read(_type, _id, _ucontext) \
- __uobj_get(uobj_get_type(_type), false, _ucontext, _id)
+int __uobj_perform_destroy(const struct uverbs_api_object *obj, u32 id,
+ struct ib_uverbs_file *ufile, int success_res);
+#define uobj_perform_destroy(_type, _id, _ufile, _success_res) \
+ __uobj_perform_destroy(uobj_get_type(_ufile, _type), \
+ _uobj_check_id(_id), _ufile, _success_res)
-#define uobj_get_obj_read(_object, _type, _id, _ucontext) \
-({ \
- struct ib_uobject *__uobj = \
- __uobj_get(uobj_get_type(_type), \
- false, _ucontext, _id); \
- \
- (struct ib_##_object *)(IS_ERR(__uobj) ? NULL : __uobj->object);\
-})
+struct ib_uobject *__uobj_get_destroy(const struct uverbs_api_object *obj,
+ u32 id, struct ib_uverbs_file *ufile);
-#define uobj_get_write(_type, _id, _ucontext) \
- __uobj_get(uobj_get_type(_type), true, _ucontext, _id)
+#define uobj_get_destroy(_type, _id, _ufile) \
+ __uobj_get_destroy(uobj_get_type(_ufile, _type), _uobj_check_id(_id), \
+ _ufile)
+
+static inline void uobj_put_destroy(struct ib_uobject *uobj)
+{
+ rdma_lookup_put_uobject(uobj, UVERBS_LOOKUP_WRITE);
+}
static inline void uobj_put_read(struct ib_uobject *uobj)
{
- rdma_lookup_put_uobject(uobj, false);
+ rdma_lookup_put_uobject(uobj, UVERBS_LOOKUP_READ);
}
#define uobj_put_obj_read(_obj) \
@@ -83,17 +108,17 @@ static inline void uobj_put_read(struct ib_uobject *uobj)
static inline void uobj_put_write(struct ib_uobject *uobj)
{
- rdma_lookup_put_uobject(uobj, true);
+ rdma_lookup_put_uobject(uobj, UVERBS_LOOKUP_WRITE);
}
-static inline int __must_check uobj_remove_commit(struct ib_uobject *uobj)
+static inline int __must_check uobj_alloc_commit(struct ib_uobject *uobj,
+ int success_res)
{
- return rdma_remove_commit_uobject(uobj);
-}
+ int ret = rdma_alloc_commit_uobject(uobj);
-static inline void uobj_alloc_commit(struct ib_uobject *uobj)
-{
- rdma_alloc_commit_uobject(uobj);
+ if (ret)
+ return ret;
+ return success_res;
}
static inline void uobj_alloc_abort(struct ib_uobject *uobj)
@@ -101,14 +126,19 @@ static inline void uobj_alloc_abort(struct ib_uobject *uobj)
rdma_alloc_abort_uobject(uobj);
}
-static inline struct ib_uobject *__uobj_alloc(const struct uverbs_obj_type *type,
- struct ib_ucontext *ucontext)
+static inline struct ib_uobject *
+__uobj_alloc(const struct uverbs_api_object *obj, struct ib_uverbs_file *ufile,
+ struct ib_device **ib_dev)
{
- return rdma_alloc_begin_uobject(type, ucontext);
+ struct ib_uobject *uobj = rdma_alloc_begin_uobject(obj, ufile);
+
+ if (!IS_ERR(uobj))
+ *ib_dev = uobj->context->device;
+ return uobj;
}
-#define uobj_alloc(_type, ucontext) \
- __uobj_alloc(uobj_get_type(_type), ucontext)
+#define uobj_alloc(_type, _ufile, _ib_dev) \
+ __uobj_alloc(uobj_get_type(_ufile, _type), _ufile, _ib_dev)
#endif
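
A sketch of the lookup/alloc pattern these macros support in a write-path handler (UVERBS_OBJECT_THING and my_create_thing() are illustrative; UVERBS_OBJECT_CQ is the usual core object id):

static int my_create_thing(struct ib_uverbs_file *ufile, u32 cq_handle)
{
        struct ib_device *ib_dev;
        struct ib_uobject *uobj;
        struct ib_cq *cq;
        int ret;

        uobj = uobj_alloc(UVERBS_OBJECT_THING, ufile, &ib_dev);
        if (IS_ERR(uobj))
                return PTR_ERR(uobj);

        cq = uobj_get_obj_read(cq, UVERBS_OBJECT_CQ, cq_handle, ufile);
        if (!cq) {
                ret = -EINVAL;
                goto err;
        }

        /* ... create the new object using ib_dev and cq ... */

        uobj_put_obj_read(cq);
        return uobj_alloc_commit(uobj, 0);

err:
        uobj_alloc_abort(uobj);
        return ret;
}
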
diff --git a/include/rdma/uverbs_types.h b/include/rdma/uverbs_types.h
index cc04ec65588d..acb1bfa3cc99 100644
--- a/include/rdma/uverbs_types.h
+++ b/include/rdma/uverbs_types.h
@@ -37,63 +37,72 @@
#include <rdma/ib_verbs.h>
struct uverbs_obj_type;
+struct uverbs_api_object;
-struct uverbs_obj_type_class {
+enum rdma_lookup_mode {
+ UVERBS_LOOKUP_READ,
+ UVERBS_LOOKUP_WRITE,
/*
- * Get an ib_uobject that corresponds to the given id from ucontext,
- * These functions could create or destroy objects if required.
- * The action will be finalized only when commit, abort or put fops are
- * called.
- * The flow of the different actions is:
- * [alloc]: Starts with alloc_begin. The handlers logic is than
- * executed. If the handler is successful, alloc_commit
- * is called and the object is inserted to the repository.
- * Once alloc_commit completes the object is visible to
- * other threads and userspace.
- e Otherwise, alloc_abort is called and the object is
- * destroyed.
- * [lookup]: Starts with lookup_get which fetches and locks the
- * object. After the handler finished using the object, it
- * needs to call lookup_put to unlock it. The exclusive
- * flag indicates if the object is locked for exclusive
- * access.
- * [remove]: Starts with lookup_get with exclusive flag set. This
- * locks the object for exclusive access. If the handler
- * code completed successfully, remove_commit is called
- * and the ib_uobject is removed from the context's
- * uobjects repository and put. The object itself is
- * destroyed as well. Once remove succeeds new krefs to
- * the object cannot be acquired by other threads or
- * userspace and the hardware driver is removed from the
- * object. Other krefs on the object may still exist.
- * If the handler code failed, lookup_put should be
- * called. This callback is used when the context
- * is destroyed as well (process termination,
- * reset flow).
+ * Destroy is like LOOKUP_WRITE, except that the uobject is not
+ * locked. uobj_destroy is used to convert a LOOKUP_DESTROY lock into
+ * a LOOKUP_WRITE lock.
*/
- struct ib_uobject *(*alloc_begin)(const struct uverbs_obj_type *type,
- struct ib_ucontext *ucontext);
- void (*alloc_commit)(struct ib_uobject *uobj);
+ UVERBS_LOOKUP_DESTROY,
+};
+
+/*
+ * The following sequences are valid:
+ * Success flow:
+ * alloc_begin
+ * alloc_commit
+ * [..]
+ * Access flow:
+ * lookup_get(exclusive=false) & uverbs_try_lock_object
+ * lookup_put(exclusive=false) via rdma_lookup_put_uobject
+ * Destruction flow:
+ * lookup_get(exclusive=true) & uverbs_try_lock_object
+ * remove_commit
+ * remove_handle (optional)
+ * lookup_put(exclusive=true) via rdma_lookup_put_uobject
+ *
+ * Allocate Error flow #1
+ * alloc_begin
+ * alloc_abort
+ * Allocate Error flow #2
+ * alloc_begin
+ * remove_commit
+ * alloc_abort
+ * Allocate Error flow #3
+ * alloc_begin
+ * alloc_commit (fails)
+ * remove_commit
+ * alloc_abort
+ *
+ * In all cases the caller must hold the ufile kref until alloc_commit or
+ * alloc_abort returns.
+ */
+struct uverbs_obj_type_class {
+ struct ib_uobject *(*alloc_begin)(const struct uverbs_api_object *obj,
+ struct ib_uverbs_file *ufile);
+ /* This consumes the kref on uobj */
+ int (*alloc_commit)(struct ib_uobject *uobj);
+ /* This does not consume the kref on uobj */
void (*alloc_abort)(struct ib_uobject *uobj);
- struct ib_uobject *(*lookup_get)(const struct uverbs_obj_type *type,
- struct ib_ucontext *ucontext, int id,
- bool exclusive);
- void (*lookup_put)(struct ib_uobject *uobj, bool exclusive);
- /*
- * Must be called with the exclusive lock held. If successful uobj is
- * invalid on return. On failure uobject is left completely
- * unchanged
- */
- int __must_check (*remove_commit)(struct ib_uobject *uobj,
- enum rdma_remove_reason why);
+ struct ib_uobject *(*lookup_get)(const struct uverbs_api_object *obj,
+ struct ib_uverbs_file *ufile, s64 id,
+ enum rdma_lookup_mode mode);
+ void (*lookup_put)(struct ib_uobject *uobj, enum rdma_lookup_mode mode);
+ /* This does not consume the kref on uobj */
+ int __must_check (*destroy_hw)(struct ib_uobject *uobj,
+ enum rdma_remove_reason why);
+ void (*remove_handle)(struct ib_uobject *uobj);
u8 needs_kfree_rcu;
};
struct uverbs_obj_type {
const struct uverbs_obj_type_class * const type_class;
size_t obj_size;
- unsigned int destroy_order;
};
/*
@@ -120,16 +129,15 @@ struct uverbs_obj_idr_type {
enum rdma_remove_reason why);
};
-struct ib_uobject *rdma_lookup_get_uobject(const struct uverbs_obj_type *type,
- struct ib_ucontext *ucontext,
- int id, bool exclusive);
-void rdma_lookup_put_uobject(struct ib_uobject *uobj, bool exclusive);
-struct ib_uobject *rdma_alloc_begin_uobject(const struct uverbs_obj_type *type,
- struct ib_ucontext *ucontext);
+struct ib_uobject *rdma_lookup_get_uobject(const struct uverbs_api_object *obj,
+ struct ib_uverbs_file *ufile, s64 id,
+ enum rdma_lookup_mode mode);
+void rdma_lookup_put_uobject(struct ib_uobject *uobj,
+ enum rdma_lookup_mode mode);
+struct ib_uobject *rdma_alloc_begin_uobject(const struct uverbs_api_object *obj,
+ struct ib_uverbs_file *ufile);
void rdma_alloc_abort_uobject(struct ib_uobject *uobj);
-int __must_check rdma_remove_commit_uobject(struct ib_uobject *uobj);
-int rdma_alloc_commit_uobject(struct ib_uobject *uobj);
-int rdma_explicit_destroy(struct ib_uobject *uobject);
+int __must_check rdma_alloc_commit_uobject(struct ib_uobject *uobj);
struct uverbs_obj_fd_type {
/*
@@ -140,7 +148,7 @@ struct uverbs_obj_fd_type {
* the driver is removed or the process terminated.
*/
struct uverbs_obj_type type;
- int (*context_closed)(struct ib_uobject_file *uobj_file,
+ int (*context_closed)(struct ib_uobject *uobj,
enum rdma_remove_reason why);
const struct file_operations *fops;
const char *name;
@@ -152,30 +160,29 @@ extern const struct uverbs_obj_type_class uverbs_fd_class;
#define UVERBS_BUILD_BUG_ON(cond) (sizeof(char[1 - 2 * !!(cond)]) - \
sizeof(char))
-#define UVERBS_TYPE_ALLOC_FD(_order, _obj_size, _context_closed, _fops, _name, _flags)\
+#define UVERBS_TYPE_ALLOC_FD(_obj_size, _context_closed, _fops, _name, _flags)\
((&((const struct uverbs_obj_fd_type) \
{.type = { \
- .destroy_order = _order, \
.type_class = &uverbs_fd_class, \
.obj_size = (_obj_size) + \
- UVERBS_BUILD_BUG_ON((_obj_size) < sizeof(struct ib_uobject_file)), \
+ UVERBS_BUILD_BUG_ON((_obj_size) < \
+ sizeof(struct ib_uobject)), \
}, \
.context_closed = _context_closed, \
.fops = _fops, \
.name = _name, \
.flags = _flags}))->type)
-#define UVERBS_TYPE_ALLOC_IDR_SZ(_size, _order, _destroy_object) \
+#define UVERBS_TYPE_ALLOC_IDR_SZ(_size, _destroy_object) \
((&((const struct uverbs_obj_idr_type) \
{.type = { \
- .destroy_order = _order, \
.type_class = &uverbs_idr_class, \
.obj_size = (_size) + \
UVERBS_BUILD_BUG_ON((_size) < \
sizeof(struct ib_uobject)) \
}, \
.destroy_object = _destroy_object,}))->type)
-#define UVERBS_TYPE_ALLOC_IDR(_order, _destroy_object) \
- UVERBS_TYPE_ALLOC_IDR_SZ(sizeof(struct ib_uobject), _order, \
+#define UVERBS_TYPE_ALLOC_IDR(_destroy_object) \
+ UVERBS_TYPE_ALLOC_IDR_SZ(sizeof(struct ib_uobject), \
_destroy_object)
#endif
diff --git a/include/scsi/libsas.h b/include/scsi/libsas.h
index 225ab7783dfd..3de3b10da19a 100644
--- a/include/scsi/libsas.h
+++ b/include/scsi/libsas.h
@@ -161,7 +161,7 @@ struct sata_device {
u8 port_no; /* port number, if this is a PM (Port) */
struct ata_port *ap;
- struct ata_host ata_host;
+ struct ata_host *ata_host;
struct smp_resp rps_resp ____cacheline_aligned; /* report_phy_sata_resp */
u8 fis[ATA_RESP_FIS_SIZE];
};
diff --git a/include/scsi/scsi_cmnd.h b/include/scsi/scsi_cmnd.h
index aaf1e971c6a3..c891ada3c5c2 100644
--- a/include/scsi/scsi_cmnd.h
+++ b/include/scsi/scsi_cmnd.h
@@ -4,6 +4,7 @@
#include <linux/dma-mapping.h>
#include <linux/blkdev.h>
+#include <linux/t10-pi.h>
#include <linux/list.h>
#include <linux/types.h>
#include <linux/timer.h>
@@ -14,8 +15,6 @@
struct Scsi_Host;
struct scsi_driver;
-#include <scsi/scsi_device.h>
-
/*
* MAX_COMMAND_SIZE is:
* The longest fixed-length SCSI CDB as per the SCSI standard.
@@ -120,11 +119,11 @@ struct scsi_cmnd {
struct request *request; /* The command we are
working on */
-#define SCSI_SENSE_BUFFERSIZE 96
unsigned char *sense_buffer;
/* obtained by REQUEST SENSE when
* CHECK CONDITION is received on original
- * command (auto-sense) */
+ * command (auto-sense). Length must be
+ * SCSI_SENSE_BUFFERSIZE bytes. */
/* Low-level done function - can be used by low-level driver to point
* to completion function. Not used by mid/upper level code. */
@@ -313,12 +312,6 @@ static inline unsigned int scsi_prot_interval(struct scsi_cmnd *scmd)
return scmd->device->sector_size;
}
-static inline u32 scsi_prot_ref_tag(struct scsi_cmnd *scmd)
-{
- return blk_rq_pos(scmd->request) >>
- (ilog2(scsi_prot_interval(scmd)) - 9) & 0xffffffff;
-}
-
static inline unsigned scsi_prot_sg_count(struct scsi_cmnd *cmd)
{
return cmd->prot_sdb ? cmd->prot_sdb->table.nents : 0;
diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
index 4c36af6edd79..202f4d6a4342 100644
--- a/include/scsi/scsi_device.h
+++ b/include/scsi/scsi_device.h
@@ -17,6 +17,8 @@ struct scsi_sense_hdr;
typedef __u64 __bitwise blist_flags_t;
+#define SCSI_SENSE_BUFFERSIZE 96
+
struct scsi_mode_data {
__u32 length;
__u16 block_descriptor_length;
@@ -426,11 +428,21 @@ extern const char *scsi_device_state_name(enum scsi_device_state);
extern int scsi_is_sdev_device(const struct device *);
extern int scsi_is_target_device(const struct device *);
extern void scsi_sanitize_inquiry_string(unsigned char *s, int len);
-extern int scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
+extern int __scsi_execute(struct scsi_device *sdev, const unsigned char *cmd,
int data_direction, void *buffer, unsigned bufflen,
unsigned char *sense, struct scsi_sense_hdr *sshdr,
int timeout, int retries, u64 flags,
req_flags_t rq_flags, int *resid);
+/* Make sure any sense buffer is the correct size. */
+#define scsi_execute(sdev, cmd, data_direction, buffer, bufflen, sense, \
+ sshdr, timeout, retries, flags, rq_flags, resid) \
+({ \
+ BUILD_BUG_ON((sense) != NULL && \
+ sizeof(sense) != SCSI_SENSE_BUFFERSIZE); \
+ __scsi_execute(sdev, cmd, data_direction, buffer, bufflen, \
+ sense, sshdr, timeout, retries, flags, rq_flags, \
+ resid); \
+})
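
For example, a caller that wants sense data must pass a buffer of exactly SCSI_SENSE_BUFFERSIZE bytes (or NULL), otherwise the BUILD_BUG_ON above fires at compile time; a sketch, assuming the usual opcode definitions from <scsi/scsi_proto.h>:

static int my_test_unit_ready(struct scsi_device *sdev)
{
        unsigned char cmd[6] = { TEST_UNIT_READY };
        unsigned char sense[SCSI_SENSE_BUFFERSIZE];
        struct scsi_sense_hdr sshdr;

        return scsi_execute(sdev, cmd, DMA_NONE, NULL, 0, sense, &sshdr,
                            10 * HZ, 3, 0, 0, NULL);
}
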
static inline int scsi_execute_req(struct scsi_device *sdev,
const unsigned char *cmd, int data_direction, void *buffer,
unsigned bufflen, struct scsi_sense_hdr *sshdr, int timeout,
diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
index 53b485fe9b67..5ea06d310a25 100644
--- a/include/scsi/scsi_host.h
+++ b/include/scsi/scsi_host.h
@@ -758,6 +758,7 @@ extern void scsi_scan_host(struct Scsi_Host *);
extern void scsi_rescan_device(struct device *);
extern void scsi_remove_host(struct Scsi_Host *);
extern struct Scsi_Host *scsi_host_get(struct Scsi_Host *);
+extern int scsi_host_busy(struct Scsi_Host *shost);
extern void scsi_host_put(struct Scsi_Host *t);
extern struct Scsi_Host *scsi_host_lookup(unsigned short);
extern const char *scsi_host_state_name(enum scsi_host_state);
diff --git a/include/soc/bcm2835/raspberrypi-firmware.h b/include/soc/bcm2835/raspberrypi-firmware.h
index 8ee8991aa099..c4a5c9e9fb47 100644
--- a/include/soc/bcm2835/raspberrypi-firmware.h
+++ b/include/soc/bcm2835/raspberrypi-firmware.h
@@ -75,6 +75,7 @@ enum rpi_firmware_property_tag {
RPI_FIRMWARE_GET_EDID_BLOCK = 0x00030020,
RPI_FIRMWARE_GET_CUSTOMER_OTP = 0x00030021,
RPI_FIRMWARE_GET_DOMAIN_STATE = 0x00030030,
+ RPI_FIRMWARE_GET_THROTTLED = 0x00030046,
RPI_FIRMWARE_SET_CLOCK_STATE = 0x00038001,
RPI_FIRMWARE_SET_CLOCK_RATE = 0x00038002,
RPI_FIRMWARE_SET_VOLTAGE = 0x00038003,
diff --git a/include/soc/fsl/dpaa2-fd.h b/include/soc/fsl/dpaa2-fd.h
new file mode 100644
index 000000000000..2576abaa7779
--- /dev/null
+++ b/include/soc/fsl/dpaa2-fd.h
@@ -0,0 +1,438 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/*
+ * Copyright 2014-2016 Freescale Semiconductor Inc.
+ * Copyright 2016 NXP
+ *
+ */
+#ifndef __FSL_DPAA2_FD_H
+#define __FSL_DPAA2_FD_H
+
+#include <linux/kernel.h>
+
+/**
+ * DOC: DPAA2 FD - Frame Descriptor APIs for DPAA2
+ *
+ * Frame Descriptors (FDs) are used to describe frame data in the DPAA2.
+ * Frames can be enqueued to and dequeued from Frame Queues (FQs), which are consumed
+ * by the various DPAA accelerators (WRIOP, SEC, PME, DCE)
+ *
+ * There are three types of frames: single, scatter gather, and frame lists.
+ *
+ * The set of APIs in this file must be used to create, manipulate and
+ * query Frame Descriptors.
+ */
+
+/**
+ * struct dpaa2_fd - Struct describing FDs
+ * @words: for easier/faster copying the whole FD structure
+ * @addr: address in the FD
+ * @len: length in the FD
+ * @bpid: buffer pool ID
+ * @format_offset: format, offset, and short-length fields
+ * @frc: frame context
+ * @ctrl: control bits...including dd, sc, va, err, etc
+ * @flc: flow context address
+ *
+ * This structure represents the basic Frame Descriptor used in the system.
+ */
+struct dpaa2_fd {
+ union {
+ u32 words[8];
+ struct dpaa2_fd_simple {
+ __le64 addr;
+ __le32 len;
+ __le16 bpid;
+ __le16 format_offset;
+ __le32 frc;
+ __le32 ctrl;
+ __le64 flc;
+ } simple;
+ };
+};
+
+#define FD_SHORT_LEN_FLAG_MASK 0x1
+#define FD_SHORT_LEN_FLAG_SHIFT 14
+#define FD_SHORT_LEN_MASK 0x3FFFF
+#define FD_OFFSET_MASK 0x0FFF
+#define FD_FORMAT_MASK 0x3
+#define FD_FORMAT_SHIFT 12
+#define FD_BPID_MASK 0x3FFF
+#define SG_SHORT_LEN_FLAG_MASK 0x1
+#define SG_SHORT_LEN_FLAG_SHIFT 14
+#define SG_SHORT_LEN_MASK 0x1FFFF
+#define SG_OFFSET_MASK 0x0FFF
+#define SG_FORMAT_MASK 0x3
+#define SG_FORMAT_SHIFT 12
+#define SG_BPID_MASK 0x3FFF
+#define SG_FINAL_FLAG_MASK 0x1
+#define SG_FINAL_FLAG_SHIFT 15
+
+/* Error bits in FD CTRL */
+#define FD_CTRL_ERR_MASK 0x000000FF
+#define FD_CTRL_UFD 0x00000004
+#define FD_CTRL_SBE 0x00000008
+#define FD_CTRL_FLC 0x00000010
+#define FD_CTRL_FSE 0x00000020
+#define FD_CTRL_FAERR 0x00000040
+
+/* Annotation bits in FD CTRL */
+#define FD_CTRL_PTA 0x00800000
+#define FD_CTRL_PTV1 0x00400000
+
+enum dpaa2_fd_format {
+ dpaa2_fd_single = 0,
+ dpaa2_fd_list,
+ dpaa2_fd_sg
+};
+
+/**
+ * dpaa2_fd_get_addr() - get the addr field of frame descriptor
+ * @fd: the given frame descriptor
+ *
+ * Return the address in the frame descriptor.
+ */
+static inline dma_addr_t dpaa2_fd_get_addr(const struct dpaa2_fd *fd)
+{
+ return (dma_addr_t)le64_to_cpu(fd->simple.addr);
+}
+
+/**
+ * dpaa2_fd_set_addr() - Set the addr field of frame descriptor
+ * @fd: the given frame descriptor
+ * @addr: the address needs to be set in frame descriptor
+ */
+static inline void dpaa2_fd_set_addr(struct dpaa2_fd *fd, dma_addr_t addr)
+{
+ fd->simple.addr = cpu_to_le64(addr);
+}
+
+/**
+ * dpaa2_fd_get_frc() - Get the frame context in the frame descriptor
+ * @fd: the given frame descriptor
+ *
+ * Return the frame context field in the frame descriptor.
+ */
+static inline u32 dpaa2_fd_get_frc(const struct dpaa2_fd *fd)
+{
+ return le32_to_cpu(fd->simple.frc);
+}
+
+/**
+ * dpaa2_fd_set_frc() - Set the frame context in the frame descriptor
+ * @fd: the given frame descriptor
+ * @frc: the frame context needs to be set in frame descriptor
+ */
+static inline void dpaa2_fd_set_frc(struct dpaa2_fd *fd, u32 frc)
+{
+ fd->simple.frc = cpu_to_le32(frc);
+}
+
+/**
+ * dpaa2_fd_get_ctrl() - Get the control bits in the frame descriptor
+ * @fd: the given frame descriptor
+ *
+ * Return the control bits field in the frame descriptor.
+ */
+static inline u32 dpaa2_fd_get_ctrl(const struct dpaa2_fd *fd)
+{
+ return le32_to_cpu(fd->simple.ctrl);
+}
+
+/**
+ * dpaa2_fd_set_ctrl() - Set the control bits in the frame descriptor
+ * @fd: the given frame descriptor
+ * @ctrl: the control bits to be set in the frame descriptor
+ */
+static inline void dpaa2_fd_set_ctrl(struct dpaa2_fd *fd, u32 ctrl)
+{
+ fd->simple.ctrl = cpu_to_le32(ctrl);
+}
+
+/**
+ * dpaa2_fd_get_flc() - Get the flow context in the frame descriptor
+ * @fd: the given frame descriptor
+ *
+ * Return the flow context in the frame descriptor.
+ */
+static inline dma_addr_t dpaa2_fd_get_flc(const struct dpaa2_fd *fd)
+{
+ return (dma_addr_t)le64_to_cpu(fd->simple.flc);
+}
+
+/**
+ * dpaa2_fd_set_flc() - Set the flow context field of frame descriptor
+ * @fd: the given frame descriptor
+ * @flc_addr: the flow context needs to be set in frame descriptor
+ */
+static inline void dpaa2_fd_set_flc(struct dpaa2_fd *fd, dma_addr_t flc_addr)
+{
+ fd->simple.flc = cpu_to_le64(flc_addr);
+}
+
+static inline bool dpaa2_fd_short_len(const struct dpaa2_fd *fd)
+{
+ return !!((le16_to_cpu(fd->simple.format_offset) >>
+ FD_SHORT_LEN_FLAG_SHIFT) & FD_SHORT_LEN_FLAG_MASK);
+}
+
+/**
+ * dpaa2_fd_get_len() - Get the length in the frame descriptor
+ * @fd: the given frame descriptor
+ *
+ * Return the length field in the frame descriptor.
+ */
+static inline u32 dpaa2_fd_get_len(const struct dpaa2_fd *fd)
+{
+ if (dpaa2_fd_short_len(fd))
+ return le32_to_cpu(fd->simple.len) & FD_SHORT_LEN_MASK;
+
+ return le32_to_cpu(fd->simple.len);
+}
+
+/**
+ * dpaa2_fd_set_len() - Set the length field of frame descriptor
+ * @fd: the given frame descriptor
+ * @len: the length needs to be set in frame descriptor
+ */
+static inline void dpaa2_fd_set_len(struct dpaa2_fd *fd, u32 len)
+{
+ fd->simple.len = cpu_to_le32(len);
+}
+
+/**
+ * dpaa2_fd_get_offset() - Get the offset field in the frame descriptor
+ * @fd: the given frame descriptor
+ *
+ * Return the offset.
+ */
+static inline uint16_t dpaa2_fd_get_offset(const struct dpaa2_fd *fd)
+{
+ return le16_to_cpu(fd->simple.format_offset) & FD_OFFSET_MASK;
+}
+
+/**
+ * dpaa2_fd_set_offset() - Set the offset field of frame descriptor
+ * @fd: the given frame descriptor
+ * @offset: the offset needs to be set in frame descriptor
+ */
+static inline void dpaa2_fd_set_offset(struct dpaa2_fd *fd, uint16_t offset)
+{
+ fd->simple.format_offset &= cpu_to_le16(~FD_OFFSET_MASK);
+ fd->simple.format_offset |= cpu_to_le16(offset);
+}
+
+/**
+ * dpaa2_fd_get_format() - Get the format field in the frame descriptor
+ * @fd: the given frame descriptor
+ *
+ * Return the format.
+ */
+static inline enum dpaa2_fd_format dpaa2_fd_get_format(
+ const struct dpaa2_fd *fd)
+{
+ return (enum dpaa2_fd_format)((le16_to_cpu(fd->simple.format_offset)
+ >> FD_FORMAT_SHIFT) & FD_FORMAT_MASK);
+}
+
+/**
+ * dpaa2_fd_set_format() - Set the format field of frame descriptor
+ * @fd: the given frame descriptor
+ * @format: the format needs to be set in frame descriptor
+ */
+static inline void dpaa2_fd_set_format(struct dpaa2_fd *fd,
+ enum dpaa2_fd_format format)
+{
+ fd->simple.format_offset &=
+ cpu_to_le16(~(FD_FORMAT_MASK << FD_FORMAT_SHIFT));
+ fd->simple.format_offset |= cpu_to_le16(format << FD_FORMAT_SHIFT);
+}
+
+/**
+ * dpaa2_fd_get_bpid() - Get the bpid field in the frame descriptor
+ * @fd: the given frame descriptor
+ *
+ * Return the buffer pool id.
+ */
+static inline uint16_t dpaa2_fd_get_bpid(const struct dpaa2_fd *fd)
+{
+ return le16_to_cpu(fd->simple.bpid) & FD_BPID_MASK;
+}
+
+/**
+ * dpaa2_fd_set_bpid() - Set the bpid field of frame descriptor
+ * @fd: the given frame descriptor
+ * @bpid: buffer pool id to be set
+ */
+static inline void dpaa2_fd_set_bpid(struct dpaa2_fd *fd, uint16_t bpid)
+{
+ fd->simple.bpid &= cpu_to_le16(~(FD_BPID_MASK));
+ fd->simple.bpid |= cpu_to_le16(bpid);
+}
+
+/**
+ * struct dpaa2_sg_entry - the scatter-gathering structure
+ * @addr: address of the sg entry
+ * @len: length in this sg entry
+ * @bpid: buffer pool id
+ * @format_offset: format and offset fields
+ */
+struct dpaa2_sg_entry {
+ __le64 addr;
+ __le32 len;
+ __le16 bpid;
+ __le16 format_offset;
+};
+
+enum dpaa2_sg_format {
+ dpaa2_sg_single = 0,
+ dpaa2_sg_frame_data,
+ dpaa2_sg_sgt_ext
+};
+
+/* Accessors for SG entry fields */
+
+/**
+ * dpaa2_sg_get_addr() - Get the address from SG entry
+ * @sg: the given scatter-gathering object
+ *
+ * Return the address.
+ */
+static inline dma_addr_t dpaa2_sg_get_addr(const struct dpaa2_sg_entry *sg)
+{
+ return (dma_addr_t)le64_to_cpu(sg->addr);
+}
+
+/**
+ * dpaa2_sg_set_addr() - Set the address in SG entry
+ * @sg: the given scatter-gathering object
+ * @addr: the address to be set
+ */
+static inline void dpaa2_sg_set_addr(struct dpaa2_sg_entry *sg, dma_addr_t addr)
+{
+ sg->addr = cpu_to_le64(addr);
+}
+
+static inline bool dpaa2_sg_short_len(const struct dpaa2_sg_entry *sg)
+{
+ return !!((le16_to_cpu(sg->format_offset) >> SG_SHORT_LEN_FLAG_SHIFT)
+ & SG_SHORT_LEN_FLAG_MASK);
+}
+
+/**
+ * dpaa2_sg_get_len() - Get the length in SG entry
+ * @sg: the given scatter-gathering object
+ *
+ * Return the length.
+ */
+static inline u32 dpaa2_sg_get_len(const struct dpaa2_sg_entry *sg)
+{
+ if (dpaa2_sg_short_len(sg))
+ return le32_to_cpu(sg->len) & SG_SHORT_LEN_MASK;
+
+ return le32_to_cpu(sg->len);
+}
+
+/**
+ * dpaa2_sg_set_len() - Set the length in SG entry
+ * @sg: the given scatter-gathering object
+ * @len: the length to be set
+ */
+static inline void dpaa2_sg_set_len(struct dpaa2_sg_entry *sg, u32 len)
+{
+ sg->len = cpu_to_le32(len);
+}
+
+/**
+ * dpaa2_sg_get_offset() - Get the offset in SG entry
+ * @sg: the given scatter-gathering object
+ *
+ * Return the offset.
+ */
+static inline u16 dpaa2_sg_get_offset(const struct dpaa2_sg_entry *sg)
+{
+ return le16_to_cpu(sg->format_offset) & SG_OFFSET_MASK;
+}
+
+/**
+ * dpaa2_sg_set_offset() - Set the offset in SG entry
+ * @sg: the given scatter-gathering object
+ * @offset: the offset to be set
+ */
+static inline void dpaa2_sg_set_offset(struct dpaa2_sg_entry *sg,
+ u16 offset)
+{
+ sg->format_offset &= cpu_to_le16(~SG_OFFSET_MASK);
+ sg->format_offset |= cpu_to_le16(offset);
+}
+
+/**
+ * dpaa2_sg_get_format() - Get the SG format in SG entry
+ * @sg: the given scatter-gathering object
+ *
+ * Return the format.
+ */
+static inline enum dpaa2_sg_format
+ dpaa2_sg_get_format(const struct dpaa2_sg_entry *sg)
+{
+ return (enum dpaa2_sg_format)((le16_to_cpu(sg->format_offset)
+ >> SG_FORMAT_SHIFT) & SG_FORMAT_MASK);
+}
+
+/**
+ * dpaa2_sg_set_format() - Set the SG format in SG entry
+ * @sg: the given scatter-gathering object
+ * @format: the format to be set
+ */
+static inline void dpaa2_sg_set_format(struct dpaa2_sg_entry *sg,
+ enum dpaa2_sg_format format)
+{
+ sg->format_offset &= cpu_to_le16(~(SG_FORMAT_MASK << SG_FORMAT_SHIFT));
+ sg->format_offset |= cpu_to_le16(format << SG_FORMAT_SHIFT);
+}
+
+/**
+ * dpaa2_sg_get_bpid() - Get the buffer pool id in SG entry
+ * @sg: the given scatter-gathering object
+ *
+ * Return the bpid.
+ */
+static inline u16 dpaa2_sg_get_bpid(const struct dpaa2_sg_entry *sg)
+{
+ return le16_to_cpu(sg->bpid) & SG_BPID_MASK;
+}
+
+/**
+ * dpaa2_sg_set_bpid() - Set the buffer pool id in SG entry
+ * @sg: the given scatter-gathering object
+ * @bpid: the bpid to be set
+ */
+static inline void dpaa2_sg_set_bpid(struct dpaa2_sg_entry *sg, u16 bpid)
+{
+ sg->bpid &= cpu_to_le16(~(SG_BPID_MASK));
+ sg->bpid |= cpu_to_le16(bpid);
+}
+
+/**
+ * dpaa2_sg_is_final() - Check final bit in SG entry
+ * @sg: the given scatter-gathering object
+ *
+ * Return bool.
+ */
+static inline bool dpaa2_sg_is_final(const struct dpaa2_sg_entry *sg)
+{
+ return !!(le16_to_cpu(sg->format_offset) >> SG_FINAL_FLAG_SHIFT);
+}
+
+/**
+ * dpaa2_sg_set_final() - Set the final bit in SG entry
+ * @sg: the given scatter-gathering object
+ * @final: the final boolean to be set
+ */
+static inline void dpaa2_sg_set_final(struct dpaa2_sg_entry *sg, bool final)
+{
+ sg->format_offset &= cpu_to_le16((~(SG_FINAL_FLAG_MASK
+ << SG_FINAL_FLAG_SHIFT)) & 0xFFFF);
+ sg->format_offset |= cpu_to_le16(final << SG_FINAL_FLAG_SHIFT);
+}
+
+#endif /* __FSL_DPAA2_FD_H */
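
As a usage sketch (not part of the header), filling a single-buffer frame descriptor with the accessors above, where 'iova' is assumed to be a DMA-mapped buffer address:

static void my_build_single_fd(struct dpaa2_fd *fd, dma_addr_t iova,
                               u32 len, u16 bpid)
{
        memset(fd, 0, sizeof(*fd));
        dpaa2_fd_set_addr(fd, iova);
        dpaa2_fd_set_offset(fd, 0);
        dpaa2_fd_set_bpid(fd, bpid);
        dpaa2_fd_set_len(fd, len);
        dpaa2_fd_set_format(fd, dpaa2_fd_single);
}
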
diff --git a/include/soc/fsl/dpaa2-global.h b/include/soc/fsl/dpaa2-global.h
new file mode 100644
index 000000000000..9bc0713346a8
--- /dev/null
+++ b/include/soc/fsl/dpaa2-global.h
@@ -0,0 +1,177 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/*
+ * Copyright 2014-2016 Freescale Semiconductor Inc.
+ * Copyright 2016 NXP
+ *
+ */
+#ifndef __FSL_DPAA2_GLOBAL_H
+#define __FSL_DPAA2_GLOBAL_H
+
+#include <linux/types.h>
+#include <linux/cpumask.h>
+#include "dpaa2-fd.h"
+
+struct dpaa2_dq {
+ union {
+ struct common {
+ u8 verb;
+ u8 reserved[63];
+ } common;
+ struct dq {
+ u8 verb;
+ u8 stat;
+ __le16 seqnum;
+ __le16 oprid;
+ u8 reserved;
+ u8 tok;
+ __le32 fqid;
+ u32 reserved2;
+ __le32 fq_byte_cnt;
+ __le32 fq_frm_cnt;
+ __le64 fqd_ctx;
+ u8 fd[32];
+ } dq;
+ struct scn {
+ u8 verb;
+ u8 stat;
+ u8 state;
+ u8 reserved;
+ __le32 rid_tok;
+ __le64 ctx;
+ } scn;
+ };
+};
+
+/* Parsing frame dequeue results */
+/* FQ empty */
+#define DPAA2_DQ_STAT_FQEMPTY 0x80
+/* FQ held active */
+#define DPAA2_DQ_STAT_HELDACTIVE 0x40
+/* FQ force eligible */
+#define DPAA2_DQ_STAT_FORCEELIGIBLE 0x20
+/* valid frame */
+#define DPAA2_DQ_STAT_VALIDFRAME 0x10
+/* FQ ODP enable */
+#define DPAA2_DQ_STAT_ODPVALID 0x04
+/* volatile dequeue */
+#define DPAA2_DQ_STAT_VOLATILE 0x02
+/* volatile dequeue command is expired */
+#define DPAA2_DQ_STAT_EXPIRED 0x01
+
+#define DQ_FQID_MASK 0x00FFFFFF
+#define DQ_FRAME_COUNT_MASK 0x00FFFFFF
+
+/**
+ * dpaa2_dq_flags() - Get the stat field of dequeue response
+ * @dq: the dequeue result.
+ */
+static inline u32 dpaa2_dq_flags(const struct dpaa2_dq *dq)
+{
+ return dq->dq.stat;
+}
+
+/**
+ * dpaa2_dq_is_pull() - Check whether the dq response is from a pull
+ * command.
+ * @dq: the dequeue result
+ *
+ * Return 1 for volatile (pull) dequeue, 0 for static dequeue.
+ */
+static inline int dpaa2_dq_is_pull(const struct dpaa2_dq *dq)
+{
+ return (int)(dpaa2_dq_flags(dq) & DPAA2_DQ_STAT_VOLATILE);
+}
+
+/**
+ * dpaa2_dq_is_pull_complete() - Check whether the pull command is completed.
+ * @dq: the dequeue result
+ *
+ * Return boolean.
+ */
+static inline bool dpaa2_dq_is_pull_complete(const struct dpaa2_dq *dq)
+{
+ return !!(dpaa2_dq_flags(dq) & DPAA2_DQ_STAT_EXPIRED);
+}
+
+/**
+ * dpaa2_dq_seqnum() - Get the seqnum field in dequeue response
+ * @dq: the dequeue result
+ *
+ * seqnum is valid only if VALIDFRAME flag is TRUE
+ *
+ * Return seqnum.
+ */
+static inline u16 dpaa2_dq_seqnum(const struct dpaa2_dq *dq)
+{
+ return le16_to_cpu(dq->dq.seqnum);
+}
+
+/**
+ * dpaa2_dq_odpid() - Get the odpid field in dequeue response
+ * @dq: the dequeue result
+ *
+ * odpid is valid only if ODPVALID flag is TRUE.
+ *
+ * Return odpid.
+ */
+static inline u16 dpaa2_dq_odpid(const struct dpaa2_dq *dq)
+{
+ return le16_to_cpu(dq->dq.oprid);
+}
+
+/**
+ * dpaa2_dq_fqid() - Get the fqid in dequeue response
+ * @dq: the dequeue result
+ *
+ * Return fqid.
+ */
+static inline u32 dpaa2_dq_fqid(const struct dpaa2_dq *dq)
+{
+ return le32_to_cpu(dq->dq.fqid) & DQ_FQID_MASK;
+}
+
+/**
+ * dpaa2_dq_byte_count() - Get the byte count in dequeue response
+ * @dq: the dequeue result
+ *
+ * Return the byte count remaining in the FQ.
+ */
+static inline u32 dpaa2_dq_byte_count(const struct dpaa2_dq *dq)
+{
+ return le32_to_cpu(dq->dq.fq_byte_cnt);
+}
+
+/**
+ * dpaa2_dq_frame_count() - Get the frame count in dequeue response
+ * @dq: the dequeue result
+ *
+ * Return the frame count remaining in the FQ.
+ */
+static inline u32 dpaa2_dq_frame_count(const struct dpaa2_dq *dq)
+{
+ return le32_to_cpu(dq->dq.fq_frm_cnt) & DQ_FRAME_COUNT_MASK;
+}
+
+/**
+ * dpaa2_dq_fqd_ctx() - Get the frame queue context in dequeue response
+ * @dq: the dequeue result
+ *
+ * Return the frame queue context.
+ */
+static inline u64 dpaa2_dq_fqd_ctx(const struct dpaa2_dq *dq)
+{
+ return le64_to_cpu(dq->dq.fqd_ctx);
+}
+
+/**
+ * dpaa2_dq_fd() - Get the frame descriptor in dequeue response
+ * @dq: the dequeue result
+ *
+ * Return the frame descriptor.
+ */
+static inline const struct dpaa2_fd *dpaa2_dq_fd(const struct dpaa2_dq *dq)
+{
+ return (const struct dpaa2_fd *)&dq->dq.fd[0];
+}
+
+#endif /* __FSL_DPAA2_GLOBAL_H */
diff --git a/include/soc/fsl/dpaa2-io.h b/include/soc/fsl/dpaa2-io.h
new file mode 100644
index 000000000000..ab51e40d11db
--- /dev/null
+++ b/include/soc/fsl/dpaa2-io.h
@@ -0,0 +1,115 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/*
+ * Copyright 2014-2016 Freescale Semiconductor Inc.
+ * Copyright NXP
+ *
+ */
+#ifndef __FSL_DPAA2_IO_H
+#define __FSL_DPAA2_IO_H
+
+#include <linux/types.h>
+#include <linux/cpumask.h>
+#include <linux/irqreturn.h>
+
+#include "dpaa2-fd.h"
+#include "dpaa2-global.h"
+
+struct dpaa2_io;
+struct dpaa2_io_store;
+struct device;
+
+/**
+ * DOC: DPIO Service
+ *
+ * The DPIO service provides APIs for users to interact with the datapath
+ * by enqueueing and dequeuing frame descriptors.
+ *
+ * The following set of APIs can be used to enqueue and dequeue frames
+ * as well as producing notification callbacks when data is available
+ * for dequeue.
+ */
+
+#define DPAA2_IO_ANY_CPU -1
+
+/**
+ * struct dpaa2_io_desc - The DPIO descriptor
+ * @receives_notifications: Use notification mode. Non-zero if the DPIO
+ * has a channel.
+ * @has_8prio: Set to non-zero for channel with 8 priority WQs. Ignored
+ * unless receives_notifications is TRUE.
+ * @cpu: The cpu index that at least interrupt handlers will
+ * execute on.
+ * @stash_affinity: The stash affinity for this portal, favouring 'cpu'
+ * @regs_cena: The cache enabled regs.
+ * @regs_cinh: The cache inhibited regs
+ * @dpio_id: The dpio index
+ * @qman_version: The qman version
+ *
+ * Describes the attributes and features of the DPIO object.
+ */
+struct dpaa2_io_desc {
+ int receives_notifications;
+ int has_8prio;
+ int cpu;
+ void *regs_cena;
+ void __iomem *regs_cinh;
+ int dpio_id;
+ u32 qman_version;
+};
+
+struct dpaa2_io *dpaa2_io_create(const struct dpaa2_io_desc *desc);
+
+void dpaa2_io_down(struct dpaa2_io *d);
+
+irqreturn_t dpaa2_io_irq(struct dpaa2_io *obj);
+
+struct dpaa2_io *dpaa2_io_service_select(int cpu);
+
+/**
+ * struct dpaa2_io_notification_ctx - The DPIO notification context structure
+ * @cb: The callback to be invoked when the notification arrives
+ * @is_cdan: Zero for FQDAN, non-zero for CDAN
+ * @id: FQID or channel ID, needed for rearm
+ * @desired_cpu: The cpu on which the notifications will show up. Use
+ * DPAA2_IO_ANY_CPU if you don't care
+ * @dpio_id: The dpio index
+ * @qman64: The 64-bit context value that shows up in the FQDAN/CDAN.
+ * @node: The list node
+ * @dpio_private: The dpio object internal to dpio_service
+ *
+ * Used when a FQDAN/CDAN registration is made by drivers.
+ */
+struct dpaa2_io_notification_ctx {
+ void (*cb)(struct dpaa2_io_notification_ctx *ctx);
+ int is_cdan;
+ u32 id;
+ int desired_cpu;
+ int dpio_id;
+ u64 qman64;
+ struct list_head node;
+ void *dpio_private;
+};
+
+int dpaa2_io_service_register(struct dpaa2_io *service,
+ struct dpaa2_io_notification_ctx *ctx);
+void dpaa2_io_service_deregister(struct dpaa2_io *service,
+ struct dpaa2_io_notification_ctx *ctx);
+int dpaa2_io_service_rearm(struct dpaa2_io *service,
+ struct dpaa2_io_notification_ctx *ctx);
+
+int dpaa2_io_service_pull_channel(struct dpaa2_io *d, u32 channelid,
+ struct dpaa2_io_store *s);
+
+int dpaa2_io_service_enqueue_qd(struct dpaa2_io *d, u32 qdid, u8 prio,
+ u16 qdbin, const struct dpaa2_fd *fd);
+int dpaa2_io_service_release(struct dpaa2_io *d, u32 bpid,
+ const u64 *buffers, unsigned int num_buffers);
+int dpaa2_io_service_acquire(struct dpaa2_io *d, u32 bpid,
+ u64 *buffers, unsigned int num_buffers);
+
+struct dpaa2_io_store *dpaa2_io_store_create(unsigned int max_frames,
+ struct device *dev);
+void dpaa2_io_store_destroy(struct dpaa2_io_store *s);
+struct dpaa2_dq *dpaa2_io_store_next(struct dpaa2_io_store *s, int *is_last);
+
+#endif /* __FSL_DPAA2_IO_H */
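
A rough usage sketch of the DPIO service API above, assuming a hypothetical driver that owns a notification channel. The callback body is left empty, error unwinding is trimmed, and the pull/poll completion handling is simplified; none of the example_* names come from this patch.

    #include <linux/device.h>
    #include <soc/fsl/dpaa2-io.h>

    /* channel-data-available callback, left empty for the sketch */
    static void example_cdan_cb(struct dpaa2_io_notification_ctx *ctx)
    {
            /* data has arrived on the channel; kick the driver's poll loop */
    }

    static int example_channel_setup(struct dpaa2_io *io, u32 channel_id,
                                     struct dpaa2_io_notification_ctx *ctx,
                                     struct device *dev)
    {
            struct dpaa2_io_store *store;
            struct dpaa2_dq *dq;
            int err, is_last = 0;

            /* ctx must outlive the registration; the caller owns it */
            ctx->cb = example_cdan_cb;
            ctx->is_cdan = 1;
            ctx->id = channel_id;
            ctx->desired_cpu = DPAA2_IO_ANY_CPU;

            err = dpaa2_io_service_register(io, ctx);
            if (err)
                    return err;

            store = dpaa2_io_store_create(16, dev);
            if (!store)
                    return -ENOMEM;

            /* volatile dequeue of up to 16 entries into the store */
            err = dpaa2_io_service_pull_channel(io, channel_id, store);
            if (!err) {
                    do {
                            dq = dpaa2_io_store_next(store, &is_last);
                            if (dq) {
                                    /* hand dpaa2_dq_fd(dq) to the frame handler */
                            }
                    } while (!is_last);
            }

            dpaa2_io_store_destroy(store);
            return dpaa2_io_service_rearm(io, ctx);
    }
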
diff --git a/include/soc/qcom/rpmh.h b/include/soc/qcom/rpmh.h
new file mode 100644
index 000000000000..619e07c75da9
--- /dev/null
+++ b/include/soc/qcom/rpmh.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __SOC_QCOM_RPMH_H__
+#define __SOC_QCOM_RPMH_H__
+
+#include <soc/qcom/tcs.h>
+#include <linux/platform_device.h>
+
+
+#if IS_ENABLED(CONFIG_QCOM_RPMH)
+int rpmh_write(const struct device *dev, enum rpmh_state state,
+ const struct tcs_cmd *cmd, u32 n);
+
+int rpmh_write_async(const struct device *dev, enum rpmh_state state,
+ const struct tcs_cmd *cmd, u32 n);
+
+int rpmh_write_batch(const struct device *dev, enum rpmh_state state,
+ const struct tcs_cmd *cmd, u32 *n);
+
+int rpmh_flush(const struct device *dev);
+
+int rpmh_invalidate(const struct device *dev);
+
+#else
+
+static inline int rpmh_write(const struct device *dev, enum rpmh_state state,
+ const struct tcs_cmd *cmd, u32 n)
+{ return -ENODEV; }
+
+static inline int rpmh_write_async(const struct device *dev,
+ enum rpmh_state state,
+ const struct tcs_cmd *cmd, u32 n)
+{ return -ENODEV; }
+
+static inline int rpmh_write_batch(const struct device *dev,
+ enum rpmh_state state,
+ const struct tcs_cmd *cmd, u32 *n)
+{ return -ENODEV; }
+
+static inline int rpmh_flush(const struct device *dev)
+{ return -ENODEV; }
+
+static inline int rpmh_invalidate(const struct device *dev)
+{ return -ENODEV; }
+
+#endif /* CONFIG_QCOM_RPMH */
+
+#endif /* __SOC_QCOM_RPMH_H__ */
diff --git a/include/soc/qcom/tcs.h b/include/soc/qcom/tcs.h
new file mode 100644
index 000000000000..262876a59e86
--- /dev/null
+++ b/include/soc/qcom/tcs.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef __SOC_QCOM_TCS_H__
+#define __SOC_QCOM_TCS_H__
+
+#define MAX_RPMH_PAYLOAD 16
+
+/**
+ * rpmh_state: state for the request
+ *
+ * RPMH_SLEEP_STATE: State of the resource when the processor subsystem
+ * is powered down. There is no client using the
+ * resource actively.
+ * RPMH_WAKE_ONLY_STATE: Resume resource state to the value previously
+ * requested before the processor was powered down.
+ * RPMH_ACTIVE_ONLY_STATE: Active or AMC mode requests. Resource state
+ * is aggregated immediately.
+ */
+enum rpmh_state {
+ RPMH_SLEEP_STATE,
+ RPMH_WAKE_ONLY_STATE,
+ RPMH_ACTIVE_ONLY_STATE,
+};
+
+/**
+ * struct tcs_cmd: an individual request to RPMH.
+ *
+ * @addr: the address of the resource slv_id:18:16 | offset:0:15
+ * @data: the resource state request
+ * @wait: wait for this request to be complete before sending the next
+ */
+struct tcs_cmd {
+ u32 addr;
+ u32 data;
+ u32 wait;
+};
+
+/**
+ * struct tcs_request: A set of tcs_cmds sent together in a TCS
+ *
+ * @state: state for the request.
+ * @wait_for_compl: wait until we get a response from the h/w accelerator
+ * @num_cmds: the number of @cmds in this request
+ * @cmds: an array of tcs_cmds
+ */
+struct tcs_request {
+ enum rpmh_state state;
+ u32 wait_for_compl;
+ u32 num_cmds;
+ struct tcs_cmd *cmds;
+};
+
+#endif /* __SOC_QCOM_TCS_H__ */
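
To show how the two new headers fit together, a hedged sketch of a consumer building one TCS command and handing it to RPMh; the resource address and data value are made up for the example, not taken from any real board.

    #include <soc/qcom/rpmh.h>
    #include <soc/qcom/tcs.h>

    static int example_vote(struct device *dev)
    {
            struct tcs_cmd cmd = {
                    .addr = 0x30000,        /* made-up resource address */
                    .data = 0x1,            /* made-up resource state */
                    .wait = 1,              /* complete before the next command */
            };

            /*
             * Blocking active-state request; rpmh_write_async() fires and
             * forgets, and rpmh_write_batch() sends several counted sets.
             */
            return rpmh_write(dev, RPMH_ACTIVE_ONLY_STATE, &cmd, 1);
    }
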
diff --git a/include/sound/ac97/codec.h b/include/sound/ac97/codec.h
index ec04be9ab119..9792d25fa369 100644
--- a/include/sound/ac97/codec.h
+++ b/include/sound/ac97/codec.h
@@ -1,10 +1,8 @@
-/*
- * Copyright (C) 2016 Robert Jarzmik <robert.jarzmik@free.fr>
+/* SPDX-License-Identifier: GPL-2.0
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
+ * Copyright (C) 2016 Robert Jarzmik <robert.jarzmik@free.fr>
*/
+
#ifndef __SOUND_AC97_CODEC2_H
#define __SOUND_AC97_CODEC2_H
diff --git a/include/sound/ac97/compat.h b/include/sound/ac97/compat.h
index 1351cba40048..57e19afa31ab 100644
--- a/include/sound/ac97/compat.h
+++ b/include/sound/ac97/compat.h
@@ -1,14 +1,11 @@
-/*
- * Copyright (C) 2016 Robert Jarzmik <robert.jarzmik@free.fr>
+/* SPDX-License-Identifier: GPL-2.0
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
+ * Copyright (C) 2016 Robert Jarzmik <robert.jarzmik@free.fr>
*
* This file is for backward compatibility with snd_ac97 structure and its
* multiple usages, such as the snd_ac97_bus and snd_ac97_build_ops.
- *
*/
+
#ifndef AC97_COMPAT_H
#define AC97_COMPAT_H
diff --git a/include/sound/ac97/controller.h b/include/sound/ac97/controller.h
index b36ecdd64f14..06b5afb7fa6b 100644
--- a/include/sound/ac97/controller.h
+++ b/include/sound/ac97/controller.h
@@ -1,10 +1,8 @@
-/*
- * Copyright (C) 2016 Robert Jarzmik <robert.jarzmik@free.fr>
+/* SPDX-License-Identifier: GPL-2.0
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
+ * Copyright (C) 2016 Robert Jarzmik <robert.jarzmik@free.fr>
*/
+
#ifndef AC97_CONTROLLER_H
#define AC97_CONTROLLER_H
diff --git a/include/sound/ac97/regs.h b/include/sound/ac97/regs.h
index 9a4fa0c3264a..843f73f3705a 100644
--- a/include/sound/ac97/regs.h
+++ b/include/sound/ac97/regs.h
@@ -1,27 +1,11 @@
-/*
+/* SPDX-License-Identifier: GPL-2.0+
+ *
* Copyright (c) by Jaroslav Kysela <perex@perex.cz>
* Universal interface for Audio Codec '97
*
* For more details look to AC '97 component specification revision 2.1
* by Intel Corporation (http://developer.intel.com).
- *
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
-
/*
* AC'97 codec registers
*/
diff --git a/include/sound/ac97_codec.h b/include/sound/ac97_codec.h
index 89d311a503d3..cc383991c0fe 100644
--- a/include/sound/ac97_codec.h
+++ b/include/sound/ac97_codec.h
@@ -1,30 +1,15 @@
-#ifndef __SOUND_AC97_CODEC_H
-#define __SOUND_AC97_CODEC_H
-
-/*
+/* SPDX-License-Identifier: GPL-2.0+
+ *
* Copyright (c) by Jaroslav Kysela <perex@perex.cz>
* Universal interface for Audio Codec '97
*
* For more details look to AC '97 component specification revision 2.1
* by Intel Corporation (http://developer.intel.com).
- *
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
*/
+#ifndef __SOUND_AC97_CODEC_H
+#define __SOUND_AC97_CODEC_H
+
#include <linux/bitops.h>
#include <linux/device.h>
#include <linux/workqueue.h>
diff --git a/include/sound/compress_driver.h b/include/sound/compress_driver.h
index 9924bc9cbc7c..ea8c93bbb0e0 100644
--- a/include/sound/compress_driver.h
+++ b/include/sound/compress_driver.h
@@ -1,27 +1,12 @@
-/*
+/* SPDX-License-Identifier: GPL-2.0
+ *
* compress_driver.h - compress offload driver definations
*
* Copyright (C) 2011 Intel Corporation
* Authors: Vinod Koul <vinod.koul@linux.intel.com>
* Pierre-Louis Bossart <pierre-louis.bossart@linux.intel.com>
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
- *
- * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
- *
*/
+
#ifndef __COMPRESS_DRIVER_H
#define __COMPRESS_DRIVER_H
diff --git a/include/sound/dmaengine_pcm.h b/include/sound/dmaengine_pcm.h
index e3481eebdd98..2c4cfaa135a6 100644
--- a/include/sound/dmaengine_pcm.h
+++ b/include/sound/dmaengine_pcm.h
@@ -1,17 +1,9 @@
-/*
+/* SPDX-License-Identifier: GPL-2.0+
+ *
* Copyright (C) 2012, Analog Devices Inc.
* Author: Lars-Peter Clausen <lars@metafoo.de>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
- *
*/
+
#ifndef __SOUND_DMAENGINE_PCM_H__
#define __SOUND_DMAENGINE_PCM_H__
diff --git a/include/sound/hda_component.h b/include/sound/hda_component.h
new file mode 100644
index 000000000000..78626cde7081
--- /dev/null
+++ b/include/sound/hda_component.h
@@ -0,0 +1,61 @@
+// SPDX-License-Identifier: GPL-2.0
+// HD-Audio helpers to sync with DRM driver
+
+#ifndef __SOUND_HDA_COMPONENT_H
+#define __SOUND_HDA_COMPONENT_H
+
+#include <drm/drm_audio_component.h>
+
+#ifdef CONFIG_SND_HDA_COMPONENT
+int snd_hdac_set_codec_wakeup(struct hdac_bus *bus, bool enable);
+int snd_hdac_display_power(struct hdac_bus *bus, bool enable);
+int snd_hdac_sync_audio_rate(struct hdac_device *codec, hda_nid_t nid,
+ int dev_id, int rate);
+int snd_hdac_acomp_get_eld(struct hdac_device *codec, hda_nid_t nid, int dev_id,
+ bool *audio_enabled, char *buffer, int max_bytes);
+int snd_hdac_acomp_init(struct hdac_bus *bus,
+ const struct drm_audio_component_audio_ops *aops,
+ int (*match_master)(struct device *, void *),
+ size_t extra_size);
+int snd_hdac_acomp_exit(struct hdac_bus *bus);
+int snd_hdac_acomp_register_notifier(struct hdac_bus *bus,
+ const struct drm_audio_component_audio_ops *ops);
+#else
+static inline int snd_hdac_set_codec_wakeup(struct hdac_bus *bus, bool enable)
+{
+ return 0;
+}
+static inline int snd_hdac_display_power(struct hdac_bus *bus, bool enable)
+{
+ return 0;
+}
+static inline int snd_hdac_sync_audio_rate(struct hdac_device *codec,
+ hda_nid_t nid, int dev_id, int rate)
+{
+ return 0;
+}
+static inline int snd_hdac_acomp_get_eld(struct hdac_device *codec, hda_nid_t nid,
+ int dev_id, bool *audio_enabled,
+ char *buffer, int max_bytes)
+{
+ return -ENODEV;
+}
+static inline int snd_hdac_acomp_init(struct hdac_bus *bus,
+ const struct drm_audio_component_audio_ops *aops,
+ int (*match_master)(struct device *, void *),
+ size_t extra_size)
+{
+ return -ENODEV;
+}
+static inline int snd_hdac_acomp_exit(struct hdac_bus *bus)
+{
+ return 0;
+}
+static inline int snd_hdac_acomp_register_notifier(struct hdac_bus *bus,
+ const struct drm_audio_component_audio_ops *ops)
+{
+ return -ENODEV;
+}
+#endif
+
+#endif /* __SOUND_HDA_COMPONENT_H */
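
A sketch of how a HD-audio bus driver might bind the DRM audio component through the helpers above. The ops table contents, the match callback and the example_* names are placeholders for this illustration, not part of the patch.

    #include <sound/hda_component.h>

    /* placeholder: accept any master device for the purposes of the example */
    static int example_match_master(struct device *dev, void *data)
    {
            return 1;
    }

    static const struct drm_audio_component_audio_ops example_aops = {
            /* pin notification / port mapping hooks filled in by a real driver */
    };

    static int example_bind_component(struct hdac_bus *bus)
    {
            int err;

            err = snd_hdac_acomp_init(bus, &example_aops,
                                      example_match_master, 0);
            if (err < 0)
                    return err;

            /* power up the display side before touching HDMI/DP verbs */
            snd_hdac_display_power(bus, true);
            return 0;
    }
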
diff --git a/include/sound/hda_i915.h b/include/sound/hda_i915.h
index a94f5b6f92ac..6b79614a893b 100644
--- a/include/sound/hda_i915.h
+++ b/include/sound/hda_i915.h
@@ -5,54 +5,23 @@
#ifndef __SOUND_HDA_I915_H
#define __SOUND_HDA_I915_H
-#include <drm/i915_component.h>
+#include "hda_component.h"
#ifdef CONFIG_SND_HDA_I915
-int snd_hdac_set_codec_wakeup(struct hdac_bus *bus, bool enable);
-int snd_hdac_display_power(struct hdac_bus *bus, bool enable);
void snd_hdac_i915_set_bclk(struct hdac_bus *bus);
-int snd_hdac_sync_audio_rate(struct hdac_device *codec, hda_nid_t nid,
- int dev_id, int rate);
-int snd_hdac_acomp_get_eld(struct hdac_device *codec, hda_nid_t nid, int dev_id,
- bool *audio_enabled, char *buffer, int max_bytes);
int snd_hdac_i915_init(struct hdac_bus *bus);
-int snd_hdac_i915_exit(struct hdac_bus *bus);
-int snd_hdac_i915_register_notifier(const struct i915_audio_component_audio_ops *);
#else
-static inline int snd_hdac_set_codec_wakeup(struct hdac_bus *bus, bool enable)
-{
- return 0;
-}
-static inline int snd_hdac_display_power(struct hdac_bus *bus, bool enable)
-{
- return 0;
-}
static inline void snd_hdac_i915_set_bclk(struct hdac_bus *bus)
{
}
-static inline int snd_hdac_sync_audio_rate(struct hdac_device *codec,
- hda_nid_t nid, int dev_id, int rate)
-{
- return 0;
-}
-static inline int snd_hdac_acomp_get_eld(struct hdac_device *codec, hda_nid_t nid,
- int dev_id, bool *audio_enabled,
- char *buffer, int max_bytes)
-{
- return -ENODEV;
-}
static inline int snd_hdac_i915_init(struct hdac_bus *bus)
{
return -ENODEV;
}
+#endif
static inline int snd_hdac_i915_exit(struct hdac_bus *bus)
{
- return 0;
+ return snd_hdac_acomp_exit(bus);
}
-static inline int snd_hdac_i915_register_notifier(const struct i915_audio_component_audio_ops *ops)
-{
- return -ENODEV;
-}
-#endif
#endif /* __SOUND_HDA_I915_H */
diff --git a/include/sound/hdaudio.h b/include/sound/hdaudio.h
index c052afc27547..6f1e1f3b3063 100644
--- a/include/sound/hdaudio.h
+++ b/include/sound/hdaudio.h
@@ -8,8 +8,10 @@
#include <linux/device.h>
#include <linux/interrupt.h>
+#include <linux/pm_runtime.h>
#include <linux/timecounter.h>
#include <sound/core.h>
+#include <sound/pcm.h>
#include <sound/memalloc.h>
#include <sound/hda_verbs.h>
#include <drm/i915_component.h>
@@ -132,7 +134,7 @@ int snd_hdac_get_sub_nodes(struct hdac_device *codec, hda_nid_t nid,
hda_nid_t *start_id);
unsigned int snd_hdac_calc_stream_format(unsigned int rate,
unsigned int channels,
- unsigned int format,
+ snd_pcm_format_t format,
unsigned int maxbps,
unsigned short spdif_ctls);
int snd_hdac_query_supported_pcm(struct hdac_device *codec, hda_nid_t nid,
@@ -171,12 +173,38 @@ int snd_hdac_power_down(struct hdac_device *codec);
int snd_hdac_power_up_pm(struct hdac_device *codec);
int snd_hdac_power_down_pm(struct hdac_device *codec);
int snd_hdac_keep_power_up(struct hdac_device *codec);
+
+/* call this when entering the suspend/resume callbacks in a codec driver */
+static inline void snd_hdac_enter_pm(struct hdac_device *codec)
+{
+ atomic_inc(&codec->in_pm);
+}
+
+/* call this when leaving the suspend/resume callbacks in a codec driver */
+static inline void snd_hdac_leave_pm(struct hdac_device *codec)
+{
+ atomic_dec(&codec->in_pm);
+}
+
+static inline bool snd_hdac_is_in_pm(struct hdac_device *codec)
+{
+ return atomic_read(&codec->in_pm);
+}
+
+static inline bool snd_hdac_is_power_on(struct hdac_device *codec)
+{
+ return !pm_runtime_suspended(&codec->dev);
+}
#else
static inline int snd_hdac_power_up(struct hdac_device *codec) { return 0; }
static inline int snd_hdac_power_down(struct hdac_device *codec) { return 0; }
static inline int snd_hdac_power_up_pm(struct hdac_device *codec) { return 0; }
static inline int snd_hdac_power_down_pm(struct hdac_device *codec) { return 0; }
static inline int snd_hdac_keep_power_up(struct hdac_device *codec) { return 0; }
+static inline void snd_hdac_enter_pm(struct hdac_device *codec) {}
+static inline void snd_hdac_leave_pm(struct hdac_device *codec) {}
+static inline bool snd_hdac_is_in_pm(struct hdac_device *codec) { return false; }
+static inline bool snd_hdac_is_power_on(struct hdac_device *codec) { return true; }
#endif
/*
@@ -188,6 +216,11 @@ struct hdac_driver {
const struct hda_device_id *id_table;
int (*match)(struct hdac_device *dev, struct hdac_driver *drv);
void (*unsol_event)(struct hdac_device *dev, unsigned int event);
+
+ /* fields used by ext bus APIs */
+ int (*probe)(struct hdac_device *dev);
+ int (*remove)(struct hdac_device *dev);
+ void (*shutdown)(struct hdac_device *dev);
};
#define drv_to_hdac_driver(_drv) container_of(_drv, struct hdac_driver, driver)
@@ -209,6 +242,14 @@ struct hdac_bus_ops {
};
/*
+ * ops used for ASoC HDA codec drivers
+ */
+struct hdac_ext_bus_ops {
+ int (*hdev_attach)(struct hdac_device *hdev);
+ int (*hdev_detach)(struct hdac_device *hdev);
+};
+
+/*
* Lowlevel I/O operators
*/
struct hdac_io_ops {
@@ -250,11 +291,17 @@ struct hdac_rb {
* @mlcap: MultiLink capabilities pointer
* @gtscap: gts capabilities pointer
* @drsmcap: dma resume capabilities pointer
+ * @num_streams: streams supported
+ * @idx: HDA link index
+ * @hlink_list: link list of HDA links
+ * @lock: lock for link mgmt
+ * @cmd_dma_state: state of cmd DMAs: CORB and RIRB
*/
struct hdac_bus {
struct device *dev;
const struct hdac_bus_ops *ops;
const struct hdac_io_ops *io_ops;
+ const struct hdac_ext_bus_ops *ext_ops;
/* h/w resources */
unsigned long addr;
@@ -314,9 +361,19 @@ struct hdac_bus {
spinlock_t reg_lock;
struct mutex cmd_mutex;
- /* i915 component interface */
- struct i915_audio_component *audio_component;
- int i915_power_refcount;
+ /* DRM component interface */
+ struct drm_audio_component *audio_component;
+ int drm_power_refcount;
+
+ /* parameters required for enhanced capabilities */
+ int num_streams;
+ int idx;
+
+ struct list_head hlink_list;
+
+ struct mutex lock;
+ bool cmd_dma_state;
+
};
int snd_hdac_bus_init(struct hdac_bus *bus, struct device *dev,
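
A sketch of how a codec driver might bracket its own suspend work with the new snd_hdac_enter_pm()/snd_hdac_leave_pm() helpers added in the hunk above; example_save_codec_state() is a made-up placeholder for the driver's real register save and power handling.

    static int example_save_codec_state(struct hdac_device *codec)
    {
            /* placeholder for the driver's real register save / power work */
            return 0;
    }

    static int example_codec_suspend(struct hdac_device *codec)
    {
            int err;

            snd_hdac_enter_pm(codec);       /* tell the core we are inside PM */
            err = example_save_codec_state(codec);
            snd_hdac_leave_pm(codec);

            return err;
    }
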
diff --git a/include/sound/hdaudio_ext.h b/include/sound/hdaudio_ext.h
index 9c14e21dda85..f34aced69ca8 100644
--- a/include/sound/hdaudio_ext.h
+++ b/include/sound/hdaudio_ext.h
@@ -4,38 +4,16 @@
#include <sound/hdaudio.h>
-/**
- * hdac_ext_bus: HDAC extended bus for extended HDA caps
- *
- * @bus: hdac bus
- * @num_streams: streams supported
- * @hlink_list: link list of HDA links
- * @lock: lock for link mgmt
- * @cmd_dma_state: state of cmd DMAs: CORB and RIRB
- */
-struct hdac_ext_bus {
- struct hdac_bus bus;
- int num_streams;
- int idx;
-
- struct list_head hlink_list;
-
- struct mutex lock;
- bool cmd_dma_state;
-};
-
-int snd_hdac_ext_bus_init(struct hdac_ext_bus *sbus, struct device *dev,
+int snd_hdac_ext_bus_init(struct hdac_bus *bus, struct device *dev,
const struct hdac_bus_ops *ops,
- const struct hdac_io_ops *io_ops);
+ const struct hdac_io_ops *io_ops,
+ const struct hdac_ext_bus_ops *ext_ops);
-void snd_hdac_ext_bus_exit(struct hdac_ext_bus *sbus);
-int snd_hdac_ext_bus_device_init(struct hdac_ext_bus *sbus, int addr);
+void snd_hdac_ext_bus_exit(struct hdac_bus *bus);
+int snd_hdac_ext_bus_device_init(struct hdac_bus *bus, int addr,
+ struct hdac_device *hdev);
void snd_hdac_ext_bus_device_exit(struct hdac_device *hdev);
-void snd_hdac_ext_bus_device_remove(struct hdac_ext_bus *ebus);
-
-#define ebus_to_hbus(ebus) (&(ebus)->bus)
-#define hbus_to_ebus(_bus) \
- container_of(_bus, struct hdac_ext_bus, bus)
+void snd_hdac_ext_bus_device_remove(struct hdac_bus *bus);
#define HDA_CODEC_REV_EXT_ENTRY(_vid, _rev, _name, drv_data) \
{ .vendor_id = (_vid), .rev_id = (_rev), .name = (_name), \
@@ -44,14 +22,14 @@ void snd_hdac_ext_bus_device_remove(struct hdac_ext_bus *ebus);
#define HDA_CODEC_EXT_ENTRY(_vid, _revid, _name, _drv_data) \
HDA_CODEC_REV_EXT_ENTRY(_vid, _revid, _name, _drv_data)
-void snd_hdac_ext_bus_ppcap_enable(struct hdac_ext_bus *chip, bool enable);
-void snd_hdac_ext_bus_ppcap_int_enable(struct hdac_ext_bus *chip, bool enable);
+void snd_hdac_ext_bus_ppcap_enable(struct hdac_bus *chip, bool enable);
+void snd_hdac_ext_bus_ppcap_int_enable(struct hdac_bus *chip, bool enable);
-void snd_hdac_ext_stream_spbcap_enable(struct hdac_ext_bus *chip,
+void snd_hdac_ext_stream_spbcap_enable(struct hdac_bus *chip,
bool enable, int index);
-int snd_hdac_ext_bus_get_ml_capabilities(struct hdac_ext_bus *bus);
-struct hdac_ext_link *snd_hdac_ext_bus_get_link(struct hdac_ext_bus *bus,
+int snd_hdac_ext_bus_get_ml_capabilities(struct hdac_bus *bus);
+struct hdac_ext_link *snd_hdac_ext_bus_get_link(struct hdac_bus *bus,
const char *codec_name);
enum hdac_ext_stream_type {
@@ -100,28 +78,28 @@ struct hdac_ext_stream {
#define stream_to_hdac_ext_stream(s) \
container_of(s, struct hdac_ext_stream, hstream)
-void snd_hdac_ext_stream_init(struct hdac_ext_bus *bus,
+void snd_hdac_ext_stream_init(struct hdac_bus *bus,
struct hdac_ext_stream *stream, int idx,
int direction, int tag);
-int snd_hdac_ext_stream_init_all(struct hdac_ext_bus *ebus, int start_idx,
+int snd_hdac_ext_stream_init_all(struct hdac_bus *bus, int start_idx,
int num_stream, int dir);
-void snd_hdac_stream_free_all(struct hdac_ext_bus *ebus);
-void snd_hdac_link_free_all(struct hdac_ext_bus *ebus);
-struct hdac_ext_stream *snd_hdac_ext_stream_assign(struct hdac_ext_bus *bus,
+void snd_hdac_stream_free_all(struct hdac_bus *bus);
+void snd_hdac_link_free_all(struct hdac_bus *bus);
+struct hdac_ext_stream *snd_hdac_ext_stream_assign(struct hdac_bus *bus,
struct snd_pcm_substream *substream,
int type);
void snd_hdac_ext_stream_release(struct hdac_ext_stream *azx_dev, int type);
-void snd_hdac_ext_stream_decouple(struct hdac_ext_bus *bus,
+void snd_hdac_ext_stream_decouple(struct hdac_bus *bus,
struct hdac_ext_stream *azx_dev, bool decouple);
-void snd_hdac_ext_stop_streams(struct hdac_ext_bus *sbus);
+void snd_hdac_ext_stop_streams(struct hdac_bus *bus);
-int snd_hdac_ext_stream_set_spib(struct hdac_ext_bus *ebus,
+int snd_hdac_ext_stream_set_spib(struct hdac_bus *bus,
struct hdac_ext_stream *stream, u32 value);
-int snd_hdac_ext_stream_get_spbmaxfifo(struct hdac_ext_bus *ebus,
+int snd_hdac_ext_stream_get_spbmaxfifo(struct hdac_bus *bus,
struct hdac_ext_stream *stream);
-void snd_hdac_ext_stream_drsm_enable(struct hdac_ext_bus *ebus,
+void snd_hdac_ext_stream_drsm_enable(struct hdac_bus *bus,
bool enable, int index);
-int snd_hdac_ext_stream_set_dpibr(struct hdac_ext_bus *ebus,
+int snd_hdac_ext_stream_set_dpibr(struct hdac_bus *bus,
struct hdac_ext_stream *stream, u32 value);
int snd_hdac_ext_stream_set_lpib(struct hdac_ext_stream *stream, u32 value);
@@ -144,17 +122,15 @@ struct hdac_ext_link {
int snd_hdac_ext_bus_link_power_up(struct hdac_ext_link *link);
int snd_hdac_ext_bus_link_power_down(struct hdac_ext_link *link);
-int snd_hdac_ext_bus_link_power_up_all(struct hdac_ext_bus *ebus);
-int snd_hdac_ext_bus_link_power_down_all(struct hdac_ext_bus *ebus);
+int snd_hdac_ext_bus_link_power_up_all(struct hdac_bus *bus);
+int snd_hdac_ext_bus_link_power_down_all(struct hdac_bus *bus);
void snd_hdac_ext_link_set_stream_id(struct hdac_ext_link *link,
int stream);
void snd_hdac_ext_link_clear_stream_id(struct hdac_ext_link *link,
int stream);
-int snd_hdac_ext_bus_link_get(struct hdac_ext_bus *ebus,
- struct hdac_ext_link *link);
-int snd_hdac_ext_bus_link_put(struct hdac_ext_bus *ebus,
- struct hdac_ext_link *link);
+int snd_hdac_ext_bus_link_get(struct hdac_bus *bus, struct hdac_ext_link *link);
+int snd_hdac_ext_bus_link_put(struct hdac_bus *bus, struct hdac_ext_link *link);
/* update register macro */
#define snd_hdac_updatel(addr, reg, mask, val) \
@@ -181,53 +157,12 @@ struct hda_dai_map {
u32 maxbps;
};
-#define HDA_MAX_NIDS 16
-
-/**
- * struct hdac_ext_device - HDAC Ext device
- *
- * @hdac: hdac core device
- * @nid_list - the dai map which matches the dai-name with the nid
- * @map_cur_idx - the idx in use in dai_map
- * @ops - the hda codec ops common to all codec drivers
- * @pvt_data - private data, for asoc contains asoc codec object
- */
-struct hdac_ext_device {
- struct hdac_device hdev;
- struct hdac_ext_bus *ebus;
-
- /* soc-dai to nid map */
- struct hda_dai_map nid_list[HDA_MAX_NIDS];
- unsigned int map_cur_idx;
-
- /* codec ops */
- struct hdac_ext_codec_ops ops;
-
- struct snd_card *card;
- void *scodec;
- void *private_data;
-};
-
struct hdac_ext_dma_params {
u32 format;
u8 stream_tag;
};
-#define to_ehdac_device(dev) (container_of((dev), \
- struct hdac_ext_device, hdev))
-/*
- * HD-audio codec base driver
- */
-struct hdac_ext_driver {
- struct hdac_driver hdac;
-
- int (*probe)(struct hdac_ext_device *dev);
- int (*remove)(struct hdac_ext_device *dev);
- void (*shutdown)(struct hdac_ext_device *dev);
-};
-
-int snd_hda_ext_driver_register(struct hdac_ext_driver *drv);
-void snd_hda_ext_driver_unregister(struct hdac_ext_driver *drv);
-#define to_ehdac_driver(_drv) container_of(_drv, struct hdac_ext_driver, hdac)
+int snd_hda_ext_driver_register(struct hdac_driver *drv);
+void snd_hda_ext_driver_unregister(struct hdac_driver *drv);
#endif /* __SOUND_HDAUDIO_EXT_H */
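
With struct hdac_ext_bus folded into struct hdac_bus, controller probe code now passes an extra ops table to snd_hdac_ext_bus_init(). A hedged sketch, assuming the three ops tables are defined elsewhere in the controller driver:

    #include <sound/hdaudio_ext.h>

    extern const struct hdac_bus_ops example_bus_ops;       /* assumed elsewhere */
    extern const struct hdac_io_ops example_io_ops;         /* assumed elsewhere */
    extern const struct hdac_ext_bus_ops example_ext_ops;   /* assumed elsewhere */

    static int example_ext_bus_probe(struct device *dev, struct hdac_bus *bus)
    {
            int err;

            err = snd_hdac_ext_bus_init(bus, dev, &example_bus_ops,
                                        &example_io_ops, &example_ext_ops);
            if (err < 0)
                    return err;

            /* a single struct hdac_bus now carries the ext state as well */
            snd_hdac_ext_bus_ppcap_enable(bus, true);
            return 0;
    }
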
diff --git a/include/sound/memalloc.h b/include/sound/memalloc.h
index 9c3db3dce32b..67561b997915 100644
--- a/include/sound/memalloc.h
+++ b/include/sound/memalloc.h
@@ -24,6 +24,8 @@
#ifndef __SOUND_MEMALLOC_H
#define __SOUND_MEMALLOC_H
+#include <asm/page.h>
+
struct device;
/*
@@ -67,6 +69,14 @@ struct snd_dma_buffer {
void *private_data; /* private for allocator; don't touch */
};
+/*
+ * return the pages matching with the given byte size
+ */
+static inline unsigned int snd_sgbuf_aligned_pages(size_t size)
+{
+ return (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
+}
+
#ifdef CONFIG_SND_DMA_SGBUF
/*
* Scatter-Gather generic device pages
@@ -91,14 +101,6 @@ struct snd_sg_buf {
};
/*
- * return the pages matching with the given byte size
- */
-static inline unsigned int snd_sgbuf_aligned_pages(size_t size)
-{
- return (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
-}
-
-/*
* return the physical address at the corresponding offset
*/
static inline dma_addr_t snd_sgbuf_get_addr(struct snd_dma_buffer *dmab,
diff --git a/include/sound/pcm.h b/include/sound/pcm.h
index e054c583d3b3..d6bd3caf6878 100644
--- a/include/sound/pcm.h
+++ b/include/sound/pcm.h
@@ -462,6 +462,7 @@ struct snd_pcm_substream {
/* -- timer section -- */
struct snd_timer *timer; /* timer */
unsigned timer_running: 1; /* time is running */
+ long wait_time; /* time in ms for R/W to wait for avail */
/* -- next substream -- */
struct snd_pcm_substream *next;
/* -- linked substreams -- */
@@ -1089,14 +1090,14 @@ static inline snd_pcm_sframes_t
snd_pcm_lib_write(struct snd_pcm_substream *substream,
const void __user *buf, snd_pcm_uframes_t frames)
{
- return __snd_pcm_lib_xfer(substream, (void *)buf, true, frames, false);
+ return __snd_pcm_lib_xfer(substream, (void __force *)buf, true, frames, false);
}
static inline snd_pcm_sframes_t
snd_pcm_lib_read(struct snd_pcm_substream *substream,
void __user *buf, snd_pcm_uframes_t frames)
{
- return __snd_pcm_lib_xfer(substream, (void *)buf, true, frames, false);
+ return __snd_pcm_lib_xfer(substream, (void __force *)buf, true, frames, false);
}
static inline snd_pcm_sframes_t
@@ -1341,8 +1342,6 @@ int snd_pcm_lib_mmap_iomem(struct snd_pcm_substream *substream, struct vm_area_s
#define snd_pcm_lib_mmap_iomem NULL
#endif
-#define snd_pcm_lib_mmap_vmalloc NULL
-
/**
* snd_pcm_limit_isa_dma_size - Get the max size fitting with ISA DMA transfer
* @dma: DMA number
diff --git a/include/sound/pcm_params.h b/include/sound/pcm_params.h
index c704357775fc..2dd37cada7c0 100644
--- a/include/sound/pcm_params.h
+++ b/include/sound/pcm_params.h
@@ -87,6 +87,13 @@ static inline void snd_mask_set(struct snd_mask *mask, unsigned int val)
mask->bits[MASK_OFS(val)] |= MASK_BIT(val);
}
+/* Most drivers need only this one */
+static inline void snd_mask_set_format(struct snd_mask *mask,
+ snd_pcm_format_t format)
+{
+ snd_mask_set(mask, (__force unsigned int)format);
+}
+
static inline void snd_mask_reset(struct snd_mask *mask, unsigned int val)
{
mask->bits[MASK_OFS(val)] &= ~MASK_BIT(val);
@@ -369,8 +376,7 @@ static inline int params_physical_width(const struct snd_pcm_hw_params *p)
static inline void
params_set_format(struct snd_pcm_hw_params *p, snd_pcm_format_t fmt)
{
- snd_mask_set(hw_param_mask(p, SNDRV_PCM_HW_PARAM_FORMAT),
- (__force int)fmt);
+ snd_mask_set_format(hw_param_mask(p, SNDRV_PCM_HW_PARAM_FORMAT), fmt);
}
#endif /* __SOUND_PCM_PARAMS_H */
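
A minimal sketch of the typed helper in use: constraining the format mask to S16_LE without the __force cast that open-coded callers needed before. The fixup function itself is hypothetical.

    #include <sound/pcm_params.h>

    static void example_force_s16(struct snd_pcm_hw_params *params)
    {
            struct snd_mask *fmt = hw_param_mask(params, SNDRV_PCM_HW_PARAM_FORMAT);

            snd_mask_none(fmt);
            snd_mask_set_format(fmt, SNDRV_PCM_FORMAT_S16_LE);
    }
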
diff --git a/include/sound/pxa2xx-lib.h b/include/sound/pxa2xx-lib.h
index 63f75450d3db..6758fc12fa84 100644
--- a/include/sound/pxa2xx-lib.h
+++ b/include/sound/pxa2xx-lib.h
@@ -8,20 +8,23 @@
/* PCM */
struct snd_pcm_substream;
struct snd_pcm_hw_params;
+struct snd_soc_pcm_runtime;
struct snd_pcm;
-extern int __pxa2xx_pcm_hw_params(struct snd_pcm_substream *substream,
+extern int pxa2xx_pcm_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params);
-extern int __pxa2xx_pcm_hw_free(struct snd_pcm_substream *substream);
+extern int pxa2xx_pcm_hw_free(struct snd_pcm_substream *substream);
extern int pxa2xx_pcm_trigger(struct snd_pcm_substream *substream, int cmd);
extern snd_pcm_uframes_t pxa2xx_pcm_pointer(struct snd_pcm_substream *substream);
-extern int __pxa2xx_pcm_prepare(struct snd_pcm_substream *substream);
-extern int __pxa2xx_pcm_open(struct snd_pcm_substream *substream);
-extern int __pxa2xx_pcm_close(struct snd_pcm_substream *substream);
+extern int pxa2xx_pcm_prepare(struct snd_pcm_substream *substream);
+extern int pxa2xx_pcm_open(struct snd_pcm_substream *substream);
+extern int pxa2xx_pcm_close(struct snd_pcm_substream *substream);
extern int pxa2xx_pcm_mmap(struct snd_pcm_substream *substream,
struct vm_area_struct *vma);
extern int pxa2xx_pcm_preallocate_dma_buffer(struct snd_pcm *pcm, int stream);
extern void pxa2xx_pcm_free_dma_buffers(struct snd_pcm *pcm);
+extern int pxa2xx_soc_pcm_new(struct snd_soc_pcm_runtime *rtd);
+extern const struct snd_pcm_ops pxa2xx_pcm_ops;
/* AC97 */
diff --git a/include/sound/rt5682.h b/include/sound/rt5682.h
new file mode 100644
index 000000000000..0251797ab438
--- /dev/null
+++ b/include/sound/rt5682.h
@@ -0,0 +1,40 @@
+/*
+ * linux/sound/rt5682.h -- Platform data for RT5682
+ *
+ * Copyright 2018 Realtek Microelectronics
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __LINUX_SND_RT5682_H
+#define __LINUX_SND_RT5682_H
+
+enum rt5682_dmic1_data_pin {
+ RT5682_DMIC1_NULL,
+ RT5682_DMIC1_DATA_GPIO2,
+ RT5682_DMIC1_DATA_GPIO5,
+};
+
+enum rt5682_dmic1_clk_pin {
+ RT5682_DMIC1_CLK_GPIO1,
+ RT5682_DMIC1_CLK_GPIO3,
+};
+
+enum rt5682_jd_src {
+ RT5682_JD_NULL,
+ RT5682_JD1,
+};
+
+struct rt5682_platform_data {
+
+ int ldo1_en; /* GPIO for LDO1_EN */
+
+ enum rt5682_dmic1_data_pin dmic1_data_pin;
+ enum rt5682_dmic1_clk_pin dmic1_clk_pin;
+ enum rt5682_jd_src jd_src;
+};
+
+#endif
+
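
For illustration, a sketch of board code providing this platform data; the pin and jack-detect choices here are arbitrary examples, not board recommendations.

    #include <sound/rt5682.h>

    static struct rt5682_platform_data example_rt5682_pdata = {
            .ldo1_en = -1,                          /* no LDO1_EN GPIO wired up */
            .dmic1_data_pin = RT5682_DMIC1_DATA_GPIO2,
            .dmic1_clk_pin = RT5682_DMIC1_CLK_GPIO1,
            .jd_src = RT5682_JD1,
    };
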
diff --git a/include/sound/sb16_csp.h b/include/sound/sb16_csp.h
index c7c7788005e4..7817e88bd08d 100644
--- a/include/sound/sb16_csp.h
+++ b/include/sound/sb16_csp.h
@@ -46,7 +46,7 @@ enum {
struct snd_sb_csp_ops {
int (*csp_use) (struct snd_sb_csp * p);
int (*csp_unuse) (struct snd_sb_csp * p);
- int (*csp_autoload) (struct snd_sb_csp * p, int pcm_sfmt, int play_rec_mode);
+ int (*csp_autoload) (struct snd_sb_csp * p, snd_pcm_format_t pcm_sfmt, int play_rec_mode);
int (*csp_start) (struct snd_sb_csp * p, int sample_width, int channels);
int (*csp_stop) (struct snd_sb_csp * p);
int (*csp_qsound_transfer) (struct snd_sb_csp * p);
diff --git a/include/sound/seq_midi_event.h b/include/sound/seq_midi_event.h
index e40f43e6fc7b..2f135bccf457 100644
--- a/include/sound/seq_midi_event.h
+++ b/include/sound/seq_midi_event.h
@@ -43,10 +43,8 @@ void snd_midi_event_free(struct snd_midi_event *dev);
void snd_midi_event_reset_encode(struct snd_midi_event *dev);
void snd_midi_event_reset_decode(struct snd_midi_event *dev);
void snd_midi_event_no_status(struct snd_midi_event *dev, int on);
-/* encode from byte stream - return number of written bytes if success */
-long snd_midi_event_encode(struct snd_midi_event *dev, unsigned char *buf, long count,
- struct snd_seq_event *ev);
-int snd_midi_event_encode_byte(struct snd_midi_event *dev, int c, struct snd_seq_event *ev);
+bool snd_midi_event_encode_byte(struct snd_midi_event *dev, unsigned char c,
+ struct snd_seq_event *ev);
/* decode from event to bytes - return number of written bytes if success */
long snd_midi_event_decode(struct snd_midi_event *dev, unsigned char *buf, long count,
struct snd_seq_event *ev);
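
With snd_midi_event_encode() removed, callers feed the parser one byte at a time; snd_midi_event_encode_byte() now returns true once a full event has been assembled. A hedged sketch of the replacement loop, with the dispatch step left as a comment:

    #include <sound/seq_midi_event.h>

    static void example_feed(struct snd_midi_event *dev, struct snd_seq_event *ev,
                             const unsigned char *buf, long count)
    {
            long i;

            for (i = 0; i < count; i++) {
                    if (snd_midi_event_encode_byte(dev, buf[i], ev)) {
                            /* ev is complete; dispatch it to the sequencer */
                    }
            }
    }
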
diff --git a/include/sound/seq_virmidi.h b/include/sound/seq_virmidi.h
index 695257ae64ac..796ce7772213 100644
--- a/include/sound/seq_virmidi.h
+++ b/include/sound/seq_virmidi.h
@@ -36,11 +36,12 @@ struct snd_virmidi {
int seq_mode;
int client;
int port;
- unsigned int trigger: 1;
+ bool trigger;
struct snd_midi_event *parser;
struct snd_seq_event event;
struct snd_virmidi_dev *rdev;
struct snd_rawmidi_substream *substream;
+ struct work_struct output_work;
};
#define SNDRV_VIRMIDI_SUBSCRIBE (1<<0)
diff --git a/include/sound/sh_fsi.h b/include/sound/sh_fsi.h
index 7a9710b4b799..89eafe23ef88 100644
--- a/include/sound/sh_fsi.h
+++ b/include/sound/sh_fsi.h
@@ -1,16 +1,13 @@
-#ifndef __SOUND_FSI_H
-#define __SOUND_FSI_H
-
-/*
+/* SPDX-License-Identifier: GPL-2.0
+ *
* Fifo-attached Serial Interface (FSI) support for SH7724
*
* Copyright (C) 2009 Renesas Solutions Corp.
* Kuninori Morimoto <morimoto.kuninori@renesas.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
+#ifndef __SOUND_FSI_H
+#define __SOUND_FSI_H
+
#include <linux/clk.h>
#include <sound/soc.h>
diff --git a/include/sound/simple_card.h b/include/sound/simple_card.h
index a6a2e1547092..d264e5463f22 100644
--- a/include/sound/simple_card.h
+++ b/include/sound/simple_card.h
@@ -1,12 +1,9 @@
-/*
+/* SPDX-License-Identifier: GPL-2.0
+ *
* ASoC simple sound card support
*
* Copyright (C) 2012 Renesas Solutions Corp.
* Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __SIMPLE_CARD_H
diff --git a/include/sound/simple_card_utils.h b/include/sound/simple_card_utils.h
index 7e25afce6566..8bc5e2d8b13c 100644
--- a/include/sound/simple_card_utils.h
+++ b/include/sound/simple_card_utils.h
@@ -1,17 +1,20 @@
-/*
+/* SPDX-License-Identifier: GPL-2.0
+ *
* simple_card_utils.h
*
* Copyright (c) 2016 Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
+
#ifndef __SIMPLE_CARD_UTILS_H
#define __SIMPLE_CARD_UTILS_H
#include <sound/soc.h>
+#define asoc_simple_card_init_hp(card, sjack, prefix) \
+ asoc_simple_card_init_jack(card, sjack, 1, prefix)
+#define asoc_simple_card_init_mic(card, sjack, prefix) \
+ asoc_simple_card_init_jack(card, sjack, 0, prefix)
+
struct asoc_simple_dai {
const char *name;
unsigned int sysclk;
@@ -28,6 +31,12 @@ struct asoc_simple_card_data {
u32 convert_channels;
};
+struct asoc_simple_jack {
+ struct snd_soc_jack jack;
+ struct snd_soc_jack_pin pin;
+ struct snd_soc_jack_gpio gpio;
+};
+
int asoc_simple_card_parse_daifmt(struct device *dev,
struct device_node *node,
struct device_node *codec,
@@ -107,4 +116,8 @@ int asoc_simple_card_of_parse_routing(struct snd_soc_card *card,
int asoc_simple_card_of_parse_widgets(struct snd_soc_card *card,
char *prefix);
+int asoc_simple_card_init_jack(struct snd_soc_card *card,
+ struct asoc_simple_jack *sjack,
+ int is_hp, char *prefix);
+
#endif /* __SIMPLE_CARD_UTILS_H */
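
A short sketch of the new jack helpers from a card's late_probe; the jack storage and the "Simple" prefix are assumptions for the example.

    #include <sound/simple_card_utils.h>

    static int example_card_late_probe(struct snd_soc_card *card)
    {
            static struct asoc_simple_jack hp_jack;

            /* expands to asoc_simple_card_init_jack(card, &hp_jack, 1, "Simple") */
            return asoc_simple_card_init_hp(card, &hp_jack, "Simple");
    }
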
diff --git a/include/sound/soc-acpi-intel-match.h b/include/sound/soc-acpi-intel-match.h
index 9da6388c20a1..bb1d24b703fb 100644
--- a/include/sound/soc-acpi-intel-match.h
+++ b/include/sound/soc-acpi-intel-match.h
@@ -1,16 +1,6 @@
-
-/*
- * Copyright (C) 2017, Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
+/* SPDX-License-Identifier: GPL-2.0
*
+ * Copyright (C) 2017, Intel Corporation. All rights reserved.
*/
#ifndef __LINUX_SND_SOC_ACPI_INTEL_MATCH_H
@@ -29,5 +19,10 @@ extern struct snd_soc_acpi_mach snd_soc_acpi_intel_broadwell_machines[];
extern struct snd_soc_acpi_mach snd_soc_acpi_intel_baytrail_legacy_machines[];
extern struct snd_soc_acpi_mach snd_soc_acpi_intel_baytrail_machines[];
extern struct snd_soc_acpi_mach snd_soc_acpi_intel_cherrytrail_machines[];
+extern struct snd_soc_acpi_mach snd_soc_acpi_intel_skl_machines[];
+extern struct snd_soc_acpi_mach snd_soc_acpi_intel_kbl_machines[];
+extern struct snd_soc_acpi_mach snd_soc_acpi_intel_bxt_machines[];
+extern struct snd_soc_acpi_mach snd_soc_acpi_intel_glk_machines[];
+extern struct snd_soc_acpi_mach snd_soc_acpi_intel_cnl_machines[];
#endif
diff --git a/include/sound/soc-acpi.h b/include/sound/soc-acpi.h
index 082224275f52..e45b2330d16a 100644
--- a/include/sound/soc-acpi.h
+++ b/include/sound/soc-acpi.h
@@ -1,15 +1,6 @@
-/*
- * Copyright (C) 2013-15, Intel Corporation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
+/* SPDX-License-Identifier: GPL-2.0
*
+ * Copyright (C) 2013-15, Intel Corporation. All rights reserved.
*/
#ifndef __LINUX_SND_SOC_ACPI_H
diff --git a/include/sound/soc-dai.h b/include/sound/soc-dai.h
index e6f8c40ed43c..f5d70041108f 100644
--- a/include/sound/soc-dai.h
+++ b/include/sound/soc-dai.h
@@ -1,12 +1,9 @@
-/*
+/* SPDX-License-Identifier: GPL-2.0
+ *
* linux/sound/soc-dai.h -- ALSA SoC Layer
*
* Copyright: 2005-2008 Wolfson Microelectronics. PLC.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
* Digital Audio Interface (DAI) API.
*/
@@ -141,6 +138,11 @@ int snd_soc_dai_set_tristate(struct snd_soc_dai *dai, int tristate);
int snd_soc_dai_digital_mute(struct snd_soc_dai *dai, int mute,
int direction);
+
+int snd_soc_dai_get_channel_map(struct snd_soc_dai *dai,
+ unsigned int *tx_num, unsigned int *tx_slot,
+ unsigned int *rx_num, unsigned int *rx_slot);
+
int snd_soc_dai_is_dummy(struct snd_soc_dai *dai);
struct snd_soc_dai_ops {
@@ -168,6 +170,9 @@ struct snd_soc_dai_ops {
int (*set_channel_map)(struct snd_soc_dai *dai,
unsigned int tx_num, unsigned int *tx_slot,
unsigned int rx_num, unsigned int *rx_slot);
+ int (*get_channel_map)(struct snd_soc_dai *dai,
+ unsigned int *tx_num, unsigned int *tx_slot,
+ unsigned int *rx_num, unsigned int *rx_slot);
int (*set_tristate)(struct snd_soc_dai *dai, int tristate);
int (*set_sdw_stream)(struct snd_soc_dai *dai,
diff --git a/include/sound/soc-dapm.h b/include/sound/soc-dapm.h
index a6ce2de4e20a..af9ef16cc34d 100644
--- a/include/sound/soc-dapm.h
+++ b/include/sound/soc-dapm.h
@@ -1,13 +1,10 @@
-/*
+/* SPDX-License-Identifier: GPL-2.0
+ *
* linux/sound/soc-dapm.h -- ALSA SoC Dynamic Audio Power Management
*
- * Author: Liam Girdwood
- * Created: Aug 11th 2005
+ * Author: Liam Girdwood
+ * Created: Aug 11th 2005
* Copyright: Wolfson Microelectronics. PLC.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __LINUX_SND_SOC_DAPM_H
diff --git a/include/sound/soc-dpcm.h b/include/sound/soc-dpcm.h
index 806059052bfc..9bb92f187af8 100644
--- a/include/sound/soc-dpcm.h
+++ b/include/sound/soc-dpcm.h
@@ -1,11 +1,8 @@
-/*
+/* SPDX-License-Identifier: GPL-2.0
+ *
* linux/sound/soc-dpcm.h -- ALSA SoC Dynamic PCM Support
*
* Author: Liam Girdwood <lrg@ti.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __LINUX_SND_SOC_DPCM_H
diff --git a/include/sound/soc-topology.h b/include/sound/soc-topology.h
index f552c3f56368..fa4b8413d2e2 100644
--- a/include/sound/soc-topology.h
+++ b/include/sound/soc-topology.h
@@ -1,13 +1,10 @@
-/*
+/* SPDX-License-Identifier: GPL-2.0
+ *
* linux/sound/soc-topology.h -- ALSA SoC Firmware Controls and DAPM
*
* Copyright (C) 2012 Texas Instruments Inc.
* Copyright (C) 2015 Intel Corporation.
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
* Simple file API to load FW that includes mixers, coefficients, DAPM graphs,
* algorithms, equalisers, DAIs, widgets, FE caps, BE caps, codec link caps etc.
*/
@@ -30,6 +27,9 @@ struct snd_soc_dapm_context;
struct snd_soc_card;
struct snd_kcontrol_new;
struct snd_soc_dai_link;
+struct snd_soc_dai_driver;
+struct snd_soc_dai;
+struct snd_soc_dapm_route;
/* object scan be loaded and unloaded in groups with identfying indexes */
#define SND_SOC_TPLG_INDEX_ALL 0 /* ID that matches all FW objects */
@@ -109,35 +109,44 @@ struct snd_soc_tplg_widget_events {
struct snd_soc_tplg_ops {
/* external kcontrol init - used for any driver specific init */
- int (*control_load)(struct snd_soc_component *,
+ int (*control_load)(struct snd_soc_component *, int index,
struct snd_kcontrol_new *, struct snd_soc_tplg_ctl_hdr *);
int (*control_unload)(struct snd_soc_component *,
struct snd_soc_dobj *);
+ /* DAPM graph route element loading and unloading */
+ int (*dapm_route_load)(struct snd_soc_component *, int index,
+ struct snd_soc_dapm_route *route);
+ int (*dapm_route_unload)(struct snd_soc_component *,
+ struct snd_soc_dobj *);
+
/* external widget init - used for any driver specific init */
- int (*widget_load)(struct snd_soc_component *,
+ int (*widget_load)(struct snd_soc_component *, int index,
struct snd_soc_dapm_widget *,
struct snd_soc_tplg_dapm_widget *);
- int (*widget_ready)(struct snd_soc_component *,
+ int (*widget_ready)(struct snd_soc_component *, int index,
struct snd_soc_dapm_widget *,
struct snd_soc_tplg_dapm_widget *);
int (*widget_unload)(struct snd_soc_component *,
struct snd_soc_dobj *);
/* FE DAI - used for any driver specific init */
- int (*dai_load)(struct snd_soc_component *,
- struct snd_soc_dai_driver *dai_drv);
+ int (*dai_load)(struct snd_soc_component *, int index,
+ struct snd_soc_dai_driver *dai_drv,
+ struct snd_soc_tplg_pcm *pcm, struct snd_soc_dai *dai);
+
int (*dai_unload)(struct snd_soc_component *,
struct snd_soc_dobj *);
/* DAI link - used for any driver specific init */
- int (*link_load)(struct snd_soc_component *,
- struct snd_soc_dai_link *link);
+ int (*link_load)(struct snd_soc_component *, int index,
+ struct snd_soc_dai_link *link,
+ struct snd_soc_tplg_link_config *cfg);
int (*link_unload)(struct snd_soc_component *,
struct snd_soc_dobj *);
/* callback to handle vendor bespoke data */
- int (*vendor_load)(struct snd_soc_component *,
+ int (*vendor_load)(struct snd_soc_component *, int index,
struct snd_soc_tplg_hdr *);
int (*vendor_unload)(struct snd_soc_component *,
struct snd_soc_tplg_hdr *);
@@ -146,7 +155,7 @@ struct snd_soc_tplg_ops {
void (*complete)(struct snd_soc_component *);
/* manifest - optional to inform component of manifest */
- int (*manifest)(struct snd_soc_component *,
+ int (*manifest)(struct snd_soc_component *, int index,
struct snd_soc_tplg_manifest *);
/* vendor specific kcontrol handlers available for binding */
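
Since the loader callbacks now carry the topology index, component drivers adjust their signatures accordingly. A hedged sketch of one adapted callback; the example_* names are placeholders, not taken from any in-tree driver.

    #include <sound/soc-topology.h>

    static int example_widget_load(struct snd_soc_component *comp, int index,
                                   struct snd_soc_dapm_widget *w,
                                   struct snd_soc_tplg_dapm_widget *tplg_w)
    {
            dev_dbg(comp->dev, "widget %s from topology index %d\n",
                    w->name, index);
            return 0;
    }

    static struct snd_soc_tplg_ops example_tplg_ops = {
            .widget_load = example_widget_load,
    };
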
diff --git a/include/sound/soc.h b/include/sound/soc.h
index 1378dcd2128a..41cec42fb456 100644
--- a/include/sound/soc.h
+++ b/include/sound/soc.h
@@ -1,13 +1,10 @@
-/*
+/* SPDX-License-Identifier: GPL-2.0
+ *
* linux/sound/soc.h -- ALSA SoC Layer
*
- * Author: Liam Girdwood
- * Created: Aug 11th 2005
+ * Author: Liam Girdwood
+ * Created: Aug 11th 2005
* Copyright: Wolfson Microelectronics. PLC.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __LINUX_SND_SOC_H
@@ -806,6 +803,14 @@ struct snd_soc_component_driver {
unsigned int use_pmdown_time:1; /* care pmdown_time at stop */
unsigned int endianness:1;
unsigned int non_legacy_dai_naming:1;
+
+ /* this component uses topology and ignore machine driver FEs */
+ const char *ignore_machine;
+ const char *topology_name_prefix;
+ int (*be_hw_params_fixup)(struct snd_soc_pcm_runtime *rtd,
+ struct snd_pcm_hw_params *params);
+ bool use_dai_pcm_id; /* use the DAI link PCM ID as PCM device number */
+ int be_pcm_base; /* base device ID for all BE PCMs */
};
struct snd_soc_component {
@@ -957,10 +962,17 @@ struct snd_soc_dai_link {
/* DPCM used FE & BE merged format */
unsigned int dpcm_merged_format:1;
+ /* DPCM used FE & BE merged channel */
+ unsigned int dpcm_merged_chan:1;
+ /* DPCM used FE & BE merged rate */
+ unsigned int dpcm_merged_rate:1;
/* pmdown_time is ignored at stop */
unsigned int ignore_pmdown_time:1;
+ /* Do not create a PCM for this DAI link (Backend link) */
+ unsigned int ignore:1;
+
struct list_head list; /* DAI link list of the soc card */
struct snd_soc_dobj dobj; /* For topology */
};
@@ -1000,6 +1012,7 @@ struct snd_soc_card {
const char *long_name;
const char *driver_name;
char dmi_longname[80];
+ char topology_shortname[32];
struct device *dev;
struct snd_card *snd_card;
@@ -1009,6 +1022,7 @@ struct snd_soc_card {
struct mutex dapm_mutex;
bool instantiated;
+ bool topology_shortname_created;
int (*probe)(struct snd_soc_card *card);
int (*late_probe)(struct snd_soc_card *card);
@@ -1412,6 +1426,9 @@ int snd_soc_of_parse_card_name(struct snd_soc_card *card,
const char *propname);
int snd_soc_of_parse_audio_simple_widgets(struct snd_soc_card *card,
const char *propname);
+int snd_soc_of_get_slot_mask(struct device_node *np,
+ const char *prop_name,
+ unsigned int *mask);
int snd_soc_of_parse_tdm_slot(struct device_node *np,
unsigned int *tx_mask,
unsigned int *rx_mask,
diff --git a/include/target/iscsi/iscsi_target_core.h b/include/target/iscsi/iscsi_target_core.h
index cf5f3fff1f1a..f2e6abea8490 100644
--- a/include/target/iscsi/iscsi_target_core.h
+++ b/include/target/iscsi/iscsi_target_core.h
@@ -4,6 +4,7 @@
#include <linux/dma-direction.h> /* enum dma_data_direction */
#include <linux/list.h> /* struct list_head */
+#include <linux/sched.h>
#include <linux/socket.h> /* struct sockaddr_storage */
#include <linux/types.h> /* u8 */
#include <scsi/iscsi_proto.h> /* itt_t */
diff --git a/include/target/target_core_backend.h b/include/target/target_core_backend.h
index 34a15d59ed88..51b6f50eabee 100644
--- a/include/target/target_core_backend.h
+++ b/include/target/target_core_backend.h
@@ -106,13 +106,15 @@ bool target_lun_is_rdonly(struct se_cmd *);
sense_reason_t passthrough_parse_cdb(struct se_cmd *cmd,
sense_reason_t (*exec_cmd)(struct se_cmd *cmd));
-struct se_device *target_find_device(int id, bool do_depend);
-
bool target_sense_desc_format(struct se_device *dev);
sector_t target_to_linux_sector(struct se_device *dev, sector_t lb);
bool target_configure_unmap_from_queue(struct se_dev_attrib *attrib,
struct request_queue *q);
+static inline bool target_dev_configured(struct se_device *se_dev)
+{
+ return !!(se_dev->dev_flags & DF_CONFIGURED);
+}
/* Only use get_unaligned_be24() if reading p - 1 is allowed. */
static inline uint32_t get_unaligned_be24(const uint8_t *const p)
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index 922a39f45abc..7a4ee7852ca4 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -4,7 +4,7 @@
#include <linux/configfs.h> /* struct config_group */
#include <linux/dma-direction.h> /* enum dma_data_direction */
-#include <linux/percpu_ida.h> /* struct percpu_ida */
+#include <linux/sbitmap.h>
#include <linux/percpu-refcount.h>
#include <linux/semaphore.h> /* struct semaphore */
#include <linux/completion.h>
@@ -443,7 +443,6 @@ struct se_cmd {
u8 scsi_asc;
u8 scsi_ascq;
u16 scsi_sense_length;
- unsigned cmd_wait_set:1;
unsigned unknown_data_length:1;
bool state_active:1;
u64 tag; /* SAM command identifier aka task tag */
@@ -455,6 +454,7 @@ struct se_cmd {
int sam_task_attr;
/* Used for se_sess->sess_tag_pool */
unsigned int map_tag;
+ int map_cpu;
/* Transport protocol dependent state, see transport_state_table */
enum transport_state_table t_state;
/* See se_cmd_flags_table */
@@ -475,7 +475,7 @@ struct se_cmd {
struct se_session *se_sess;
struct se_tmr_req *se_tmr_req;
struct list_head se_cmd_list;
- struct completion cmd_wait_comp;
+ struct completion *compl;
const struct target_core_fabric_ops *se_tfo;
sense_reason_t (*execute_cmd)(struct se_cmd *);
sense_reason_t (*transport_complete_callback)(struct se_cmd *, bool, int *);
@@ -605,10 +605,10 @@ struct se_session {
struct list_head sess_list;
struct list_head sess_acl_list;
struct list_head sess_cmd_list;
- struct list_head sess_wait_list;
spinlock_t sess_cmd_lock;
+ wait_queue_head_t cmd_list_wq;
void *sess_cmd_map;
- struct percpu_ida sess_tag_pool;
+ struct sbitmap_queue sess_tag_pool;
};
struct se_device;
@@ -638,7 +638,6 @@ struct se_dev_entry {
atomic_long_t total_cmds;
atomic_long_t read_bytes;
atomic_long_t write_bytes;
- atomic_t ua_count;
/* Used for PR SPEC_I_PT=1 and REGISTER_AND_MOVE */
struct kref pr_kref;
struct completion pr_comp;
@@ -934,4 +933,9 @@ static inline void atomic_dec_mb(atomic_t *v)
smp_mb__after_atomic();
}
+static inline void target_free_tag(struct se_session *sess, struct se_cmd *cmd)
+{
+ sbitmap_queue_clear(&sess->sess_tag_pool, cmd->map_tag, cmd->map_cpu);
+}
+
#endif /* TARGET_CORE_BASE_H */
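
With sess_tag_pool now an sbitmap_queue, tag allocation records both the tag and the CPU it came from, and release goes through the target_free_tag() helper above. A rough sketch assuming a hypothetical fabric command wrapper; blocking and ENOSPC handling that a real fabric needs is omitted.

    #include <linux/sbitmap.h>
    #include <target/target_core_base.h>

    /* hypothetical fabric command wrapper embedding the core se_cmd */
    struct example_cmd {
            struct se_cmd se_cmd;
            /* fabric-private fields ... */
    };

    static struct example_cmd *example_get_cmd(struct se_session *se_sess)
    {
            struct example_cmd *cmd;
            unsigned int cpu;
            int tag;

            tag = sbitmap_queue_get(&se_sess->sess_tag_pool, &cpu);
            if (tag < 0)
                    return NULL;

            cmd = &((struct example_cmd *)se_sess->sess_cmd_map)[tag];
            cmd->se_cmd.map_tag = tag;
            cmd->se_cmd.map_cpu = cpu;
            return cmd;
    }

    static void example_put_cmd(struct se_session *se_sess, struct example_cmd *cmd)
    {
            target_free_tag(se_sess, &cmd->se_cmd);
    }
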
diff --git a/include/target/target_core_fabric.h b/include/target/target_core_fabric.h
index b297aa0d9651..f4147b398431 100644
--- a/include/target/target_core_fabric.h
+++ b/include/target/target_core_fabric.h
@@ -79,7 +79,7 @@ struct target_core_fabric_ops {
void (*fabric_drop_wwn)(struct se_wwn *);
void (*add_wwn_groups)(struct se_wwn *);
struct se_portal_group *(*fabric_make_tpg)(struct se_wwn *,
- struct config_group *, const char *);
+ const char *);
void (*fabric_drop_tpg)(struct se_portal_group *);
int (*fabric_post_link)(struct se_portal_group *,
struct se_lun *);
@@ -109,17 +109,17 @@ void target_unregister_template(const struct target_core_fabric_ops *fo);
int target_depend_item(struct config_item *item);
void target_undepend_item(struct config_item *item);
-struct se_session *target_alloc_session(struct se_portal_group *,
+struct se_session *target_setup_session(struct se_portal_group *,
unsigned int, unsigned int, enum target_prot_op prot_op,
const char *, void *,
int (*callback)(struct se_portal_group *,
struct se_session *, void *));
+void target_remove_session(struct se_session *);
-struct se_session *transport_init_session(enum target_prot_op);
+void transport_init_session(struct se_session *);
+struct se_session *transport_alloc_session(enum target_prot_op);
int transport_alloc_session_tags(struct se_session *, unsigned int,
unsigned int);
-struct se_session *transport_init_session_tags(unsigned int, unsigned int,
- enum target_prot_op);
void __transport_register_session(struct se_portal_group *,
struct se_node_acl *, struct se_session *, void *);
void transport_register_session(struct se_portal_group *,
diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h
index 39b94ec965be..b401c4e36394 100644
--- a/include/trace/events/btrfs.h
+++ b/include/trace/events/btrfs.h
@@ -374,7 +374,7 @@ DECLARE_EVENT_CLASS(
__entry->extent_type = btrfs_file_extent_type(l, fi);
__entry->compression = btrfs_file_extent_compression(l, fi);
__entry->extent_start = start;
- __entry->extent_end = (start + btrfs_file_extent_inline_len(l, slot, fi));
+ __entry->extent_end = (start + btrfs_file_extent_ram_bytes(l, fi));
),
TP_printk_btrfs(
@@ -433,7 +433,6 @@ DEFINE_EVENT(
{ (1 << BTRFS_ORDERED_DIRECT), "DIRECT" }, \
{ (1 << BTRFS_ORDERED_IOERR), "IOERR" }, \
{ (1 << BTRFS_ORDERED_UPDATED_ISIZE), "UPDATED_ISIZE" }, \
- { (1 << BTRFS_ORDERED_LOGGED_CSUM), "LOGGED_CSUM" }, \
{ (1 << BTRFS_ORDERED_TRUNCATED), "TRUNCATED" })
diff --git a/include/trace/events/cgroup.h b/include/trace/events/cgroup.h
index d74722c2ac8b..a401ff5e7847 100644
--- a/include/trace/events/cgroup.h
+++ b/include/trace/events/cgroup.h
@@ -53,24 +53,22 @@ DEFINE_EVENT(cgroup_root, cgroup_remount,
DECLARE_EVENT_CLASS(cgroup,
- TP_PROTO(struct cgroup *cgrp),
+ TP_PROTO(struct cgroup *cgrp, const char *path),
- TP_ARGS(cgrp),
+ TP_ARGS(cgrp, path),
TP_STRUCT__entry(
__field( int, root )
__field( int, id )
__field( int, level )
- __dynamic_array(char, path,
- cgroup_path(cgrp, NULL, 0) + 1)
+ __string( path, path )
),
TP_fast_assign(
__entry->root = cgrp->root->hierarchy_id;
__entry->id = cgrp->id;
__entry->level = cgrp->level;
- cgroup_path(cgrp, __get_dynamic_array(path),
- __get_dynamic_array_len(path));
+ __assign_str(path, path);
),
TP_printk("root=%d id=%d level=%d path=%s",
@@ -79,45 +77,45 @@ DECLARE_EVENT_CLASS(cgroup,
DEFINE_EVENT(cgroup, cgroup_mkdir,
- TP_PROTO(struct cgroup *cgroup),
+ TP_PROTO(struct cgroup *cgrp, const char *path),
- TP_ARGS(cgroup)
+ TP_ARGS(cgrp, path)
);
DEFINE_EVENT(cgroup, cgroup_rmdir,
- TP_PROTO(struct cgroup *cgroup),
+ TP_PROTO(struct cgroup *cgrp, const char *path),
- TP_ARGS(cgroup)
+ TP_ARGS(cgrp, path)
);
DEFINE_EVENT(cgroup, cgroup_release,
- TP_PROTO(struct cgroup *cgroup),
+ TP_PROTO(struct cgroup *cgrp, const char *path),
- TP_ARGS(cgroup)
+ TP_ARGS(cgrp, path)
);
DEFINE_EVENT(cgroup, cgroup_rename,
- TP_PROTO(struct cgroup *cgroup),
+ TP_PROTO(struct cgroup *cgrp, const char *path),
- TP_ARGS(cgroup)
+ TP_ARGS(cgrp, path)
);
DECLARE_EVENT_CLASS(cgroup_migrate,
- TP_PROTO(struct cgroup *dst_cgrp, struct task_struct *task, bool threadgroup),
+ TP_PROTO(struct cgroup *dst_cgrp, const char *path,
+ struct task_struct *task, bool threadgroup),
- TP_ARGS(dst_cgrp, task, threadgroup),
+ TP_ARGS(dst_cgrp, path, task, threadgroup),
TP_STRUCT__entry(
__field( int, dst_root )
__field( int, dst_id )
__field( int, dst_level )
- __dynamic_array(char, dst_path,
- cgroup_path(dst_cgrp, NULL, 0) + 1)
__field( int, pid )
+ __string( dst_path, path )
__string( comm, task->comm )
),
@@ -125,8 +123,7 @@ DECLARE_EVENT_CLASS(cgroup_migrate,
__entry->dst_root = dst_cgrp->root->hierarchy_id;
__entry->dst_id = dst_cgrp->id;
__entry->dst_level = dst_cgrp->level;
- cgroup_path(dst_cgrp, __get_dynamic_array(dst_path),
- __get_dynamic_array_len(dst_path));
+ __assign_str(dst_path, path);
__entry->pid = task->pid;
__assign_str(comm, task->comm);
),
@@ -138,16 +135,18 @@ DECLARE_EVENT_CLASS(cgroup_migrate,
DEFINE_EVENT(cgroup_migrate, cgroup_attach_task,
- TP_PROTO(struct cgroup *dst_cgrp, struct task_struct *task, bool threadgroup),
+ TP_PROTO(struct cgroup *dst_cgrp, const char *path,
+ struct task_struct *task, bool threadgroup),
- TP_ARGS(dst_cgrp, task, threadgroup)
+ TP_ARGS(dst_cgrp, path, task, threadgroup)
);
DEFINE_EVENT(cgroup_migrate, cgroup_transfer_tasks,
- TP_PROTO(struct cgroup *dst_cgrp, struct task_struct *task, bool threadgroup),
+ TP_PROTO(struct cgroup *dst_cgrp, const char *path,
+ struct task_struct *task, bool threadgroup),
- TP_ARGS(dst_cgrp, task, threadgroup)
+ TP_ARGS(dst_cgrp, path, task, threadgroup)
);
#endif /* _TRACE_CGROUP_H */
diff --git a/include/trace/events/clk.h b/include/trace/events/clk.h
index 2cd449328aee..9004ffff7f32 100644
--- a/include/trace/events/clk.h
+++ b/include/trace/events/clk.h
@@ -192,6 +192,42 @@ DEFINE_EVENT(clk_phase, clk_set_phase_complete,
TP_ARGS(core, phase)
);
+DECLARE_EVENT_CLASS(clk_duty_cycle,
+
+ TP_PROTO(struct clk_core *core, struct clk_duty *duty),
+
+ TP_ARGS(core, duty),
+
+ TP_STRUCT__entry(
+ __string( name, core->name )
+ __field( unsigned int, num )
+ __field( unsigned int, den )
+ ),
+
+ TP_fast_assign(
+ __assign_str(name, core->name);
+ __entry->num = duty->num;
+ __entry->den = duty->den;
+ ),
+
+ TP_printk("%s %u/%u", __get_str(name), (unsigned int)__entry->num,
+ (unsigned int)__entry->den)
+);
+
+DEFINE_EVENT(clk_duty_cycle, clk_set_duty_cycle,
+
+ TP_PROTO(struct clk_core *core, struct clk_duty *duty),
+
+ TP_ARGS(core, duty)
+);
+
+DEFINE_EVENT(clk_duty_cycle, clk_set_duty_cycle_complete,
+
+ TP_PROTO(struct clk_core *core, struct clk_duty *duty),
+
+ TP_ARGS(core, duty)
+);
+
#endif /* _TRACE_CLK_H */
/* This part must be outside protection */
diff --git a/include/trace/events/fib.h b/include/trace/events/fib.h
index 9763cddd0594..6271bab63bfb 100644
--- a/include/trace/events/fib.h
+++ b/include/trace/events/fib.h
@@ -22,6 +22,7 @@ TRACE_EVENT(fib_table_lookup,
__field( int, err )
__field( int, oif )
__field( int, iif )
+ __field( u8, proto )
__field( __u8, tos )
__field( __u8, scope )
__field( __u8, flags )
@@ -31,7 +32,6 @@ TRACE_EVENT(fib_table_lookup,
__array( __u8, saddr, 4 )
__field( u16, sport )
__field( u16, dport )
- __field( u8, proto )
__dynamic_array(char, name, IFNAMSIZ )
),
diff --git a/include/trace/events/filelock.h b/include/trace/events/filelock.h
index d1faf3597b9d..68b17c116907 100644
--- a/include/trace/events/filelock.h
+++ b/include/trace/events/filelock.h
@@ -112,8 +112,11 @@ DEFINE_EVENT(filelock_lock, locks_remove_posix,
TP_PROTO(struct inode *inode, struct file_lock *fl, int ret),
TP_ARGS(inode, fl, ret));
-DECLARE_EVENT_CLASS(filelock_lease,
+DEFINE_EVENT(filelock_lock, flock_lock_inode,
+ TP_PROTO(struct inode *inode, struct file_lock *fl, int ret),
+ TP_ARGS(inode, fl, ret));
+DECLARE_EVENT_CLASS(filelock_lease,
TP_PROTO(struct inode *inode, struct file_lock *fl),
TP_ARGS(inode, fl),
diff --git a/include/trace/events/fsi_master_ast_cf.h b/include/trace/events/fsi_master_ast_cf.h
new file mode 100644
index 000000000000..a0fdfa58622a
--- /dev/null
+++ b/include/trace/events/fsi_master_ast_cf.h
@@ -0,0 +1,150 @@
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM fsi_master_ast_cf
+
+#if !defined(_TRACE_FSI_MASTER_ACF_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_FSI_MASTER_ACF_H
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(fsi_master_acf_copro_command,
+ TP_PROTO(const struct fsi_master_acf *master, uint32_t op),
+ TP_ARGS(master, op),
+ TP_STRUCT__entry(
+ __field(int, master_idx)
+ __field(uint32_t, op)
+ ),
+ TP_fast_assign(
+ __entry->master_idx = master->master.idx;
+ __entry->op = op;
+ ),
+ TP_printk("fsi-acf%d command %08x",
+ __entry->master_idx, __entry->op
+ )
+);
+
+TRACE_EVENT(fsi_master_acf_send_request,
+ TP_PROTO(const struct fsi_master_acf *master, const struct fsi_msg *cmd, u8 rbits),
+ TP_ARGS(master, cmd, rbits),
+ TP_STRUCT__entry(
+ __field(int, master_idx)
+ __field(uint64_t, msg)
+ __field(u8, bits)
+ __field(u8, rbits)
+ ),
+ TP_fast_assign(
+ __entry->master_idx = master->master.idx;
+ __entry->msg = cmd->msg;
+ __entry->bits = cmd->bits;
+ __entry->rbits = rbits;
+ ),
+ TP_printk("fsi-acf%d cmd: %016llx/%d/%d",
+ __entry->master_idx, (unsigned long long)__entry->msg,
+ __entry->bits, __entry->rbits
+ )
+);
+
+TRACE_EVENT(fsi_master_acf_copro_response,
+ TP_PROTO(const struct fsi_master_acf *master, u8 rtag, u8 rcrc, __be32 rdata, bool crc_ok),
+ TP_ARGS(master, rtag, rcrc, rdata, crc_ok),
+ TP_STRUCT__entry(
+ __field(int, master_idx)
+ __field(u8, rtag)
+ __field(u8, rcrc)
+ __field(u32, rdata)
+ __field(bool, crc_ok)
+ ),
+ TP_fast_assign(
+ __entry->master_idx = master->master.idx;
+ __entry->rtag = rtag;
+ __entry->rcrc = rcrc;
+ __entry->rdata = be32_to_cpu(rdata);
+ __entry->crc_ok = crc_ok;
+ ),
+ TP_printk("fsi-acf%d rsp: tag=%04x crc=%04x data=%08x %c\n",
+ __entry->master_idx, __entry->rtag, __entry->rcrc,
+ __entry->rdata, __entry->crc_ok ? ' ' : '!'
+ )
+);
+
+TRACE_EVENT(fsi_master_acf_crc_rsp_error,
+ TP_PROTO(const struct fsi_master_acf *master, int retries),
+ TP_ARGS(master, retries),
+ TP_STRUCT__entry(
+ __field(int, master_idx)
+ __field(int, retries)
+ ),
+ TP_fast_assign(
+ __entry->master_idx = master->master.idx;
+ __entry->retries = retries;
+ ),
+ TP_printk("fsi-acf%d CRC error in response retry %d",
+ __entry->master_idx, __entry->retries
+ )
+);
+
+TRACE_EVENT(fsi_master_acf_poll_response_busy,
+ TP_PROTO(const struct fsi_master_acf *master, int busy_count),
+ TP_ARGS(master, busy_count),
+ TP_STRUCT__entry(
+ __field(int, master_idx)
+ __field(int, busy_count)
+ ),
+ TP_fast_assign(
+ __entry->master_idx = master->master.idx;
+ __entry->busy_count = busy_count;
+ ),
+ TP_printk("fsi-acf%d: device reported busy %d times",
+ __entry->master_idx, __entry->busy_count
+ )
+);
+
+TRACE_EVENT(fsi_master_acf_cmd_abs_addr,
+ TP_PROTO(const struct fsi_master_acf *master, u32 addr),
+ TP_ARGS(master, addr),
+ TP_STRUCT__entry(
+ __field(int, master_idx)
+ __field(u32, addr)
+ ),
+ TP_fast_assign(
+ __entry->master_idx = master->master.idx;
+ __entry->addr = addr;
+ ),
+ TP_printk("fsi-acf%d: Sending ABS_ADR %06x",
+ __entry->master_idx, __entry->addr
+ )
+);
+
+TRACE_EVENT(fsi_master_acf_cmd_rel_addr,
+ TP_PROTO(const struct fsi_master_acf *master, u32 rel_addr),
+ TP_ARGS(master, rel_addr),
+ TP_STRUCT__entry(
+ __field(int, master_idx)
+ __field(u32, rel_addr)
+ ),
+ TP_fast_assign(
+ __entry->master_idx = master->master.idx;
+ __entry->rel_addr = rel_addr;
+ ),
+ TP_printk("fsi-acf%d: Sending REL_ADR %03x",
+ __entry->master_idx, __entry->rel_addr
+ )
+);
+
+TRACE_EVENT(fsi_master_acf_cmd_same_addr,
+ TP_PROTO(const struct fsi_master_acf *master),
+ TP_ARGS(master),
+ TP_STRUCT__entry(
+ __field(int, master_idx)
+ ),
+ TP_fast_assign(
+ __entry->master_idx = master->master.idx;
+ ),
+ TP_printk("fsi-acf%d: Sending SAME_ADR",
+ __entry->master_idx
+ )
+);
+
+#endif /* _TRACE_FSI_MASTER_ACF_H */
+
+#include <trace/define_trace.h>
diff --git a/include/trace/events/fsi_master_gpio.h b/include/trace/events/fsi_master_gpio.h
index f95cf3264bf9..70ef66e63e84 100644
--- a/include/trace/events/fsi_master_gpio.h
+++ b/include/trace/events/fsi_master_gpio.h
@@ -50,6 +50,22 @@ TRACE_EVENT(fsi_master_gpio_out,
)
);
+TRACE_EVENT(fsi_master_gpio_clock_zeros,
+ TP_PROTO(const struct fsi_master_gpio *master, int clocks),
+ TP_ARGS(master, clocks),
+ TP_STRUCT__entry(
+ __field(int, master_idx)
+ __field(int, clocks)
+ ),
+ TP_fast_assign(
+ __entry->master_idx = master->master.idx;
+ __entry->clocks = clocks;
+ ),
+ TP_printk("fsi-gpio%d clock %d zeros",
+ __entry->master_idx, __entry->clocks
+ )
+);
+
TRACE_EVENT(fsi_master_gpio_break,
TP_PROTO(const struct fsi_master_gpio *master),
TP_ARGS(master),
@@ -64,6 +80,92 @@ TRACE_EVENT(fsi_master_gpio_break,
)
);
+TRACE_EVENT(fsi_master_gpio_crc_cmd_error,
+ TP_PROTO(const struct fsi_master_gpio *master),
+ TP_ARGS(master),
+ TP_STRUCT__entry(
+ __field(int, master_idx)
+ ),
+ TP_fast_assign(
+ __entry->master_idx = master->master.idx;
+ ),
+ TP_printk("fsi-gpio%d ----CRC command retry---",
+ __entry->master_idx
+ )
+);
+
+TRACE_EVENT(fsi_master_gpio_crc_rsp_error,
+ TP_PROTO(const struct fsi_master_gpio *master),
+ TP_ARGS(master),
+ TP_STRUCT__entry(
+ __field(int, master_idx)
+ ),
+ TP_fast_assign(
+ __entry->master_idx = master->master.idx;
+ ),
+ TP_printk("fsi-gpio%d ----CRC response---",
+ __entry->master_idx
+ )
+);
+
+TRACE_EVENT(fsi_master_gpio_poll_response_busy,
+ TP_PROTO(const struct fsi_master_gpio *master, int busy),
+ TP_ARGS(master, busy),
+ TP_STRUCT__entry(
+ __field(int, master_idx)
+ __field(int, busy)
+ ),
+ TP_fast_assign(
+ __entry->master_idx = master->master.idx;
+ __entry->busy = busy;
+ ),
+ TP_printk("fsi-gpio%d: device reported busy %d times",
+ __entry->master_idx, __entry->busy)
+);
+
+TRACE_EVENT(fsi_master_gpio_cmd_abs_addr,
+ TP_PROTO(const struct fsi_master_gpio *master, u32 addr),
+ TP_ARGS(master, addr),
+ TP_STRUCT__entry(
+ __field(int, master_idx)
+ __field(u32, addr)
+ ),
+ TP_fast_assign(
+ __entry->master_idx = master->master.idx;
+ __entry->addr = addr;
+ ),
+ TP_printk("fsi-gpio%d: Sending ABS_ADR %06x",
+ __entry->master_idx, __entry->addr)
+);
+
+TRACE_EVENT(fsi_master_gpio_cmd_rel_addr,
+ TP_PROTO(const struct fsi_master_gpio *master, u32 rel_addr),
+ TP_ARGS(master, rel_addr),
+ TP_STRUCT__entry(
+ __field(int, master_idx)
+ __field(u32, rel_addr)
+ ),
+ TP_fast_assign(
+ __entry->master_idx = master->master.idx;
+ __entry->rel_addr = rel_addr;
+ ),
+ TP_printk("fsi-gpio%d: Sending REL_ADR %03x",
+ __entry->master_idx, __entry->rel_addr)
+);
+
+TRACE_EVENT(fsi_master_gpio_cmd_same_addr,
+ TP_PROTO(const struct fsi_master_gpio *master),
+ TP_ARGS(master),
+ TP_STRUCT__entry(
+ __field(int, master_idx)
+ ),
+ TP_fast_assign(
+ __entry->master_idx = master->master.idx;
+ ),
+ TP_printk("fsi-gpio%d: Sending SAME_ADR",
+ __entry->master_idx)
+);
+
#endif /* _TRACE_FSI_MASTER_GPIO_H */
#include <trace/define_trace.h>
diff --git a/include/trace/events/net.h b/include/trace/events/net.h
index 9c886739246a..00aa72ce0e7c 100644
--- a/include/trace/events/net.h
+++ b/include/trace/events/net.h
@@ -223,6 +223,13 @@ DEFINE_EVENT(net_dev_rx_verbose_template, netif_receive_skb_entry,
TP_ARGS(skb)
);
+DEFINE_EVENT(net_dev_rx_verbose_template, netif_receive_skb_list_entry,
+
+ TP_PROTO(const struct sk_buff *skb),
+
+ TP_ARGS(skb)
+);
+
DEFINE_EVENT(net_dev_rx_verbose_template, netif_rx_entry,
TP_PROTO(const struct sk_buff *skb),
diff --git a/include/trace/events/power.h b/include/trace/events/power.h
index 908977d69783..f7aece721aed 100644
--- a/include/trace/events/power.h
+++ b/include/trace/events/power.h
@@ -5,6 +5,7 @@
#if !defined(_TRACE_POWER_H) || defined(TRACE_HEADER_MULTI_READ)
#define _TRACE_POWER_H
+#include <linux/cpufreq.h>
#include <linux/ktime.h>
#include <linux/pm_qos.h>
#include <linux/tracepoint.h>
@@ -148,6 +149,30 @@ DEFINE_EVENT(cpu, cpu_frequency,
TP_ARGS(frequency, cpu_id)
);
+TRACE_EVENT(cpu_frequency_limits,
+
+ TP_PROTO(struct cpufreq_policy *policy),
+
+ TP_ARGS(policy),
+
+ TP_STRUCT__entry(
+ __field(u32, min_freq)
+ __field(u32, max_freq)
+ __field(u32, cpu_id)
+ ),
+
+ TP_fast_assign(
+ __entry->min_freq = policy->min;
+ __entry->max_freq = policy->max;
+ __entry->cpu_id = policy->cpu;
+ ),
+
+ TP_printk("min=%lu max=%lu cpu_id=%lu",
+ (unsigned long)__entry->min_freq,
+ (unsigned long)__entry->max_freq,
+ (unsigned long)__entry->cpu_id)
+);
+
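The new cpu_frequency_limits event records a policy's min/max limits and CPU id. A minimal sketch of how a tracepoint declared this way is emitted, assuming a valid struct cpufreq_policy; the call site below is illustrative only and is not part of this change:

    #include <linux/cpufreq.h>
    #include <trace/events/power.h>

    static void example_report_limits(struct cpufreq_policy *policy)
    {
    	/* Logs policy->min, policy->max and policy->cpu via the event above. */
    	trace_cpu_frequency_limits(policy);
    }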
TRACE_EVENT(device_pm_callback_start,
TP_PROTO(struct device *dev, const char *pm_ops, int event),
diff --git a/include/trace/events/preemptirq.h b/include/trace/events/preemptirq.h
index 9c4eb33c5a1d..9a0d4ceeb166 100644
--- a/include/trace/events/preemptirq.h
+++ b/include/trace/events/preemptirq.h
@@ -1,4 +1,4 @@
-#ifdef CONFIG_PREEMPTIRQ_EVENTS
+#ifdef CONFIG_PREEMPTIRQ_TRACEPOINTS
#undef TRACE_SYSTEM
#define TRACE_SYSTEM preemptirq
@@ -32,7 +32,7 @@ DECLARE_EVENT_CLASS(preemptirq_template,
(void *)((unsigned long)(_stext) + __entry->parent_offs))
);
-#ifndef CONFIG_PROVE_LOCKING
+#ifdef CONFIG_TRACE_IRQFLAGS
DEFINE_EVENT(preemptirq_template, irq_disable,
TP_PROTO(unsigned long ip, unsigned long parent_ip),
TP_ARGS(ip, parent_ip));
@@ -40,9 +40,14 @@ DEFINE_EVENT(preemptirq_template, irq_disable,
DEFINE_EVENT(preemptirq_template, irq_enable,
TP_PROTO(unsigned long ip, unsigned long parent_ip),
TP_ARGS(ip, parent_ip));
+#else
+#define trace_irq_enable(...)
+#define trace_irq_disable(...)
+#define trace_irq_enable_rcuidle(...)
+#define trace_irq_disable_rcuidle(...)
#endif
-#ifdef CONFIG_DEBUG_PREEMPT
+#ifdef CONFIG_TRACE_PREEMPT_TOGGLE
DEFINE_EVENT(preemptirq_template, preempt_disable,
TP_PROTO(unsigned long ip, unsigned long parent_ip),
TP_ARGS(ip, parent_ip));
@@ -50,22 +55,22 @@ DEFINE_EVENT(preemptirq_template, preempt_disable,
DEFINE_EVENT(preemptirq_template, preempt_enable,
TP_PROTO(unsigned long ip, unsigned long parent_ip),
TP_ARGS(ip, parent_ip));
+#else
+#define trace_preempt_enable(...)
+#define trace_preempt_disable(...)
+#define trace_preempt_enable_rcuidle(...)
+#define trace_preempt_disable_rcuidle(...)
#endif
#endif /* _TRACE_PREEMPTIRQ_H */
#include <trace/define_trace.h>
-#endif /* !CONFIG_PREEMPTIRQ_EVENTS */
-
-#if !defined(CONFIG_PREEMPTIRQ_EVENTS) || defined(CONFIG_PROVE_LOCKING)
+#else /* !CONFIG_PREEMPTIRQ_TRACEPOINTS */
#define trace_irq_enable(...)
#define trace_irq_disable(...)
#define trace_irq_enable_rcuidle(...)
#define trace_irq_disable_rcuidle(...)
-#endif
-
-#if !defined(CONFIG_PREEMPTIRQ_EVENTS) || !defined(CONFIG_DEBUG_PREEMPT)
#define trace_preempt_enable(...)
#define trace_preempt_disable(...)
#define trace_preempt_enable_rcuidle(...)
diff --git a/include/trace/events/rcu.h b/include/trace/events/rcu.h
index 5936aac357ab..a8d07feff6a0 100644
--- a/include/trace/events/rcu.h
+++ b/include/trace/events/rcu.h
@@ -52,6 +52,7 @@ TRACE_EVENT(rcu_utilization,
* "cpuqs": CPU passes through a quiescent state.
* "cpuonl": CPU comes online.
* "cpuofl": CPU goes offline.
+ * "cpuofl-bgp": CPU goes offline while blocking a grace period.
* "reqwait": GP kthread sleeps waiting for grace-period request.
* "reqwaitsig": GP kthread awakened by signal from reqwait state.
* "fqswait": GP kthread waiting until time to force quiescent states.
@@ -63,24 +64,24 @@ TRACE_EVENT(rcu_utilization,
*/
TRACE_EVENT(rcu_grace_period,
- TP_PROTO(const char *rcuname, unsigned long gpnum, const char *gpevent),
+ TP_PROTO(const char *rcuname, unsigned long gp_seq, const char *gpevent),
- TP_ARGS(rcuname, gpnum, gpevent),
+ TP_ARGS(rcuname, gp_seq, gpevent),
TP_STRUCT__entry(
__field(const char *, rcuname)
- __field(unsigned long, gpnum)
+ __field(unsigned long, gp_seq)
__field(const char *, gpevent)
),
TP_fast_assign(
__entry->rcuname = rcuname;
- __entry->gpnum = gpnum;
+ __entry->gp_seq = gp_seq;
__entry->gpevent = gpevent;
),
TP_printk("%s %lu %s",
- __entry->rcuname, __entry->gpnum, __entry->gpevent)
+ __entry->rcuname, __entry->gp_seq, __entry->gpevent)
);
/*
@@ -90,8 +91,8 @@ TRACE_EVENT(rcu_grace_period,
*
* "Startleaf": Request a grace period based on leaf-node data.
* "Prestarted": Someone beat us to the request
- * "Startedleaf": Leaf-node start proved sufficient.
- * "Startedleafroot": Leaf-node start proved sufficient after checking root.
+ * "Startedleaf": Leaf node marked for future GP.
+ * "Startedleafroot": All nodes from leaf to root marked for future GP.
* "Startedroot": Requested a nocb grace period based on root-node data.
* "NoGPkthread": The RCU grace-period kthread has not yet started.
* "StartWait": Start waiting for the requested grace period.
@@ -102,17 +103,16 @@ TRACE_EVENT(rcu_grace_period,
*/
TRACE_EVENT(rcu_future_grace_period,
- TP_PROTO(const char *rcuname, unsigned long gpnum, unsigned long completed,
- unsigned long c, u8 level, int grplo, int grphi,
+ TP_PROTO(const char *rcuname, unsigned long gp_seq,
+ unsigned long gp_seq_req, u8 level, int grplo, int grphi,
const char *gpevent),
- TP_ARGS(rcuname, gpnum, completed, c, level, grplo, grphi, gpevent),
+ TP_ARGS(rcuname, gp_seq, gp_seq_req, level, grplo, grphi, gpevent),
TP_STRUCT__entry(
__field(const char *, rcuname)
- __field(unsigned long, gpnum)
- __field(unsigned long, completed)
- __field(unsigned long, c)
+ __field(unsigned long, gp_seq)
+ __field(unsigned long, gp_seq_req)
__field(u8, level)
__field(int, grplo)
__field(int, grphi)
@@ -121,19 +121,17 @@ TRACE_EVENT(rcu_future_grace_period,
TP_fast_assign(
__entry->rcuname = rcuname;
- __entry->gpnum = gpnum;
- __entry->completed = completed;
- __entry->c = c;
+ __entry->gp_seq = gp_seq;
+ __entry->gp_seq_req = gp_seq_req;
__entry->level = level;
__entry->grplo = grplo;
__entry->grphi = grphi;
__entry->gpevent = gpevent;
),
- TP_printk("%s %lu %lu %lu %u %d %d %s",
- __entry->rcuname, __entry->gpnum, __entry->completed,
- __entry->c, __entry->level, __entry->grplo, __entry->grphi,
- __entry->gpevent)
+ TP_printk("%s %lu %lu %u %d %d %s",
+ __entry->rcuname, __entry->gp_seq, __entry->gp_seq_req, __entry->level,
+ __entry->grplo, __entry->grphi, __entry->gpevent)
);
/*
@@ -145,14 +143,14 @@ TRACE_EVENT(rcu_future_grace_period,
*/
TRACE_EVENT(rcu_grace_period_init,
- TP_PROTO(const char *rcuname, unsigned long gpnum, u8 level,
+ TP_PROTO(const char *rcuname, unsigned long gp_seq, u8 level,
int grplo, int grphi, unsigned long qsmask),
- TP_ARGS(rcuname, gpnum, level, grplo, grphi, qsmask),
+ TP_ARGS(rcuname, gp_seq, level, grplo, grphi, qsmask),
TP_STRUCT__entry(
__field(const char *, rcuname)
- __field(unsigned long, gpnum)
+ __field(unsigned long, gp_seq)
__field(u8, level)
__field(int, grplo)
__field(int, grphi)
@@ -161,7 +159,7 @@ TRACE_EVENT(rcu_grace_period_init,
TP_fast_assign(
__entry->rcuname = rcuname;
- __entry->gpnum = gpnum;
+ __entry->gp_seq = gp_seq;
__entry->level = level;
__entry->grplo = grplo;
__entry->grphi = grphi;
@@ -169,7 +167,7 @@ TRACE_EVENT(rcu_grace_period_init,
),
TP_printk("%s %lu %u %d %d %lx",
- __entry->rcuname, __entry->gpnum, __entry->level,
+ __entry->rcuname, __entry->gp_seq, __entry->level,
__entry->grplo, __entry->grphi, __entry->qsmask)
);
@@ -301,24 +299,24 @@ TRACE_EVENT(rcu_nocb_wake,
*/
TRACE_EVENT(rcu_preempt_task,
- TP_PROTO(const char *rcuname, int pid, unsigned long gpnum),
+ TP_PROTO(const char *rcuname, int pid, unsigned long gp_seq),
- TP_ARGS(rcuname, pid, gpnum),
+ TP_ARGS(rcuname, pid, gp_seq),
TP_STRUCT__entry(
__field(const char *, rcuname)
- __field(unsigned long, gpnum)
+ __field(unsigned long, gp_seq)
__field(int, pid)
),
TP_fast_assign(
__entry->rcuname = rcuname;
- __entry->gpnum = gpnum;
+ __entry->gp_seq = gp_seq;
__entry->pid = pid;
),
TP_printk("%s %lu %d",
- __entry->rcuname, __entry->gpnum, __entry->pid)
+ __entry->rcuname, __entry->gp_seq, __entry->pid)
);
/*
@@ -328,23 +326,23 @@ TRACE_EVENT(rcu_preempt_task,
*/
TRACE_EVENT(rcu_unlock_preempted_task,
- TP_PROTO(const char *rcuname, unsigned long gpnum, int pid),
+ TP_PROTO(const char *rcuname, unsigned long gp_seq, int pid),
- TP_ARGS(rcuname, gpnum, pid),
+ TP_ARGS(rcuname, gp_seq, pid),
TP_STRUCT__entry(
__field(const char *, rcuname)
- __field(unsigned long, gpnum)
+ __field(unsigned long, gp_seq)
__field(int, pid)
),
TP_fast_assign(
__entry->rcuname = rcuname;
- __entry->gpnum = gpnum;
+ __entry->gp_seq = gp_seq;
__entry->pid = pid;
),
- TP_printk("%s %lu %d", __entry->rcuname, __entry->gpnum, __entry->pid)
+ TP_printk("%s %lu %d", __entry->rcuname, __entry->gp_seq, __entry->pid)
);
/*
@@ -357,15 +355,15 @@ TRACE_EVENT(rcu_unlock_preempted_task,
*/
TRACE_EVENT(rcu_quiescent_state_report,
- TP_PROTO(const char *rcuname, unsigned long gpnum,
+ TP_PROTO(const char *rcuname, unsigned long gp_seq,
unsigned long mask, unsigned long qsmask,
u8 level, int grplo, int grphi, int gp_tasks),
- TP_ARGS(rcuname, gpnum, mask, qsmask, level, grplo, grphi, gp_tasks),
+ TP_ARGS(rcuname, gp_seq, mask, qsmask, level, grplo, grphi, gp_tasks),
TP_STRUCT__entry(
__field(const char *, rcuname)
- __field(unsigned long, gpnum)
+ __field(unsigned long, gp_seq)
__field(unsigned long, mask)
__field(unsigned long, qsmask)
__field(u8, level)
@@ -376,7 +374,7 @@ TRACE_EVENT(rcu_quiescent_state_report,
TP_fast_assign(
__entry->rcuname = rcuname;
- __entry->gpnum = gpnum;
+ __entry->gp_seq = gp_seq;
__entry->mask = mask;
__entry->qsmask = qsmask;
__entry->level = level;
@@ -386,41 +384,41 @@ TRACE_EVENT(rcu_quiescent_state_report,
),
TP_printk("%s %lu %lx>%lx %u %d %d %u",
- __entry->rcuname, __entry->gpnum,
+ __entry->rcuname, __entry->gp_seq,
__entry->mask, __entry->qsmask, __entry->level,
__entry->grplo, __entry->grphi, __entry->gp_tasks)
);
/*
* Tracepoint for quiescent states detected by force_quiescent_state().
- * These trace events include the type of RCU, the grace-period number that
- * was blocked by the CPU, the CPU itself, and the type of quiescent state,
- * which can be "dti" for dyntick-idle mode, "ofl" for CPU offline, "kick"
- * when kicking a CPU that has been in dyntick-idle mode for too long, or
- * "rqc" if the CPU got a quiescent state via its rcu_qs_ctr.
+ * These trace events include the type of RCU, the grace-period number
+ * that was blocked by the CPU, the CPU itself, and the type of quiescent
+ * state, which can be "dti" for dyntick-idle mode, "kick" when kicking
+ * a CPU that has been in dyntick-idle mode for too long, or "rqc" if the
+ * CPU got a quiescent state via its rcu_qs_ctr.
*/
TRACE_EVENT(rcu_fqs,
- TP_PROTO(const char *rcuname, unsigned long gpnum, int cpu, const char *qsevent),
+ TP_PROTO(const char *rcuname, unsigned long gp_seq, int cpu, const char *qsevent),
- TP_ARGS(rcuname, gpnum, cpu, qsevent),
+ TP_ARGS(rcuname, gp_seq, cpu, qsevent),
TP_STRUCT__entry(
__field(const char *, rcuname)
- __field(unsigned long, gpnum)
+ __field(unsigned long, gp_seq)
__field(int, cpu)
__field(const char *, qsevent)
),
TP_fast_assign(
__entry->rcuname = rcuname;
- __entry->gpnum = gpnum;
+ __entry->gp_seq = gp_seq;
__entry->cpu = cpu;
__entry->qsevent = qsevent;
),
TP_printk("%s %lu %d %s",
- __entry->rcuname, __entry->gpnum,
+ __entry->rcuname, __entry->gp_seq,
__entry->cpu, __entry->qsevent)
);
@@ -753,23 +751,23 @@ TRACE_EVENT(rcu_barrier,
#else /* #ifdef CONFIG_RCU_TRACE */
-#define trace_rcu_grace_period(rcuname, gpnum, gpevent) do { } while (0)
-#define trace_rcu_future_grace_period(rcuname, gpnum, completed, c, \
+#define trace_rcu_grace_period(rcuname, gp_seq, gpevent) do { } while (0)
+#define trace_rcu_future_grace_period(rcuname, gp_seq, gp_seq_req, \
level, grplo, grphi, event) \
do { } while (0)
-#define trace_rcu_grace_period_init(rcuname, gpnum, level, grplo, grphi, \
+#define trace_rcu_grace_period_init(rcuname, gp_seq, level, grplo, grphi, \
qsmask) do { } while (0)
#define trace_rcu_exp_grace_period(rcuname, gqseq, gpevent) \
do { } while (0)
#define trace_rcu_exp_funnel_lock(rcuname, level, grplo, grphi, gpevent) \
do { } while (0)
#define trace_rcu_nocb_wake(rcuname, cpu, reason) do { } while (0)
-#define trace_rcu_preempt_task(rcuname, pid, gpnum) do { } while (0)
-#define trace_rcu_unlock_preempted_task(rcuname, gpnum, pid) do { } while (0)
-#define trace_rcu_quiescent_state_report(rcuname, gpnum, mask, qsmask, level, \
+#define trace_rcu_preempt_task(rcuname, pid, gp_seq) do { } while (0)
+#define trace_rcu_unlock_preempted_task(rcuname, gp_seq, pid) do { } while (0)
+#define trace_rcu_quiescent_state_report(rcuname, gp_seq, mask, qsmask, level, \
grplo, grphi, gp_tasks) do { } \
while (0)
-#define trace_rcu_fqs(rcuname, gpnum, cpu, qsevent) do { } while (0)
+#define trace_rcu_fqs(rcuname, gp_seq, cpu, qsevent) do { } while (0)
#define trace_rcu_dyntick(polarity, oldnesting, newnesting, dyntick) do { } while (0)
#define trace_rcu_callback(rcuname, rhp, qlen_lazy, qlen) do { } while (0)
#define trace_rcu_kfree_callback(rcuname, rhp, offset, qlen_lazy, qlen) \
diff --git a/include/trace/events/rxrpc.h b/include/trace/events/rxrpc.h
index 4fff00e9da8a..196587b8f204 100644
--- a/include/trace/events/rxrpc.h
+++ b/include/trace/events/rxrpc.h
@@ -211,18 +211,18 @@ enum rxrpc_congest_change {
rxrpc_cong_saw_nack,
};
-enum rxrpc_tx_fail_trace {
- rxrpc_tx_fail_call_abort,
- rxrpc_tx_fail_call_ack,
- rxrpc_tx_fail_call_data_frag,
- rxrpc_tx_fail_call_data_nofrag,
- rxrpc_tx_fail_call_final_resend,
- rxrpc_tx_fail_conn_abort,
- rxrpc_tx_fail_conn_challenge,
- rxrpc_tx_fail_conn_response,
- rxrpc_tx_fail_reject,
- rxrpc_tx_fail_version_keepalive,
- rxrpc_tx_fail_version_reply,
+enum rxrpc_tx_point {
+ rxrpc_tx_point_call_abort,
+ rxrpc_tx_point_call_ack,
+ rxrpc_tx_point_call_data_frag,
+ rxrpc_tx_point_call_data_nofrag,
+ rxrpc_tx_point_call_final_resend,
+ rxrpc_tx_point_conn_abort,
+ rxrpc_tx_point_rxkad_challenge,
+ rxrpc_tx_point_rxkad_response,
+ rxrpc_tx_point_reject,
+ rxrpc_tx_point_version_keepalive,
+ rxrpc_tx_point_version_reply,
};
#endif /* end __RXRPC_DECLARE_TRACE_ENUMS_ONCE_ONLY */
@@ -396,7 +396,7 @@ enum rxrpc_tx_fail_trace {
#define rxrpc_propose_ack_outcomes \
EM(rxrpc_propose_ack_subsume, " Subsume") \
EM(rxrpc_propose_ack_update, " Update") \
- E_(rxrpc_propose_ack_use, "")
+ E_(rxrpc_propose_ack_use, " New")
#define rxrpc_congest_modes \
EM(RXRPC_CALL_CONGEST_AVOIDANCE, "CongAvoid") \
@@ -452,18 +452,18 @@ enum rxrpc_tx_fail_trace {
EM(RXRPC_CALL_LOCAL_ERROR, "LocalError") \
E_(RXRPC_CALL_NETWORK_ERROR, "NetError")
-#define rxrpc_tx_fail_traces \
- EM(rxrpc_tx_fail_call_abort, "CallAbort") \
- EM(rxrpc_tx_fail_call_ack, "CallAck") \
- EM(rxrpc_tx_fail_call_data_frag, "CallDataFrag") \
- EM(rxrpc_tx_fail_call_data_nofrag, "CallDataNofrag") \
- EM(rxrpc_tx_fail_call_final_resend, "CallFinalResend") \
- EM(rxrpc_tx_fail_conn_abort, "ConnAbort") \
- EM(rxrpc_tx_fail_conn_challenge, "ConnChall") \
- EM(rxrpc_tx_fail_conn_response, "ConnResp") \
- EM(rxrpc_tx_fail_reject, "Reject") \
- EM(rxrpc_tx_fail_version_keepalive, "VerKeepalive") \
- E_(rxrpc_tx_fail_version_reply, "VerReply")
+#define rxrpc_tx_points \
+ EM(rxrpc_tx_point_call_abort, "CallAbort") \
+ EM(rxrpc_tx_point_call_ack, "CallAck") \
+ EM(rxrpc_tx_point_call_data_frag, "CallDataFrag") \
+ EM(rxrpc_tx_point_call_data_nofrag, "CallDataNofrag") \
+ EM(rxrpc_tx_point_call_final_resend, "CallFinalResend") \
+ EM(rxrpc_tx_point_conn_abort, "ConnAbort") \
+ EM(rxrpc_tx_point_reject, "Reject") \
+ EM(rxrpc_tx_point_rxkad_challenge, "RxkadChall") \
+ EM(rxrpc_tx_point_rxkad_response, "RxkadResp") \
+ EM(rxrpc_tx_point_version_keepalive, "VerKeepalive") \
+ E_(rxrpc_tx_point_version_reply, "VerReply")
/*
* Export enum symbols via userspace.
@@ -488,7 +488,7 @@ rxrpc_propose_ack_traces;
rxrpc_propose_ack_outcomes;
rxrpc_congest_modes;
rxrpc_congest_changes;
-rxrpc_tx_fail_traces;
+rxrpc_tx_points;
/*
* Now redefine the EM() and E_() macros to map the enums to the strings that
@@ -801,7 +801,7 @@ TRACE_EVENT(rxrpc_transmit,
);
TRACE_EVENT(rxrpc_rx_data,
- TP_PROTO(struct rxrpc_call *call, rxrpc_seq_t seq,
+ TP_PROTO(unsigned int call, rxrpc_seq_t seq,
rxrpc_serial_t serial, u8 flags, u8 anno),
TP_ARGS(call, seq, serial, flags, anno),
@@ -815,7 +815,7 @@ TRACE_EVENT(rxrpc_rx_data,
),
TP_fast_assign(
- __entry->call = call->debug_id;
+ __entry->call = call;
__entry->seq = seq;
__entry->serial = serial;
__entry->flags = flags;
@@ -918,6 +918,37 @@ TRACE_EVENT(rxrpc_rx_rwind_change,
__entry->wake ? " wake" : "")
);
+TRACE_EVENT(rxrpc_tx_packet,
+ TP_PROTO(unsigned int call_id, struct rxrpc_wire_header *whdr,
+ enum rxrpc_tx_point where),
+
+ TP_ARGS(call_id, whdr, where),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, call )
+ __field(enum rxrpc_tx_point, where )
+ __field_struct(struct rxrpc_wire_header, whdr )
+ ),
+
+ TP_fast_assign(
+ __entry->call = call_id;
+ memcpy(&__entry->whdr, whdr, sizeof(__entry->whdr));
+ ),
+
+ TP_printk("c=%08x %08x:%08x:%08x:%04x %08x %08x %02x %02x %s %s",
+ __entry->call,
+ ntohl(__entry->whdr.epoch),
+ ntohl(__entry->whdr.cid),
+ ntohl(__entry->whdr.callNumber),
+ ntohs(__entry->whdr.serviceId),
+ ntohl(__entry->whdr.serial),
+ ntohl(__entry->whdr.seq),
+ __entry->whdr.type, __entry->whdr.flags,
+ __entry->whdr.type <= 15 ?
+ __print_symbolic(__entry->whdr.type, rxrpc_pkts) : "?UNK",
+ __print_symbolic(__entry->where, rxrpc_tx_points))
+ );
+
TRACE_EVENT(rxrpc_tx_data,
TP_PROTO(struct rxrpc_call *call, rxrpc_seq_t seq,
rxrpc_serial_t serial, u8 flags, bool retrans, bool lose),
@@ -928,6 +959,8 @@ TRACE_EVENT(rxrpc_tx_data,
__field(unsigned int, call )
__field(rxrpc_seq_t, seq )
__field(rxrpc_serial_t, serial )
+ __field(u32, cid )
+ __field(u32, call_id )
__field(u8, flags )
__field(bool, retrans )
__field(bool, lose )
@@ -935,6 +968,8 @@ TRACE_EVENT(rxrpc_tx_data,
TP_fast_assign(
__entry->call = call->debug_id;
+ __entry->cid = call->cid;
+ __entry->call_id = call->call_id;
__entry->seq = seq;
__entry->serial = serial;
__entry->flags = flags;
@@ -942,8 +977,10 @@ TRACE_EVENT(rxrpc_tx_data,
__entry->lose = lose;
),
- TP_printk("c=%08x DATA %08x q=%08x fl=%02x%s%s",
+ TP_printk("c=%08x DATA %08x:%08x %08x q=%08x fl=%02x%s%s",
__entry->call,
+ __entry->cid,
+ __entry->call_id,
__entry->serial,
__entry->seq,
__entry->flags,
@@ -952,7 +989,7 @@ TRACE_EVENT(rxrpc_tx_data,
);
TRACE_EVENT(rxrpc_tx_ack,
- TP_PROTO(struct rxrpc_call *call, rxrpc_serial_t serial,
+ TP_PROTO(unsigned int call, rxrpc_serial_t serial,
rxrpc_seq_t ack_first, rxrpc_serial_t ack_serial,
u8 reason, u8 n_acks),
@@ -968,7 +1005,7 @@ TRACE_EVENT(rxrpc_tx_ack,
),
TP_fast_assign(
- __entry->call = call ? call->debug_id : 0;
+ __entry->call = call;
__entry->serial = serial;
__entry->ack_first = ack_first;
__entry->ack_serial = ack_serial;
@@ -1434,29 +1471,29 @@ TRACE_EVENT(rxrpc_rx_icmp,
TRACE_EVENT(rxrpc_tx_fail,
TP_PROTO(unsigned int debug_id, rxrpc_serial_t serial, int ret,
- enum rxrpc_tx_fail_trace what),
+ enum rxrpc_tx_point where),
- TP_ARGS(debug_id, serial, ret, what),
+ TP_ARGS(debug_id, serial, ret, where),
TP_STRUCT__entry(
__field(unsigned int, debug_id )
__field(rxrpc_serial_t, serial )
__field(int, ret )
- __field(enum rxrpc_tx_fail_trace, what )
+ __field(enum rxrpc_tx_point, where )
),
TP_fast_assign(
__entry->debug_id = debug_id;
__entry->serial = serial;
__entry->ret = ret;
- __entry->what = what;
+ __entry->where = where;
),
TP_printk("c=%08x r=%x ret=%d %s",
__entry->debug_id,
__entry->serial,
__entry->ret,
- __print_symbolic(__entry->what, rxrpc_tx_fail_traces))
+ __print_symbolic(__entry->where, rxrpc_tx_points))
);
TRACE_EVENT(rxrpc_call_reset,
@@ -1491,6 +1528,26 @@ TRACE_EVENT(rxrpc_call_reset,
__entry->tx_seq, __entry->rx_seq)
);
+TRACE_EVENT(rxrpc_notify_socket,
+ TP_PROTO(unsigned int debug_id, rxrpc_serial_t serial),
+
+ TP_ARGS(debug_id, serial),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, debug_id )
+ __field(rxrpc_serial_t, serial )
+ ),
+
+ TP_fast_assign(
+ __entry->debug_id = debug_id;
+ __entry->serial = serial;
+ ),
+
+ TP_printk("c=%08x r=%08x",
+ __entry->debug_id,
+ __entry->serial)
+ );
+
#endif /* _TRACE_RXRPC_H */
/* This part must be outside protection */
diff --git a/include/trace/events/sock.h b/include/trace/events/sock.h
index 3176a3931107..a0c4b8a30966 100644
--- a/include/trace/events/sock.h
+++ b/include/trace/events/sock.h
@@ -35,6 +35,10 @@
EM(TCP_CLOSING) \
EMe(TCP_NEW_SYN_RECV)
+#define skmem_kind_names \
+ EM(SK_MEM_SEND) \
+ EMe(SK_MEM_RECV)
+
/* enums need to be exported to user space */
#undef EM
#undef EMe
@@ -44,6 +48,7 @@
family_names
inet_protocol_names
tcp_state_names
+skmem_kind_names
#undef EM
#undef EMe
@@ -59,6 +64,9 @@ tcp_state_names
#define show_tcp_state_name(val) \
__print_symbolic(val, tcp_state_names)
+#define show_skmem_kind_names(val) \
+ __print_symbolic(val, skmem_kind_names)
+
TRACE_EVENT(sock_rcvqueue_full,
TP_PROTO(struct sock *sk, struct sk_buff *skb),
@@ -83,9 +91,9 @@ TRACE_EVENT(sock_rcvqueue_full,
TRACE_EVENT(sock_exceed_buf_limit,
- TP_PROTO(struct sock *sk, struct proto *prot, long allocated),
+ TP_PROTO(struct sock *sk, struct proto *prot, long allocated, int kind),
- TP_ARGS(sk, prot, allocated),
+ TP_ARGS(sk, prot, allocated, kind),
TP_STRUCT__entry(
__array(char, name, 32)
@@ -93,6 +101,10 @@ TRACE_EVENT(sock_exceed_buf_limit,
__field(long, allocated)
__field(int, sysctl_rmem)
__field(int, rmem_alloc)
+ __field(int, sysctl_wmem)
+ __field(int, wmem_alloc)
+ __field(int, wmem_queued)
+ __field(int, kind)
),
TP_fast_assign(
@@ -101,17 +113,25 @@ TRACE_EVENT(sock_exceed_buf_limit,
__entry->allocated = allocated;
__entry->sysctl_rmem = sk_get_rmem0(sk, prot);
__entry->rmem_alloc = atomic_read(&sk->sk_rmem_alloc);
+ __entry->sysctl_wmem = sk_get_wmem0(sk, prot);
+ __entry->wmem_alloc = refcount_read(&sk->sk_wmem_alloc);
+ __entry->wmem_queued = sk->sk_wmem_queued;
+ __entry->kind = kind;
),
- TP_printk("proto:%s sysctl_mem=%ld,%ld,%ld allocated=%ld "
- "sysctl_rmem=%d rmem_alloc=%d",
+ TP_printk("proto:%s sysctl_mem=%ld,%ld,%ld allocated=%ld sysctl_rmem=%d rmem_alloc=%d sysctl_wmem=%d wmem_alloc=%d wmem_queued=%d kind=%s",
__entry->name,
__entry->sysctl_mem[0],
__entry->sysctl_mem[1],
__entry->sysctl_mem[2],
__entry->allocated,
__entry->sysctl_rmem,
- __entry->rmem_alloc)
+ __entry->rmem_alloc,
+ __entry->sysctl_wmem,
+ __entry->wmem_alloc,
+ __entry->wmem_queued,
+ show_skmem_kind_names(__entry->kind)
+ )
);
TRACE_EVENT(inet_sock_set_state,
diff --git a/include/trace/events/xdp.h b/include/trace/events/xdp.h
index 1ecf4c67fcf7..e95cb86b65cf 100644
--- a/include/trace/events/xdp.h
+++ b/include/trace/events/xdp.h
@@ -147,9 +147,8 @@ struct _bpf_dtab_netdev {
#define devmap_ifindex(fwd, map) \
(!fwd ? 0 : \
- (!map ? 0 : \
- ((map->map_type == BPF_MAP_TYPE_DEVMAP) ? \
- ((struct _bpf_dtab_netdev *)fwd)->dev->ifindex : 0)))
+ ((map->map_type == BPF_MAP_TYPE_DEVMAP) ? \
+ ((struct _bpf_dtab_netdev *)fwd)->dev->ifindex : 0))
#define _trace_xdp_redirect_map(dev, xdp, fwd, map, idx) \
trace_xdp_redirect_map(dev, xdp, devmap_ifindex(fwd, map), \
diff --git a/include/uapi/asm-generic/socket.h b/include/uapi/asm-generic/socket.h
index 0ae758c90e54..a12692e5f7a8 100644
--- a/include/uapi/asm-generic/socket.h
+++ b/include/uapi/asm-generic/socket.h
@@ -107,4 +107,7 @@
#define SO_ZEROCOPY 60
+#define SO_TXTIME 61
+#define SCM_TXTIME SO_TXTIME
+
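SO_TXTIME/SCM_TXTIME let a socket carry per-packet transmit times. A minimal userspace sketch of enabling the option, assuming struct sock_txtime and its flags are provided by <linux/net_tstamp.h> as introduced alongside this option:

    #include <sys/socket.h>
    #include <time.h>
    #include <linux/net_tstamp.h>	/* struct sock_txtime (assumed location) */

    static int enable_txtime(int fd)
    {
    	struct sock_txtime cfg = {
    		.clockid = CLOCK_TAI,	/* clock the transmit times refer to */
    		.flags = 0,
    	};

    	/* SO_TXTIME is the new option value 61 defined above. */
    	return setsockopt(fd, SOL_SOCKET, SO_TXTIME, &cfg, sizeof(cfg));
    }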
#endif /* __ASM_GENERIC_SOCKET_H */
diff --git a/include/uapi/asm-generic/unistd.h b/include/uapi/asm-generic/unistd.h
index 42990676a55e..df4bedb9b01c 100644
--- a/include/uapi/asm-generic/unistd.h
+++ b/include/uapi/asm-generic/unistd.h
@@ -734,9 +734,11 @@ __SYSCALL(__NR_pkey_free, sys_pkey_free)
__SYSCALL(__NR_statx, sys_statx)
#define __NR_io_pgetevents 292
__SC_COMP(__NR_io_pgetevents, sys_io_pgetevents, compat_sys_io_pgetevents)
+#define __NR_rseq 293
+__SYSCALL(__NR_rseq, sys_rseq)
#undef __NR_syscalls
-#define __NR_syscalls 293
+#define __NR_syscalls 294
/*
* 32 bit systems traditionally used different
diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h
index 78b4dd89fcb4..1ceec56de015 100644
--- a/include/uapi/drm/amdgpu_drm.h
+++ b/include/uapi/drm/amdgpu_drm.h
@@ -72,6 +72,29 @@ extern "C" {
#define DRM_IOCTL_AMDGPU_FENCE_TO_HANDLE DRM_IOWR(DRM_COMMAND_BASE + DRM_AMDGPU_FENCE_TO_HANDLE, union drm_amdgpu_fence_to_handle)
#define DRM_IOCTL_AMDGPU_SCHED DRM_IOW(DRM_COMMAND_BASE + DRM_AMDGPU_SCHED, union drm_amdgpu_sched)
+/**
+ * DOC: memory domains
+ *
+ * %AMDGPU_GEM_DOMAIN_CPU System memory that is not GPU accessible.
+ * Memory in this pool could be swapped out to disk if there is pressure.
+ *
+ * %AMDGPU_GEM_DOMAIN_GTT GPU accessible system memory, mapped into the
+ * GPU's virtual address space via gart. Gart memory linearizes non-contiguous
+ * pages of system memory, allowing the GPU to access system memory in a
+ * linearized fashion.
+ *
+ * %AMDGPU_GEM_DOMAIN_VRAM Local video memory. For APUs, it is memory
+ * carved out by the BIOS.
+ *
+ * %AMDGPU_GEM_DOMAIN_GDS Global on-chip data storage used to share data
+ * across shader threads.
+ *
+ * %AMDGPU_GEM_DOMAIN_GWS Global wave sync, used to synchronize the
+ * execution of all the waves on a device.
+ *
+ * %AMDGPU_GEM_DOMAIN_OA Ordered append, used by 3D or Compute engines
+ * for appending data.
+ */
#define AMDGPU_GEM_DOMAIN_CPU 0x1
#define AMDGPU_GEM_DOMAIN_GTT 0x2
#define AMDGPU_GEM_DOMAIN_VRAM 0x4
@@ -483,7 +506,8 @@ struct drm_amdgpu_gem_va {
#define AMDGPU_HW_IP_UVD_ENC 5
#define AMDGPU_HW_IP_VCN_DEC 6
#define AMDGPU_HW_IP_VCN_ENC 7
-#define AMDGPU_HW_IP_NUM 8
+#define AMDGPU_HW_IP_VCN_JPEG 8
+#define AMDGPU_HW_IP_NUM 9
#define AMDGPU_HW_IP_INSTANCE_MAX_COUNT 1
@@ -492,6 +516,7 @@ struct drm_amdgpu_gem_va {
#define AMDGPU_CHUNK_ID_DEPENDENCIES 0x03
#define AMDGPU_CHUNK_ID_SYNCOBJ_IN 0x04
#define AMDGPU_CHUNK_ID_SYNCOBJ_OUT 0x05
+#define AMDGPU_CHUNK_ID_BO_HANDLES 0x06
struct drm_amdgpu_cs_chunk {
__u32 chunk_id;
diff --git a/include/uapi/drm/drm.h b/include/uapi/drm/drm.h
index 9c660e1688ab..300f336633f2 100644
--- a/include/uapi/drm/drm.h
+++ b/include/uapi/drm/drm.h
@@ -687,6 +687,15 @@ struct drm_get_cap {
*/
#define DRM_CLIENT_CAP_ASPECT_RATIO 4
+/**
+ * DRM_CLIENT_CAP_WRITEBACK_CONNECTORS
+ *
+ * If set to 1, the DRM core will expose special connectors to be used for
+ * writing back to memory the scene set up in the commit. Depends on the
+ * client also supporting DRM_CLIENT_CAP_ATOMIC.
+ */
+#define DRM_CLIENT_CAP_WRITEBACK_CONNECTORS 5
+
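Writeback connectors stay hidden unless a client opts in. A hedged userspace sketch using libdrm's drmSetClientCap (the helper is an assumption here; this change only defines the capability value), enabling the atomic cap first since writeback depends on it:

    #include <xf86drm.h>

    static int enable_writeback(int drm_fd)
    {
    	/* Writeback connectors require the atomic client cap as noted above. */
    	if (drmSetClientCap(drm_fd, DRM_CLIENT_CAP_ATOMIC, 1))
    		return -1;
    	return drmSetClientCap(drm_fd, DRM_CLIENT_CAP_WRITEBACK_CONNECTORS, 1);
    }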
/** DRM_IOCTL_SET_CLIENT_CAP ioctl argument type */
struct drm_set_client_cap {
__u64 capability;
diff --git a/include/uapi/drm/drm_fourcc.h b/include/uapi/drm/drm_fourcc.h
index e04613d30a13..721ab7e54d96 100644
--- a/include/uapi/drm/drm_fourcc.h
+++ b/include/uapi/drm/drm_fourcc.h
@@ -183,6 +183,7 @@ extern "C" {
#define DRM_FORMAT_MOD_VENDOR_QCOM 0x05
#define DRM_FORMAT_MOD_VENDOR_VIVANTE 0x06
#define DRM_FORMAT_MOD_VENDOR_BROADCOM 0x07
+#define DRM_FORMAT_MOD_VENDOR_ARM 0x08
/* add more to the end as needed */
#define DRM_FORMAT_RESERVED ((1ULL << 56) - 1)
@@ -298,6 +299,19 @@ extern "C" {
*/
#define DRM_FORMAT_MOD_SAMSUNG_64_32_TILE fourcc_mod_code(SAMSUNG, 1)
+/*
+ * Qualcomm Compressed Format
+ *
+ * Refers to a compressed variant of the base format.
+ * Implementation may be platform and base-format specific.
+ *
+ * Each macrotile consists of m x n (mostly 4 x 4) tiles.
+ * Pixel data pitch/stride is aligned with macrotile width.
+ * Pixel data height is aligned with macrotile height.
+ * The entire pixel data buffer is aligned to 4k (bytes).
+ */
+#define DRM_FORMAT_MOD_QCOM_COMPRESSED fourcc_mod_code(QCOM, 1)
+
/* Vivante framebuffer modifiers */
/*
@@ -385,6 +399,23 @@ extern "C" {
fourcc_mod_code(NVIDIA, 0x15)
/*
+ * Some Broadcom modifiers take parameters, for example the number of
+ * vertical lines in the image. Reserve the lower 8 bits for the modifier
+ * type, and the next 48 bits for parameters. Top 8 bits are the
+ * vendor code.
+ */
+#define __fourcc_mod_broadcom_param_shift 8
+#define __fourcc_mod_broadcom_param_bits 48
+#define fourcc_mod_broadcom_code(val, params) \
+ fourcc_mod_code(BROADCOM, ((((__u64)params) << __fourcc_mod_broadcom_param_shift) | val))
+#define fourcc_mod_broadcom_param(m) \
+ ((int)(((m) >> __fourcc_mod_broadcom_param_shift) & \
+ ((1ULL << __fourcc_mod_broadcom_param_bits) - 1)))
+#define fourcc_mod_broadcom_mod(m) \
+ ((m) & ~(((1ULL << __fourcc_mod_broadcom_param_bits) - 1) << \
+ __fourcc_mod_broadcom_param_shift))
+
+/*
* Broadcom VC4 "T" format
*
* This is the primary layout that the V3D GPU can texture from (it
@@ -405,6 +436,151 @@ extern "C" {
*/
#define DRM_FORMAT_MOD_BROADCOM_VC4_T_TILED fourcc_mod_code(BROADCOM, 1)
+/*
+ * Broadcom SAND format
+ *
+ * This is the native format that the H.264 codec block uses. For VC4
+ * HVS, it is only valid for H.264 (NV12/21) and RGBA modes.
+ *
+ * The image can be considered to be split into columns, and the
+ * columns are placed consecutively into memory. The width of those
+ * columns can be either 32, 64, 128, or 256 pixels, but in practice
+ * only 128 pixel columns are used.
+ *
+ * The pitch between the start of each column is set to optimally
+ * switch between SDRAM banks. This is passed as the number of lines
+ * of column width in the modifier (we can't use the stride value due
+ * to various core checks that look at it, so you should set the
+ * stride to width*cpp).
+ *
+ * Note that the column height for this format modifier is the same
+ * for all of the planes, assuming that each column contains both Y
+ * and UV. Some SAND-using hardware stores UV in a separate tiled
+ * image from Y to reduce the column height, which is not supported
+ * with these modifiers.
+ */
+
+#define DRM_FORMAT_MOD_BROADCOM_SAND32_COL_HEIGHT(v) \
+ fourcc_mod_broadcom_code(2, v)
+#define DRM_FORMAT_MOD_BROADCOM_SAND64_COL_HEIGHT(v) \
+ fourcc_mod_broadcom_code(3, v)
+#define DRM_FORMAT_MOD_BROADCOM_SAND128_COL_HEIGHT(v) \
+ fourcc_mod_broadcom_code(4, v)
+#define DRM_FORMAT_MOD_BROADCOM_SAND256_COL_HEIGHT(v) \
+ fourcc_mod_broadcom_code(5, v)
+
+#define DRM_FORMAT_MOD_BROADCOM_SAND32 \
+ DRM_FORMAT_MOD_BROADCOM_SAND32_COL_HEIGHT(0)
+#define DRM_FORMAT_MOD_BROADCOM_SAND64 \
+ DRM_FORMAT_MOD_BROADCOM_SAND64_COL_HEIGHT(0)
+#define DRM_FORMAT_MOD_BROADCOM_SAND128 \
+ DRM_FORMAT_MOD_BROADCOM_SAND128_COL_HEIGHT(0)
+#define DRM_FORMAT_MOD_BROADCOM_SAND256 \
+ DRM_FORMAT_MOD_BROADCOM_SAND256_COL_HEIGHT(0)
+
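The SAND modifiers carry the column height in the parameter field described earlier. A small sketch of packing and unpacking it with the helpers added in this file (the 96-line column height is an arbitrary example value; the header's install path may differ, e.g. <libdrm/drm_fourcc.h>):

    #include <drm_fourcc.h>

    /* A SAND128 buffer whose columns are 96 lines tall. */
    __u64 mod = DRM_FORMAT_MOD_BROADCOM_SAND128_COL_HEIGHT(96);

    /* Recover the column height and the parameter-free base modifier. */
    int col_height = fourcc_mod_broadcom_param(mod);	/* 96 */
    __u64 base = fourcc_mod_broadcom_mod(mod);	/* == DRM_FORMAT_MOD_BROADCOM_SAND128 */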
+/* Broadcom UIF format
+ *
+ * This is the common format for the current Broadcom multimedia
+ * blocks, including V3D 3.x and newer, newer video codecs, and
+ * displays.
+ *
+ * The image consists of utiles (64b blocks), UIF blocks (2x2 utiles),
+ * and macroblocks (4x4 UIF blocks). Those 4x4 UIF block groups are
+ * stored in columns, with padding between the columns to ensure that
+ * moving from one column to the next doesn't hit the same SDRAM page
+ * bank.
+ *
+ * To calculate the padding, it is assumed that each hardware block
+ * and the software driving it knows the platform's SDRAM page size,
+ * number of banks, and XOR address, and that it's identical between
+ * all blocks using the format. This tiling modifier will use XOR as
+ * necessary to reduce the padding. If a hardware block can't do XOR,
+ * the assumption is that a no-XOR tiling modifier will be created.
+ */
+#define DRM_FORMAT_MOD_BROADCOM_UIF fourcc_mod_code(BROADCOM, 6)
+
+/*
+ * Arm Framebuffer Compression (AFBC) modifiers
+ *
+ * AFBC is a proprietary lossless image compression protocol and format.
+ * It provides fine-grained random access and minimizes the amount of data
+ * transferred between IP blocks.
+ *
+ * AFBC has several features which may be supported and/or used, which are
+ * represented using bits in the modifier. Not all combinations are valid,
+ * and different devices or use-cases may support different combinations.
+ */
+#define DRM_FORMAT_MOD_ARM_AFBC(__afbc_mode) fourcc_mod_code(ARM, __afbc_mode)
+
+/*
+ * AFBC superblock size
+ *
+ * Indicates the superblock size(s) used for the AFBC buffer. The buffer
+ * size (in pixels) must be aligned to a multiple of the superblock size.
+ * The four least significant bits (LSBs) are reserved for the block size.
+ */
+#define AFBC_FORMAT_MOD_BLOCK_SIZE_MASK 0xf
+#define AFBC_FORMAT_MOD_BLOCK_SIZE_16x16 (1ULL)
+#define AFBC_FORMAT_MOD_BLOCK_SIZE_32x8 (2ULL)
+
+/*
+ * AFBC lossless colorspace transform
+ *
+ * Indicates that the buffer makes use of the AFBC lossless colorspace
+ * transform.
+ */
+#define AFBC_FORMAT_MOD_YTR (1ULL << 4)
+
+/*
+ * AFBC block-split
+ *
+ * Indicates that the payload of each superblock is split. The second
+ * half of the payload is positioned at a predefined offset from the start
+ * of the superblock payload.
+ */
+#define AFBC_FORMAT_MOD_SPLIT (1ULL << 5)
+
+/*
+ * AFBC sparse layout
+ *
+ * This flag indicates that the payload of each superblock must be stored at a
+ * predefined position relative to the other superblocks in the same AFBC
+ * buffer. This order is the same order used by the header buffer. In this mode
+ * each superblock is given the same amount of space as an uncompressed
+ * superblock of the particular format would require, rounding up to the next
+ * multiple of 128 bytes in size.
+ */
+#define AFBC_FORMAT_MOD_SPARSE (1ULL << 6)
+
+/*
+ * AFBC copy-block restrict
+ *
+ * Buffers with this flag must obey the copy-block restriction. The restriction
+ * is such that there are no copy-blocks referring across the border of 8x8
+ * blocks. For the subsampled data the 8x8 limitation is also subsampled.
+ */
+#define AFBC_FORMAT_MOD_CBR (1ULL << 7)
+
+/*
+ * AFBC tiled layout
+ *
+ * The tiled layout groups superblocks in 8x8 or 4x4 tiles, where all
+ * superblocks inside a tile are stored together in memory. 8x8 tiles are used
+ * for pixel formats up to and including 32 bpp while 4x4 tiles are used for
+ * larger bpp formats. The order between the tiles is scan line.
+ * When the tiled layout is used, the buffer size (in pixels) must be aligned
+ * to the tile size.
+ */
+#define AFBC_FORMAT_MOD_TILED (1ULL << 8)
+
+/*
+ * AFBC solid color blocks
+ *
+ * Indicates that the buffer makes use of solid-color blocks, whereby bandwidth
+ * can be reduced if a whole superblock is a single color.
+ */
+#define AFBC_FORMAT_MOD_SC (1ULL << 9)
+
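An AFBC modifier is built by OR-ing the feature bits into DRM_FORMAT_MOD_ARM_AFBC(); whether a given combination is valid remains device-specific, as noted above. A small illustrative example:

    /* 16x16 superblocks with the lossless colorspace transform and sparse layout. */
    __u64 afbc_mod = DRM_FORMAT_MOD_ARM_AFBC(AFBC_FORMAT_MOD_BLOCK_SIZE_16x16 |
    					 AFBC_FORMAT_MOD_YTR |
    					 AFBC_FORMAT_MOD_SPARSE);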
#if defined(__cplusplus)
}
#endif
diff --git a/include/uapi/drm/drm_mode.h b/include/uapi/drm/drm_mode.h
index 4b3a1bb58e68..8d67243952f4 100644
--- a/include/uapi/drm/drm_mode.h
+++ b/include/uapi/drm/drm_mode.h
@@ -96,6 +96,13 @@ extern "C" {
#define DRM_MODE_PICTURE_ASPECT_64_27 3
#define DRM_MODE_PICTURE_ASPECT_256_135 4
+/* Content type options */
+#define DRM_MODE_CONTENT_TYPE_NO_DATA 0
+#define DRM_MODE_CONTENT_TYPE_GRAPHICS 1
+#define DRM_MODE_CONTENT_TYPE_PHOTO 2
+#define DRM_MODE_CONTENT_TYPE_CINEMA 3
+#define DRM_MODE_CONTENT_TYPE_GAME 4
+
/* Aspect ratio flag bitmask (4 bits 22:19) */
#define DRM_MODE_FLAG_PIC_AR_MASK (0x0F<<19)
#define DRM_MODE_FLAG_PIC_AR_NONE \
@@ -344,6 +351,7 @@ enum drm_mode_subconnector {
#define DRM_MODE_CONNECTOR_VIRTUAL 15
#define DRM_MODE_CONNECTOR_DSI 16
#define DRM_MODE_CONNECTOR_DPI 17
+#define DRM_MODE_CONNECTOR_WRITEBACK 18
struct drm_mode_get_connector {
diff --git a/include/uapi/drm/vmwgfx_drm.h b/include/uapi/drm/vmwgfx_drm.h
index 0bc784f5e0db..399f58317cff 100644
--- a/include/uapi/drm/vmwgfx_drm.h
+++ b/include/uapi/drm/vmwgfx_drm.h
@@ -40,6 +40,7 @@ extern "C" {
#define DRM_VMW_GET_PARAM 0
#define DRM_VMW_ALLOC_DMABUF 1
+#define DRM_VMW_ALLOC_BO 1
#define DRM_VMW_UNREF_DMABUF 2
#define DRM_VMW_HANDLE_CLOSE 2
#define DRM_VMW_CURSOR_BYPASS 3
@@ -68,6 +69,8 @@ extern "C" {
#define DRM_VMW_GB_SURFACE_REF 24
#define DRM_VMW_SYNCCPU 25
#define DRM_VMW_CREATE_EXTENDED_CONTEXT 26
+#define DRM_VMW_GB_SURFACE_CREATE_EXT 27
+#define DRM_VMW_GB_SURFACE_REF_EXT 28
/*************************************************************************/
/**
@@ -79,6 +82,9 @@ extern "C" {
*
* DRM_VMW_PARAM_OVERLAY_IOCTL:
* Does the driver support the overlay ioctl.
+ *
+ * DRM_VMW_PARAM_SM4_1
+ * SM4_1 support is enabled.
*/
#define DRM_VMW_PARAM_NUM_STREAMS 0
@@ -94,6 +100,8 @@ extern "C" {
#define DRM_VMW_PARAM_MAX_MOB_SIZE 10
#define DRM_VMW_PARAM_SCREEN_TARGET 11
#define DRM_VMW_PARAM_DX 12
+#define DRM_VMW_PARAM_HW_CAPS2 13
+#define DRM_VMW_PARAM_SM4_1 14
/**
* enum drm_vmw_handle_type - handle type for ref ioctls
@@ -356,9 +364,9 @@ struct drm_vmw_fence_rep {
/*************************************************************************/
/**
- * DRM_VMW_ALLOC_DMABUF
+ * DRM_VMW_ALLOC_BO
*
- * Allocate a DMA buffer that is visible also to the host.
+ * Allocate a buffer object that is visible also to the host.
* NOTE: The buffer is
* identified by a handle and an offset, which are private to the guest, but
* useable in the command stream. The guest kernel may translate these
@@ -366,27 +374,28 @@ struct drm_vmw_fence_rep {
* be zero at all times, or it may disappear from the interface before it is
* fixed.
*
- * The DMA buffer may stay user-space mapped in the guest at all times,
+ * The buffer object may stay user-space mapped in the guest at all times,
* and is thus suitable for sub-allocation.
*
- * DMA buffers are mapped using the mmap() syscall on the drm device.
+ * Buffer objects are mapped using the mmap() syscall on the drm device.
*/
/**
- * struct drm_vmw_alloc_dmabuf_req
+ * struct drm_vmw_alloc_bo_req
*
* @size: Required minimum size of the buffer.
*
- * Input data to the DRM_VMW_ALLOC_DMABUF Ioctl.
+ * Input data to the DRM_VMW_ALLOC_BO Ioctl.
*/
-struct drm_vmw_alloc_dmabuf_req {
+struct drm_vmw_alloc_bo_req {
__u32 size;
__u32 pad64;
};
+#define drm_vmw_alloc_dmabuf_req drm_vmw_alloc_bo_req
/**
- * struct drm_vmw_dmabuf_rep
+ * struct drm_vmw_bo_rep
*
* @map_handle: Offset to use in the mmap() call used to map the buffer.
* @handle: Handle unique to this buffer. Used for unreferencing.
@@ -395,50 +404,32 @@ struct drm_vmw_alloc_dmabuf_req {
* @cur_gmr_offset: Offset to use in the command stream when this buffer is
* referenced. See note above.
*
- * Output data from the DRM_VMW_ALLOC_DMABUF Ioctl.
+ * Output data from the DRM_VMW_ALLOC_BO Ioctl.
*/
-struct drm_vmw_dmabuf_rep {
+struct drm_vmw_bo_rep {
__u64 map_handle;
__u32 handle;
__u32 cur_gmr_id;
__u32 cur_gmr_offset;
__u32 pad64;
};
+#define drm_vmw_dmabuf_rep drm_vmw_bo_rep
/**
- * union drm_vmw_dmabuf_arg
+ * union drm_vmw_alloc_bo_arg
*
* @req: Input data as described above.
* @rep: Output data as described above.
*
- * Argument to the DRM_VMW_ALLOC_DMABUF Ioctl.
+ * Argument to the DRM_VMW_ALLOC_BO Ioctl.
*/
-union drm_vmw_alloc_dmabuf_arg {
- struct drm_vmw_alloc_dmabuf_req req;
- struct drm_vmw_dmabuf_rep rep;
-};
-
-/*************************************************************************/
-/**
- * DRM_VMW_UNREF_DMABUF - Free a DMA buffer.
- *
- */
-
-/**
- * struct drm_vmw_unref_dmabuf_arg
- *
- * @handle: Handle indicating what buffer to free. Obtained from the
- * DRM_VMW_ALLOC_DMABUF Ioctl.
- *
- * Argument to the DRM_VMW_UNREF_DMABUF Ioctl.
- */
-
-struct drm_vmw_unref_dmabuf_arg {
- __u32 handle;
- __u32 pad64;
+union drm_vmw_alloc_bo_arg {
+ struct drm_vmw_alloc_bo_req req;
+ struct drm_vmw_bo_rep rep;
};
+#define drm_vmw_alloc_dmabuf_arg drm_vmw_alloc_bo_arg
/*************************************************************************/
/**
@@ -1103,9 +1094,8 @@ union drm_vmw_extended_context_arg {
* DRM_VMW_HANDLE_CLOSE - Close a user-space handle and release its
* underlying resource.
*
- * Note that this ioctl is overlaid on the DRM_VMW_UNREF_DMABUF Ioctl.
- * The ioctl arguments therefore need to be identical in layout.
- *
+ * Note that this ioctl is overlaid on the deprecated DRM_VMW_UNREF_DMABUF
+ * Ioctl.
*/
/**
@@ -1119,7 +1109,107 @@ struct drm_vmw_handle_close_arg {
__u32 handle;
__u32 pad64;
};
+#define drm_vmw_unref_dmabuf_arg drm_vmw_handle_close_arg
+
+/*************************************************************************/
+/**
+ * DRM_VMW_GB_SURFACE_CREATE_EXT - Create a host guest-backed surface.
+ *
+ * Allocates a surface handle and queues a create surface command
+ * for the host on the first use of the surface. The surface ID can
+ * be used as the surface ID in commands referencing the surface.
+ *
+ * This new command extends DRM_VMW_GB_SURFACE_CREATE by adding version
+ * parameter and 64 bit svga flag.
+ */
+
+/**
+ * enum drm_vmw_surface_version
+ *
+ * @drm_vmw_gb_surface_v1: Corresponds to current gb surface format with
+ * svga3d surface flags split into 2, upper half and lower half.
+ */
+enum drm_vmw_surface_version {
+ drm_vmw_gb_surface_v1
+};
+
+/**
+ * struct drm_vmw_gb_surface_create_ext_req
+ *
+ * @base: Surface create parameters.
+ * @version: Version of surface create ioctl.
+ * @svga3d_flags_upper_32_bits: Upper 32 bits of svga3d flags.
+ * @multisample_pattern: Multisampling pattern when msaa is supported.
+ * @quality_level: Precision settings for each sample.
+ * @must_be_zero: Reserved for future usage.
+ *
+ * Input argument to the DRM_VMW_GB_SURFACE_CREATE_EXT Ioctl.
+ * Part of output argument for the DRM_VMW_GB_SURFACE_REF_EXT Ioctl.
+ */
+struct drm_vmw_gb_surface_create_ext_req {
+ struct drm_vmw_gb_surface_create_req base;
+ enum drm_vmw_surface_version version;
+ uint32_t svga3d_flags_upper_32_bits;
+ SVGA3dMSPattern multisample_pattern;
+ SVGA3dMSQualityLevel quality_level;
+ uint64_t must_be_zero;
+};
+
+/**
+ * union drm_vmw_gb_surface_create_ext_arg
+ *
+ * @req: Input argument as described above.
+ * @rep: Output argument as described above.
+ *
+ * Argument to the DRM_VMW_GB_SURFACE_CREATE_EXT ioctl.
+ */
+union drm_vmw_gb_surface_create_ext_arg {
+ struct drm_vmw_gb_surface_create_rep rep;
+ struct drm_vmw_gb_surface_create_ext_req req;
+};
+
+/*************************************************************************/
+/**
+ * DRM_VMW_GB_SURFACE_REF_EXT - Reference a host surface.
+ *
+ * Puts a reference on a host surface with a given handle, as previously
+ * returned by the DRM_VMW_GB_SURFACE_CREATE_EXT ioctl.
+ * A reference will make sure the surface isn't destroyed while we hold
+ * it and will allow the calling client to use the surface handle in
+ * the command stream.
+ *
+ * On successful return, the Ioctl returns the surface information given
+ * to and returned from the DRM_VMW_GB_SURFACE_CREATE_EXT ioctl.
+ */
+/**
+ * struct drm_vmw_gb_surface_ref_ext_rep
+ *
+ * @creq: The data used as input when the surface was created, as described
+ * above at "struct drm_vmw_gb_surface_create_ext_req"
+ * @crep: Additional data output when the surface was created, as described
+ * above at "struct drm_vmw_gb_surface_create_rep"
+ *
+ * Output Argument to the DRM_VMW_GB_SURFACE_REF_EXT ioctl.
+ */
+struct drm_vmw_gb_surface_ref_ext_rep {
+ struct drm_vmw_gb_surface_create_ext_req creq;
+ struct drm_vmw_gb_surface_create_rep crep;
+};
+
+/**
+ * union drm_vmw_gb_surface_reference_ext_arg
+ *
+ * @req: Input data as described above at "struct drm_vmw_surface_arg"
+ * @rep: Output data as described above at
+ * "struct drm_vmw_gb_surface_ref_ext_rep"
+ *
+ * Argument to the DRM_VMW_GB_SURFACE_REF_EXT Ioctl.
+ */
+union drm_vmw_gb_surface_reference_ext_arg {
+ struct drm_vmw_gb_surface_ref_ext_rep rep;
+ struct drm_vmw_surface_arg req;
+};
#if defined(__cplusplus)
}
diff --git a/include/uapi/linux/aio_abi.h b/include/uapi/linux/aio_abi.h
index d00221345c19..ce43d340f010 100644
--- a/include/uapi/linux/aio_abi.h
+++ b/include/uapi/linux/aio_abi.h
@@ -29,7 +29,6 @@
#include <linux/types.h>
#include <linux/fs.h>
-#include <linux/signal.h>
#include <asm/byteorder.h>
typedef __kernel_ulong_t aio_context_t;
@@ -108,10 +107,5 @@ struct iocb {
#undef IFBIG
#undef IFLITTLE
-struct __aio_sigset {
- const sigset_t __user *sigmask;
- size_t sigsetsize;
-};
-
#endif /* __LINUX__AIO_ABI_H */
diff --git a/include/uapi/linux/audit.h b/include/uapi/linux/audit.h
index c35aee9ad4a6..818ae690ab79 100644
--- a/include/uapi/linux/audit.h
+++ b/include/uapi/linux/audit.h
@@ -148,6 +148,7 @@
#define AUDIT_INTEGRITY_PCR 1804 /* PCR invalidation msgs */
#define AUDIT_INTEGRITY_RULE 1805 /* policy rule */
#define AUDIT_INTEGRITY_EVM_XATTR 1806 /* New EVM-covered xattr */
+#define AUDIT_INTEGRITY_POLICY_RULE 1807 /* IMA policy rules */
#define AUDIT_KERNEL 2000 /* Asynchronous audit record. NOT A REQUEST. */
@@ -157,7 +158,8 @@
#define AUDIT_FILTER_ENTRY 0x02 /* Apply rule at syscall entry */
#define AUDIT_FILTER_WATCH 0x03 /* Apply rule to file system watches */
#define AUDIT_FILTER_EXIT 0x04 /* Apply rule at syscall exit */
-#define AUDIT_FILTER_TYPE 0x05 /* Apply rule at audit_log_start */
+#define AUDIT_FILTER_EXCLUDE 0x05 /* Apply rule before record creation */
+#define AUDIT_FILTER_TYPE AUDIT_FILTER_EXCLUDE /* obsolete misleading naming */
#define AUDIT_FILTER_FS 0x06 /* Apply rule at __audit_inode_child */
#define AUDIT_NR_FILTERS 7
diff --git a/include/uapi/linux/auto_fs.h b/include/uapi/linux/auto_fs.h
index e13eec3dfb2f..df31aa9c9a8c 100644
--- a/include/uapi/linux/auto_fs.h
+++ b/include/uapi/linux/auto_fs.h
@@ -23,7 +23,7 @@
#define AUTOFS_MIN_PROTO_VERSION 3
#define AUTOFS_MAX_PROTO_VERSION 5
-#define AUTOFS_PROTO_SUBVERSION 2
+#define AUTOFS_PROTO_SUBVERSION 3
/*
* The wait_queue_token (autofs_wqt_t) is part of a structure which is passed
@@ -90,8 +90,10 @@ enum {
/* autofs version 4 and later definitions */
/* Mask for expire behaviour */
-#define AUTOFS_EXP_IMMEDIATE 1
-#define AUTOFS_EXP_LEAVES 2
+#define AUTOFS_EXP_NORMAL 0x00
+#define AUTOFS_EXP_IMMEDIATE 0x01
+#define AUTOFS_EXP_LEAVES 0x02
+#define AUTOFS_EXP_FORCED 0x04
#define AUTOFS_TYPE_ANY 0U
#define AUTOFS_TYPE_INDIRECT 1U
diff --git a/include/uapi/linux/bcache.h b/include/uapi/linux/bcache.h
index 821f71a2e48f..5d4f58e059fd 100644
--- a/include/uapi/linux/bcache.h
+++ b/include/uapi/linux/bcache.h
@@ -30,10 +30,10 @@ struct bkey {
BITMASK(name, struct bkey, field, offset, size)
#define PTR_FIELD(name, offset, size) \
-static inline __u64 name(const struct bkey *k, unsigned i) \
+static inline __u64 name(const struct bkey *k, unsigned int i) \
{ return (k->ptr[i] >> offset) & ~(~0ULL << size); } \
\
-static inline void SET_##name(struct bkey *k, unsigned i, __u64 v) \
+static inline void SET_##name(struct bkey *k, unsigned int i, __u64 v) \
{ \
k->ptr[i] &= ~(~(~0ULL << size) << offset); \
k->ptr[i] |= (v & ~(~0ULL << size)) << offset; \
@@ -117,12 +117,14 @@ static inline void bkey_copy_key(struct bkey *dest, const struct bkey *src)
static inline struct bkey *bkey_next(const struct bkey *k)
{
__u64 *d = (void *) k;
+
return (struct bkey *) (d + bkey_u64s(k));
}
-static inline struct bkey *bkey_idx(const struct bkey *k, unsigned nr_keys)
+static inline struct bkey *bkey_idx(const struct bkey *k, unsigned int nr_keys)
{
__u64 *d = (void *) k;
+
return (struct bkey *) (d + nr_keys);
}
/* Enough for a key with 6 pointers */
@@ -195,7 +197,7 @@ struct cache_sb {
};
};
- __u32 last_mount; /* time_t */
+ __u32 last_mount; /* time overflow in y2106 */
__u16 first_bucket;
union {
@@ -318,7 +320,7 @@ struct uuid_entry {
struct {
__u8 uuid[16];
__u8 label[32];
- __u32 first_reg;
+ __u32 first_reg; /* time overflow in y2106 */
__u32 last_reg;
__u32 invalidated;
diff --git a/include/uapi/linux/blkzoned.h b/include/uapi/linux/blkzoned.h
index e3c70fe6bf0f..ff5a5db8906a 100644
--- a/include/uapi/linux/blkzoned.h
+++ b/include/uapi/linux/blkzoned.h
@@ -117,7 +117,7 @@ struct blk_zone_report {
__u32 nr_zones;
__u8 reserved[4];
struct blk_zone zones[0];
-} __packed;
+};
/**
* struct blk_zone_range - BLKRESETZONE ioctl request
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index 59b19b6a40d7..66917a4eba27 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -75,6 +75,11 @@ struct bpf_lpm_trie_key {
__u8 data[0]; /* Arbitrary size */
};
+struct bpf_cgroup_storage_key {
+ __u64 cgroup_inode_id; /* cgroup inode id */
+ __u32 attach_type; /* program attach type */
+};
+
/* BPF syscall commands, see bpf(2) man-page for details. */
enum bpf_cmd {
BPF_MAP_CREATE,
@@ -120,6 +125,8 @@ enum bpf_map_type {
BPF_MAP_TYPE_CPUMAP,
BPF_MAP_TYPE_XSKMAP,
BPF_MAP_TYPE_SOCKHASH,
+ BPF_MAP_TYPE_CGROUP_STORAGE,
+ BPF_MAP_TYPE_REUSEPORT_SOCKARRAY,
};
enum bpf_prog_type {
@@ -144,6 +151,7 @@ enum bpf_prog_type {
BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
BPF_PROG_TYPE_LWT_SEG6LOCAL,
BPF_PROG_TYPE_LIRC_MODE2,
+ BPF_PROG_TYPE_SK_REUSEPORT,
};
enum bpf_attach_type {
@@ -1371,6 +1379,20 @@ union bpf_attr {
* A 8-byte long non-decreasing number on success, or 0 if the
* socket field is missing inside *skb*.
*
+ * u64 bpf_get_socket_cookie(struct bpf_sock_addr *ctx)
+ * Description
+ * Equivalent to bpf_get_socket_cookie() helper that accepts
+ * *skb*, but gets socket from **struct bpf_sock_addr** context.
+ * Return
+ * A 8-byte long non-decreasing number.
+ *
+ * u64 bpf_get_socket_cookie(struct bpf_sock_ops *ctx)
+ * Description
+ * Equivalent to bpf_get_socket_cookie() helper that accepts
+ * *skb*, but gets socket from **struct bpf_sock_ops** context.
+ * Return
+ * A 8-byte long non-decreasing number.
+ *
* u32 bpf_get_socket_uid(struct sk_buff *skb)
* Return
* The owner UID of the socket associated to *skb*. If the socket
@@ -1826,7 +1848,7 @@ union bpf_attr {
* A non-negative value equal to or less than *size* on success,
* or a negative error in case of failure.
*
- * int skb_load_bytes_relative(const struct sk_buff *skb, u32 offset, void *to, u32 len, u32 start_header)
+ * int bpf_skb_load_bytes_relative(const struct sk_buff *skb, u32 offset, void *to, u32 len, u32 start_header)
* Description
* This helper is similar to **bpf_skb_load_bytes**\ () in that
* it provides an easy way to load *len* bytes from *offset*
@@ -1857,7 +1879,8 @@ union bpf_attr {
* is resolved), the nexthop address is returned in ipv4_dst
* or ipv6_dst based on family, smac is set to mac address of
* egress device, dmac is set to nexthop mac address, rt_metric
- * is set to metric from route (IPv4/IPv6 only).
+ * is set to metric from route (IPv4/IPv6 only), and ifindex
+ * is set to the device index of the nexthop from the FIB lookup.
*
* *plen* argument is the size of the passed in struct.
* *flags* argument can be a combination of one or more of the
@@ -1873,9 +1896,10 @@ union bpf_attr {
* *ctx* is either **struct xdp_md** for XDP programs or
* **struct sk_buff** tc cls_act programs.
* Return
- * Egress device index on success, 0 if packet needs to continue
- * up the stack for further processing or a negative error in case
- * of failure.
+ * * < 0 if any input argument is invalid
+ * * 0 on success (packet is forwarded, nexthop neighbor exists)
+ * * > 0 one of **BPF_FIB_LKUP_RET_** codes explaining why the
+ * packet is not forwarded or needs assist from full stack
*
* int bpf_sock_hash_update(struct bpf_sock_ops_kern *skops, struct bpf_map *map, void *key, u64 flags)
* Description
@@ -2031,7 +2055,6 @@ union bpf_attr {
* This helper is only available is the kernel was compiled with
* the **CONFIG_BPF_LIRC_MODE2** configuration option set to
* "**y**".
- *
* Return
* 0
*
@@ -2051,7 +2074,6 @@ union bpf_attr {
* This helper is only available is the kernel was compiled with
* the **CONFIG_BPF_LIRC_MODE2** configuration option set to
* "**y**".
- *
* Return
* 0
*
@@ -2071,10 +2093,54 @@ union bpf_attr {
* Return
* The id is returned or 0 in case the id could not be retrieved.
*
+ * u64 bpf_skb_ancestor_cgroup_id(struct sk_buff *skb, int ancestor_level)
+ * Description
+ * Return id of cgroup v2 that is ancestor of cgroup associated
+ * with the *skb* at the *ancestor_level*. The root cgroup is at
+ * *ancestor_level* zero and each step down the hierarchy
+ * increments the level. If *ancestor_level* == level of cgroup
+ * associated with *skb*, then return value will be same as that
+ * of **bpf_skb_cgroup_id**\ ().
+ *
+ * The helper is useful for implementing policies based on cgroups
+ * that are higher in the hierarchy than the immediate cgroup
+ * associated with *skb*.
+ *
+ * The format of returned id and helper limitations are same as in
+ * **bpf_skb_cgroup_id**\ ().
+ * Return
+ * The id is returned or 0 in case the id could not be retrieved.
+ *
* u64 bpf_get_current_cgroup_id(void)
* Return
* A 64-bit integer containing the current cgroup id based
* on the cgroup within which the current task is running.
+ *
+ * void *bpf_get_local_storage(void *map, u64 flags)
+ * Description
+ * Get the pointer to the local storage area.
+ * The type and the size of the local storage are defined
+ * by the *map* argument.
+ * The *flags* meaning is specific for each map type,
+ * and has to be 0 for cgroup local storage.
+ *
+ * Depending on the bpf program type, a local storage area
+ * can be shared between multiple instances of the bpf program,
+ * running simultaneously.
+ *
+ * Users must handle synchronization themselves, for example
+ * by using the BPF_STX_XADD instruction to alter the
+ * shared data.
+ * Return
+ * Pointer to the local storage area.
+ *
+ * int bpf_sk_select_reuseport(struct sk_reuseport_md *reuse, struct bpf_map *map, void *key, u64 flags)
+ * Description
+ * Select a SO_REUSEPORT sk from a BPF_MAP_TYPE_REUSEPORT_ARRAY map.
+ * It checks that the selected sk matches the incoming
+ * request in the skb.
+ * Return
+ * 0 on success, or a negative error in case of failure.
*/
#define __BPF_FUNC_MAPPER(FN) \
FN(unspec), \
@@ -2157,7 +2223,10 @@ union bpf_attr {
FN(rc_repeat), \
FN(rc_keydown), \
FN(skb_cgroup_id), \
- FN(get_current_cgroup_id),
+ FN(get_current_cgroup_id), \
+ FN(get_local_storage), \
+ FN(sk_select_reuseport), \
+ FN(skb_ancestor_cgroup_id),
/* integer value in 'imm' field of BPF_CALL instruction selects which helper
* function eBPF program intends to call
@@ -2374,6 +2443,30 @@ struct sk_msg_md {
__u32 local_port; /* stored in host byte order */
};
+struct sk_reuseport_md {
+ /*
+ * Start of directly accessible data. It begins from
+ * the tcp/udp header.
+ */
+ void *data;
+ void *data_end; /* End of directly accessible data */
+ /*
+ * Total length of packet (starting from the tcp/udp header).
+ * Note that the directly accessible bytes (data_end - data)
+ * could be less than this "len". Those bytes could be
+ * indirectly read by a helper "bpf_skb_load_bytes()".
+ */
+ __u32 len;
+ /*
+ * Eth protocol in the mac header (network byte order). e.g.
+ * ETH_P_IP(0x0800) and ETH_P_IPV6(0x86DD)
+ */
+ __u32 eth_protocol;
+ __u32 ip_protocol; /* IP protocol. e.g. IPPROTO_TCP, IPPROTO_UDP */
+ __u32 bind_inany; /* Is sock bound to an INANY address? */
+ __u32 hash; /* A hash of the packet 4 tuples */
+};
+
#define BPF_TAG_SIZE 8
struct bpf_prog_info {
@@ -2555,6 +2648,9 @@ enum {
* Arg1: old_state
* Arg2: new_state
*/
+ BPF_SOCK_OPS_TCP_LISTEN_CB, /* Called on listen(2), right after
+ * socket transition to LISTEN state.
+ */
};
/* List of TCP states. There is a build check in net/ipv4/tcp.c to detect
@@ -2612,6 +2708,18 @@ struct bpf_raw_tracepoint_args {
#define BPF_FIB_LOOKUP_DIRECT BIT(0)
#define BPF_FIB_LOOKUP_OUTPUT BIT(1)
+enum {
+ BPF_FIB_LKUP_RET_SUCCESS, /* lookup successful */
+ BPF_FIB_LKUP_RET_BLACKHOLE, /* dest is blackholed; can be dropped */
+ BPF_FIB_LKUP_RET_UNREACHABLE, /* dest is unreachable; can be dropped */
+ BPF_FIB_LKUP_RET_PROHIBIT, /* dest not allowed; can be dropped */
+ BPF_FIB_LKUP_RET_NOT_FWDED, /* packet is not forwarded */
+ BPF_FIB_LKUP_RET_FWD_DISABLED, /* fwding is not enabled on ingress */
+ BPF_FIB_LKUP_RET_UNSUPP_LWT, /* fwd requires encapsulation */
+ BPF_FIB_LKUP_RET_NO_NEIGH, /* no neighbor entry for nh */
+ BPF_FIB_LKUP_RET_FRAG_NEEDED, /* fragmentation required to fwd */
+};
+
struct bpf_fib_lookup {
/* input: network family for lookup (AF_INET, AF_INET6)
* output: network family of egress nexthop
@@ -2625,7 +2733,11 @@ struct bpf_fib_lookup {
/* total length of packet from network header - used for MTU check */
__u16 tot_len;
- __u32 ifindex; /* L3 device index for lookup */
+
+ /* input: L3 device index for lookup
+ * output: device index from FIB lookup
+ */
+ __u32 ifindex;
union {
/* inputs to lookup */
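
To make the new cgroup local storage pieces concrete, here is a minimal, hedged sketch of a cgroup/skb BPF program that pairs BPF_MAP_TYPE_CGROUP_STORAGE with the get_local_storage helper and an atomic (BPF_STX_XADD) update. The SEC()/struct bpf_map_def/helper-stub boilerplate follows the samples/bpf conventions and is declared inline so the snippet is self-contained; the clang invocation in the comment is an assumption.

/*
 * Minimal sketch (not from the patch): count bytes per attached cgroup.
 * Build (assumption): clang -O2 -target bpf -c cgroup_bytes.c -o cgroup_bytes.o
 */
#include <linux/bpf.h>

#define SEC(name) __attribute__((section(name), used))

struct bpf_map_def {
	unsigned int type;
	unsigned int key_size;
	unsigned int value_size;
	unsigned int max_entries;
	unsigned int map_flags;
};

static void *(*bpf_get_local_storage)(void *map, __u64 flags) =
	(void *) BPF_FUNC_get_local_storage;

struct bpf_map_def SEC("maps") byte_counter = {
	.type		= BPF_MAP_TYPE_CGROUP_STORAGE,
	.key_size	= sizeof(struct bpf_cgroup_storage_key),
	.value_size	= sizeof(__u64),
	.max_entries	= 0,	/* one slot per attached cgroup */
};

SEC("cgroup/skb")
int count_bytes(struct __sk_buff *skb)
{
	__u64 *bytes;

	/* flags must be 0 for cgroup local storage */
	bytes = bpf_get_local_storage(&byte_counter, 0);

	/* atomic add, emitted as BPF_STX_XADD */
	__sync_fetch_and_add(bytes, skb->len);

	return 1;	/* allow the packet */
}

char _license[] SEC("license") = "GPL";
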
diff --git a/include/uapi/linux/btf.h b/include/uapi/linux/btf.h
index 0b5ddbe135a4..972265f32871 100644
--- a/include/uapi/linux/btf.h
+++ b/include/uapi/linux/btf.h
@@ -76,7 +76,7 @@ struct btf_type {
*/
#define BTF_INT_ENCODING(VAL) (((VAL) & 0x0f000000) >> 24)
#define BTF_INT_OFFSET(VAL) (((VAL & 0x00ff0000)) >> 16)
-#define BTF_INT_BITS(VAL) ((VAL) & 0x0000ffff)
+#define BTF_INT_BITS(VAL) ((VAL) & 0x000000ff)
/* Attributes stored in the BTF_INT_ENCODING */
#define BTF_INT_SIGNED (1 << 0)
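
A small illustrative decoder for the tightened BTF_INT_BITS mask, showing how the __u32 that follows a BTF_KIND_INT entry is unpacked with the macros above (the bit width can never exceed 128, so 8 bits suffice):

#include <stdio.h>
#include <linux/btf.h>

static void dump_int_type(__u32 int_data)
{
	__u32 encoding = BTF_INT_ENCODING(int_data);	/* SIGNED/CHAR/BOOL bits */
	__u32 offset   = BTF_INT_OFFSET(int_data);	/* bit offset in storage */
	__u32 nr_bits  = BTF_INT_BITS(int_data);	/* width in bits */

	printf("int: encoding=%#x offset=%u bits=%u signed=%d\n",
	       encoding, offset, nr_bits,
	       !!(encoding & BTF_INT_SIGNED));
}
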
diff --git a/include/uapi/linux/can.h b/include/uapi/linux/can.h
index d7f97ac197a9..0afb7d8e867f 100644
--- a/include/uapi/linux/can.h
+++ b/include/uapi/linux/can.h
@@ -77,7 +77,7 @@ typedef __u32 canid_t;
/*
* Controller Area Network Error Message Frame Mask structure
*
- * bit 0-28 : error class mask (see include/linux/can/error.h)
+ * bit 0-28 : error class mask (see include/uapi/linux/can/error.h)
* bit 29-31 : set to zero
*/
typedef __u32 can_err_mask_t;
diff --git a/include/uapi/linux/cec.h b/include/uapi/linux/cec.h
index 20fe091b7e96..097fcd812471 100644
--- a/include/uapi/linux/cec.h
+++ b/include/uapi/linux/cec.h
@@ -384,6 +384,8 @@ struct cec_log_addrs {
#define CEC_EVENT_PIN_CEC_HIGH 4
#define CEC_EVENT_PIN_HPD_LOW 5
#define CEC_EVENT_PIN_HPD_HIGH 6
+#define CEC_EVENT_PIN_5V_LOW 7
+#define CEC_EVENT_PIN_5V_HIGH 8
#define CEC_EVENT_FL_INITIAL_STATE (1 << 0)
#define CEC_EVENT_FL_DROPPED_EVENTS (1 << 1)
diff --git a/include/uapi/linux/dcbnl.h b/include/uapi/linux/dcbnl.h
index 60aa2e446698..69df19aa8e72 100644
--- a/include/uapi/linux/dcbnl.h
+++ b/include/uapi/linux/dcbnl.h
@@ -233,7 +233,8 @@ struct cee_pfc {
* 2 Well known port number over TCP or SCTP
* 3 Well known port number over UDP or DCCP
* 4 Well known port number over TCP, SCTP, UDP, or DCCP
- * 5-7 Reserved
+ * 5 Differentiated Services Code Point (DSCP) value
+ * 6-7 Reserved
*
* Selector field values for CEE
* 0 Ethertype
diff --git a/include/uapi/linux/devlink.h b/include/uapi/linux/devlink.h
index 75cb5450c851..79407bbd296d 100644
--- a/include/uapi/linux/devlink.h
+++ b/include/uapi/linux/devlink.h
@@ -78,6 +78,17 @@ enum devlink_command {
*/
DEVLINK_CMD_RELOAD,
+ DEVLINK_CMD_PARAM_GET, /* can dump */
+ DEVLINK_CMD_PARAM_SET,
+ DEVLINK_CMD_PARAM_NEW,
+ DEVLINK_CMD_PARAM_DEL,
+
+ DEVLINK_CMD_REGION_GET,
+ DEVLINK_CMD_REGION_SET,
+ DEVLINK_CMD_REGION_NEW,
+ DEVLINK_CMD_REGION_DEL,
+ DEVLINK_CMD_REGION_READ,
+
/* add new commands above here */
__DEVLINK_CMD_MAX,
DEVLINK_CMD_MAX = __DEVLINK_CMD_MAX - 1
@@ -142,6 +153,16 @@ enum devlink_port_flavour {
*/
};
+enum devlink_param_cmode {
+ DEVLINK_PARAM_CMODE_RUNTIME,
+ DEVLINK_PARAM_CMODE_DRIVERINIT,
+ DEVLINK_PARAM_CMODE_PERMANENT,
+
+ /* Add new configuration modes above */
+ __DEVLINK_PARAM_CMODE_MAX,
+ DEVLINK_PARAM_CMODE_MAX = __DEVLINK_PARAM_CMODE_MAX - 1
+};
+
enum devlink_attr {
/* don't change the order or add anything between, this is ABI! */
DEVLINK_ATTR_UNSPEC,
@@ -238,6 +259,27 @@ enum devlink_attr {
DEVLINK_ATTR_PORT_NUMBER, /* u32 */
DEVLINK_ATTR_PORT_SPLIT_SUBPORT_NUMBER, /* u32 */
+ DEVLINK_ATTR_PARAM, /* nested */
+ DEVLINK_ATTR_PARAM_NAME, /* string */
+ DEVLINK_ATTR_PARAM_GENERIC, /* flag */
+ DEVLINK_ATTR_PARAM_TYPE, /* u8 */
+ DEVLINK_ATTR_PARAM_VALUES_LIST, /* nested */
+ DEVLINK_ATTR_PARAM_VALUE, /* nested */
+ DEVLINK_ATTR_PARAM_VALUE_DATA, /* dynamic */
+ DEVLINK_ATTR_PARAM_VALUE_CMODE, /* u8 */
+
+ DEVLINK_ATTR_REGION_NAME, /* string */
+ DEVLINK_ATTR_REGION_SIZE, /* u64 */
+ DEVLINK_ATTR_REGION_SNAPSHOTS, /* nested */
+ DEVLINK_ATTR_REGION_SNAPSHOT, /* nested */
+ DEVLINK_ATTR_REGION_SNAPSHOT_ID, /* u32 */
+
+ DEVLINK_ATTR_REGION_CHUNKS, /* nested */
+ DEVLINK_ATTR_REGION_CHUNK, /* nested */
+ DEVLINK_ATTR_REGION_CHUNK_DATA, /* binary */
+ DEVLINK_ATTR_REGION_CHUNK_ADDR, /* u64 */
+ DEVLINK_ATTR_REGION_CHUNK_LEN, /* u64 */
+
/* add new attributes above here, update the policy in devlink.c */
__DEVLINK_ATTR_MAX,
diff --git a/include/uapi/linux/dvb/audio.h b/include/uapi/linux/dvb/audio.h
index 69f7a85d81b1..afeae063e640 100644
--- a/include/uapi/linux/dvb/audio.h
+++ b/include/uapi/linux/dvb/audio.h
@@ -67,27 +67,6 @@ typedef struct audio_status {
} audio_status_t; /* separate decoder hardware */
-typedef
-struct audio_karaoke { /* if Vocal1 or Vocal2 are non-zero, they get mixed */
- int vocal1; /* into left and right t at 70% each */
- int vocal2; /* if both, Vocal1 and Vocal2 are non-zero, Vocal1 gets*/
- int melody; /* mixed into the left channel and */
- /* Vocal2 into the right channel at 100% each. */
- /* if Melody is non-zero, the melody channel gets mixed*/
-} audio_karaoke_t; /* into left and right */
-
-
-typedef __u16 audio_attributes_t;
-/* bits: descr. */
-/* 15-13 audio coding mode (0=ac3, 2=mpeg1, 3=mpeg2ext, 4=LPCM, 6=DTS, */
-/* 12 multichannel extension */
-/* 11-10 audio type (0=not spec, 1=language included) */
-/* 9- 8 audio application mode (0=not spec, 1=karaoke, 2=surround) */
-/* 7- 6 Quantization / DRC (mpeg audio: 1=DRC exists)(lpcm: 0=16bit, */
-/* 5- 4 Sample frequency fs (0=48kHz, 1=96kHz) */
-/* 2- 0 number of audio channels (n+1 channels) */
-
-
/* for GET_CAPABILITIES and SET_FORMAT, the latter should only set one bit */
#define AUDIO_CAP_DTS 1
#define AUDIO_CAP_LPCM 2
@@ -115,22 +94,6 @@ typedef __u16 audio_attributes_t;
#define AUDIO_SET_ID _IO('o', 13)
#define AUDIO_SET_MIXER _IOW('o', 14, audio_mixer_t)
#define AUDIO_SET_STREAMTYPE _IO('o', 15)
-#define AUDIO_SET_EXT_ID _IO('o', 16)
-#define AUDIO_SET_ATTRIBUTES _IOW('o', 17, audio_attributes_t)
-#define AUDIO_SET_KARAOKE _IOW('o', 18, audio_karaoke_t)
-
-/**
- * AUDIO_GET_PTS
- *
- * Read the 33 bit presentation time stamp as defined
- * in ITU T-REC-H.222.0 / ISO/IEC 13818-1.
- *
- * The PTS should belong to the currently played
- * frame if possible, but may also be a value close to it
- * like the PTS of the last decoded frame or the last PTS
- * extracted by the PES parser.
- */
-#define AUDIO_GET_PTS _IOR('o', 19, __u64)
#define AUDIO_BILINGUAL_CHANNEL_SELECT _IO('o', 20)
#endif /* _DVBAUDIO_H_ */
diff --git a/include/uapi/linux/dvb/video.h b/include/uapi/linux/dvb/video.h
index df3d7028c807..43ba8b0a3d14 100644
--- a/include/uapi/linux/dvb/video.h
+++ b/include/uapi/linux/dvb/video.h
@@ -38,18 +38,6 @@ typedef enum {
typedef enum {
- VIDEO_SYSTEM_PAL,
- VIDEO_SYSTEM_NTSC,
- VIDEO_SYSTEM_PALN,
- VIDEO_SYSTEM_PALNc,
- VIDEO_SYSTEM_PALM,
- VIDEO_SYSTEM_NTSC60,
- VIDEO_SYSTEM_PAL60,
- VIDEO_SYSTEM_PALM60
-} video_system_t;
-
-
-typedef enum {
VIDEO_PAN_SCAN, /* use pan and scan format */
VIDEO_LETTER_BOX, /* use letterbox format */
VIDEO_CENTER_CUT_OUT /* use center cut out format */
@@ -160,44 +148,6 @@ struct video_still_picture {
};
-typedef
-struct video_highlight {
- int active; /* 1=show highlight, 0=hide highlight */
- __u8 contrast1; /* 7- 4 Pattern pixel contrast */
- /* 3- 0 Background pixel contrast */
- __u8 contrast2; /* 7- 4 Emphasis pixel-2 contrast */
- /* 3- 0 Emphasis pixel-1 contrast */
- __u8 color1; /* 7- 4 Pattern pixel color */
- /* 3- 0 Background pixel color */
- __u8 color2; /* 7- 4 Emphasis pixel-2 color */
- /* 3- 0 Emphasis pixel-1 color */
- __u32 ypos; /* 23-22 auto action mode */
- /* 21-12 start y */
- /* 9- 0 end y */
- __u32 xpos; /* 23-22 button color number */
- /* 21-12 start x */
- /* 9- 0 end x */
-} video_highlight_t;
-
-
-typedef struct video_spu {
- int active;
- int stream_id;
-} video_spu_t;
-
-
-typedef struct video_spu_palette { /* SPU Palette information */
- int length;
- __u8 __user *palette;
-} video_spu_palette_t;
-
-
-typedef struct video_navi_pack {
- int length; /* 0 ... 1024 */
- __u8 data[1024];
-} video_navi_pack_t;
-
-
typedef __u16 video_attributes_t;
/* bits: descr. */
/* 15-14 Video compression mode (0=MPEG-1, 1=MPEG-2) */
@@ -242,17 +192,9 @@ typedef __u16 video_attributes_t;
#define VIDEO_SLOWMOTION _IO('o', 32)
#define VIDEO_GET_CAPABILITIES _IOR('o', 33, unsigned int)
#define VIDEO_CLEAR_BUFFER _IO('o', 34)
-#define VIDEO_SET_ID _IO('o', 35)
#define VIDEO_SET_STREAMTYPE _IO('o', 36)
#define VIDEO_SET_FORMAT _IO('o', 37)
-#define VIDEO_SET_SYSTEM _IO('o', 38)
-#define VIDEO_SET_HIGHLIGHT _IOW('o', 39, video_highlight_t)
-#define VIDEO_SET_SPU _IOW('o', 50, video_spu_t)
-#define VIDEO_SET_SPU_PALETTE _IOW('o', 51, video_spu_palette_t)
-#define VIDEO_GET_NAVI _IOR('o', 52, video_navi_pack_t)
-#define VIDEO_SET_ATTRIBUTES _IO('o', 53)
#define VIDEO_GET_SIZE _IOR('o', 55, video_size_t)
-#define VIDEO_GET_FRAME_RATE _IOR('o', 56, unsigned int)
/**
* VIDEO_GET_PTS
diff --git a/include/uapi/linux/elf.h b/include/uapi/linux/elf.h
index 4e12c423b9fe..c5358e0ae7c5 100644
--- a/include/uapi/linux/elf.h
+++ b/include/uapi/linux/elf.h
@@ -422,6 +422,8 @@ typedef struct elf64_shdr {
#define NT_ARM_SVE 0x405 /* ARM Scalable Vector Extension registers */
#define NT_ARC_V2 0x600 /* ARCv2 accumulator/extra registers */
#define NT_VMCOREDD 0x700 /* Vmcore Device Dump Note */
+#define NT_MIPS_DSP 0x800 /* MIPS DSP ASE registers */
+#define NT_MIPS_FP_MODE 0x801 /* MIPS floating-point mode */
/* Note header in a PT_NOTE section */
typedef struct elf32_note {
diff --git a/include/uapi/linux/errqueue.h b/include/uapi/linux/errqueue.h
index dc64cfaf13da..c0151200f7d1 100644
--- a/include/uapi/linux/errqueue.h
+++ b/include/uapi/linux/errqueue.h
@@ -20,12 +20,16 @@ struct sock_extended_err {
#define SO_EE_ORIGIN_ICMP6 3
#define SO_EE_ORIGIN_TXSTATUS 4
#define SO_EE_ORIGIN_ZEROCOPY 5
+#define SO_EE_ORIGIN_TXTIME 6
#define SO_EE_ORIGIN_TIMESTAMPING SO_EE_ORIGIN_TXSTATUS
#define SO_EE_OFFENDER(ee) ((struct sockaddr*)((ee)+1))
#define SO_EE_CODE_ZEROCOPY_COPIED 1
+#define SO_EE_CODE_TXTIME_INVALID_PARAM 1
+#define SO_EE_CODE_TXTIME_MISSED 2
+
/**
* struct scm_timestamping - timestamps exposed through cmsg
*
diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h
index 4ca65b56084f..dc69391d2bba 100644
--- a/include/uapi/linux/ethtool.h
+++ b/include/uapi/linux/ethtool.h
@@ -226,7 +226,7 @@ enum tunable_id {
ETHTOOL_TX_COPYBREAK,
ETHTOOL_PFC_PREVENTION_TOUT, /* timeout in msecs */
/*
- * Add your fresh new tubale attribute above and remember to update
+ * Add your fresh new tunable attribute above and remember to update
* tunable_strings[] in net/core/ethtool.c
*/
__ETHTOOL_TUNABLE_COUNT,
@@ -870,7 +870,8 @@ struct ethtool_flow_ext {
* includes the %FLOW_EXT or %FLOW_MAC_EXT flag
* (see &struct ethtool_flow_ext description).
* @ring_cookie: RX ring/queue index to deliver to, or %RX_CLS_FLOW_DISC
- * if packets should be discarded
+ * if packets should be discarded, or %RX_CLS_FLOW_WAKE if the
+ * packets should be used for Wake-on-LAN with %WAKE_FILTER
* @location: Location of rule in the table. Locations must be
* numbered such that a flow matching multiple rules will be
* classified according to the first (lowest numbered) rule.
@@ -902,13 +903,13 @@ struct ethtool_rx_flow_spec {
static inline __u64 ethtool_get_flow_spec_ring(__u64 ring_cookie)
{
return ETHTOOL_RX_FLOW_SPEC_RING & ring_cookie;
-};
+}
static inline __u64 ethtool_get_flow_spec_ring_vf(__u64 ring_cookie)
{
return (ETHTOOL_RX_FLOW_SPEC_RING_VF & ring_cookie) >>
ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
-};
+}
/**
* struct ethtool_rxnfc - command to get or set RX flow classification rules
@@ -1634,6 +1635,7 @@ static inline int ethtool_validate_duplex(__u8 duplex)
#define WAKE_ARP (1 << 4)
#define WAKE_MAGIC (1 << 5)
#define WAKE_MAGICSECURE (1 << 6) /* only meaningful if WAKE_MAGIC */
+#define WAKE_FILTER (1 << 7)
/* L2-L4 network traffic flow types */
#define TCP_V4_FLOW 0x01 /* hash or spec (tcp_ip4_spec) */
@@ -1671,6 +1673,7 @@ static inline int ethtool_validate_duplex(__u8 duplex)
#define RXH_DISCARD (1 << 31)
#define RX_CLS_FLOW_DISC 0xffffffffffffffffULL
+#define RX_CLS_FLOW_WAKE 0xfffffffffffffffeULL
/* Special RX classification rule insert location values */
#define RX_CLS_LOC_SPECIAL 0x80000000 /* flag */
diff --git a/include/uapi/linux/eventpoll.h b/include/uapi/linux/eventpoll.h
index bf48e71f2634..8a3432d0f0dc 100644
--- a/include/uapi/linux/eventpoll.h
+++ b/include/uapi/linux/eventpoll.h
@@ -42,7 +42,7 @@
#define EPOLLRDHUP (__force __poll_t)0x00002000
/* Set exclusive wakeup mode for the target file descriptor */
-#define EPOLLEXCLUSIVE (__force __poll_t)(1U << 28)
+#define EPOLLEXCLUSIVE ((__force __poll_t)(1U << 28))
/*
* Request the handling of system wakeup events so as to prevent system suspends
@@ -54,13 +54,13 @@
*
* Requires CAP_BLOCK_SUSPEND
*/
-#define EPOLLWAKEUP (__force __poll_t)(1U << 29)
+#define EPOLLWAKEUP ((__force __poll_t)(1U << 29))
/* Set the One Shot behaviour for the target file descriptor */
-#define EPOLLONESHOT (__force __poll_t)(1U << 30)
+#define EPOLLONESHOT ((__force __poll_t)(1U << 30))
/* Set the Edge Triggered behaviour for the target file descriptor */
-#define EPOLLET (__force __poll_t)(1U << 31)
+#define EPOLLET ((__force __poll_t)(1U << 31))
/*
* On x86-64 make the 64bit structure have the same alignment as the
diff --git a/include/uapi/linux/fpga-dfl.h b/include/uapi/linux/fpga-dfl.h
new file mode 100644
index 000000000000..2e324e515c41
--- /dev/null
+++ b/include/uapi/linux/fpga-dfl.h
@@ -0,0 +1,179 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Header File for FPGA DFL User API
+ *
+ * Copyright (C) 2017-2018 Intel Corporation, Inc.
+ *
+ * Authors:
+ * Kang Luwei <luwei.kang@intel.com>
+ * Zhang Yi <yi.z.zhang@intel.com>
+ * Wu Hao <hao.wu@intel.com>
+ * Xiao Guangrong <guangrong.xiao@linux.intel.com>
+ */
+
+#ifndef _UAPI_LINUX_FPGA_DFL_H
+#define _UAPI_LINUX_FPGA_DFL_H
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+#define DFL_FPGA_API_VERSION 0
+
+/*
+ * The IOCTL interface for DFL-based FPGAs is designed for extensibility by
+ * embedding the structure length (argsz) and flags into structures passed
+ * between kernel and userspace. This design is modeled on the VFIO IOCTL
+ * interface (include/uapi/linux/vfio.h).
+ */
+
+#define DFL_FPGA_MAGIC 0xB6
+
+#define DFL_FPGA_BASE 0
+#define DFL_PORT_BASE 0x40
+#define DFL_FME_BASE 0x80
+
+/* Common IOCTLs for both FME and AFU file descriptor */
+
+/**
+ * DFL_FPGA_GET_API_VERSION - _IO(DFL_FPGA_MAGIC, DFL_FPGA_BASE + 0)
+ *
+ * Report the version of the driver API.
+ * Return: Driver API Version.
+ */
+
+#define DFL_FPGA_GET_API_VERSION _IO(DFL_FPGA_MAGIC, DFL_FPGA_BASE + 0)
+
+/**
+ * DFL_FPGA_CHECK_EXTENSION - _IO(DFL_FPGA_MAGIC, DFL_FPGA_BASE + 1)
+ *
+ * Check whether an extension is supported.
+ * Return: 0 if not supported, otherwise the extension is supported.
+ */
+
+#define DFL_FPGA_CHECK_EXTENSION _IO(DFL_FPGA_MAGIC, DFL_FPGA_BASE + 1)
+
+/* IOCTLs for AFU file descriptor */
+
+/**
+ * DFL_FPGA_PORT_RESET - _IO(DFL_FPGA_MAGIC, DFL_PORT_BASE + 0)
+ *
+ * Reset the FPGA Port and its AFU. No parameters are supported.
+ * Userspace can reset the Port at any time, e.g. during DMA or PR. This
+ * should never cause any system-level issue, only a functional failure
+ * (e.g. a DMA or PR operation failure) that is recoverable.
+ * Return: 0 on success, -errno on failure
+ */
+
+#define DFL_FPGA_PORT_RESET _IO(DFL_FPGA_MAGIC, DFL_PORT_BASE + 0)
+
+/**
+ * DFL_FPGA_PORT_GET_INFO - _IOR(DFL_FPGA_MAGIC, DFL_PORT_BASE + 1,
+ * struct dfl_fpga_port_info)
+ *
+ * Retrieve information about the fpga port.
+ * Driver fills the info in provided struct dfl_fpga_port_info.
+ * Return: 0 on success, -errno on failure.
+ */
+struct dfl_fpga_port_info {
+ /* Input */
+ __u32 argsz; /* Structure length */
+ /* Output */
+ __u32 flags; /* Zero for now */
+ __u32 num_regions; /* The number of supported regions */
+ __u32 num_umsgs; /* The number of allocated umsgs */
+};
+
+#define DFL_FPGA_PORT_GET_INFO _IO(DFL_FPGA_MAGIC, DFL_PORT_BASE + 1)
+
+/**
+ * DFL_FPGA_PORT_GET_REGION_INFO - _IOWR(DFL_FPGA_MAGIC, DFL_PORT_BASE + 2,
+ * struct dfl_fpga_port_region_info)
+ *
+ * Retrieve information about a device memory region.
+ * Caller provides struct dfl_fpga_port_region_info with index value set.
+ * Driver returns the region info in other fields.
+ * Return: 0 on success, -errno on failure.
+ */
+struct dfl_fpga_port_region_info {
+ /* input */
+ __u32 argsz; /* Structure length */
+ /* Output */
+ __u32 flags; /* Access permission */
+#define DFL_PORT_REGION_READ (1 << 0) /* Region is readable */
+#define DFL_PORT_REGION_WRITE (1 << 1) /* Region is writable */
+#define DFL_PORT_REGION_MMAP (1 << 2) /* Can be mmaped to userspace */
+ /* Input */
+ __u32 index; /* Region index */
+#define DFL_PORT_REGION_INDEX_AFU 0 /* AFU */
+#define DFL_PORT_REGION_INDEX_STP 1 /* Signal Tap */
+ __u32 padding;
+ /* Output */
+ __u64 size; /* Region size (bytes) */
+ __u64 offset; /* Region offset from start of device fd */
+};
+
+#define DFL_FPGA_PORT_GET_REGION_INFO _IO(DFL_FPGA_MAGIC, DFL_PORT_BASE + 2)
+
+/**
+ * DFL_FPGA_PORT_DMA_MAP - _IOWR(DFL_FPGA_MAGIC, DFL_PORT_BASE + 3,
+ * struct dfl_fpga_port_dma_map)
+ *
+ * Map the DMA memory described by the user_addr and length provided by the
+ * caller. The driver fills in the iova in the provided struct
+ * dfl_fpga_port_dma_map.
+ * This interface only accepts page-size-aligned user memory for DMA mapping.
+ * Return: 0 on success, -errno on failure.
+ */
+struct dfl_fpga_port_dma_map {
+ /* Input */
+ __u32 argsz; /* Structure length */
+ __u32 flags; /* Zero for now */
+ __u64 user_addr; /* Process virtual address */
+ __u64 length; /* Length of mapping (bytes)*/
+ /* Output */
+ __u64 iova; /* IO virtual address */
+};
+
+#define DFL_FPGA_PORT_DMA_MAP _IO(DFL_FPGA_MAGIC, DFL_PORT_BASE + 3)
+
+/**
+ * DFL_FPGA_PORT_DMA_UNMAP - _IOW(DFL_FPGA_MAGIC, DFL_PORT_BASE + 4,
+ * struct dfl_fpga_port_dma_unmap)
+ *
+ * Unmap the DMA memory at the iova provided by the caller.
+ * Return: 0 on success, -errno on failure.
+ */
+struct dfl_fpga_port_dma_unmap {
+ /* Input */
+ __u32 argsz; /* Structure length */
+ __u32 flags; /* Zero for now */
+ __u64 iova; /* IO virtual address */
+};
+
+#define DFL_FPGA_PORT_DMA_UNMAP _IO(DFL_FPGA_MAGIC, DFL_PORT_BASE + 4)
+
+/* IOCTLs for FME file descriptor */
+
+/**
+ * DFL_FPGA_FME_PORT_PR - _IOW(DFL_FPGA_MAGIC, DFL_FME_BASE + 0,
+ * struct dfl_fpga_fme_port_pr)
+ *
+ * Driver does Partial Reconfiguration based on Port ID and Buffer (Image)
+ * provided by caller.
+ * Return: 0 on success, -errno on failure.
+ * If DFL_FPGA_FME_PORT_PR returns -EIO, the HW has detected errors during
+ * PR; in this case, the user can fetch the HW error info from the status
+ * of the FME's fpga manager.
+ */
+
+struct dfl_fpga_fme_port_pr {
+ /* Input */
+ __u32 argsz; /* Structure length */
+ __u32 flags; /* Zero for now */
+ __u32 port_id;
+ __u32 buffer_size;
+ __u64 buffer_address; /* Userspace address to the buffer for PR */
+};
+
+#define DFL_FPGA_FME_PORT_PR _IO(DFL_FPGA_MAGIC, DFL_FME_BASE + 0)
+
+#endif /* _UAPI_LINUX_FPGA_DFL_H */
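
A hedged userspace sketch of the argsz convention and the two port ioctls above; the /dev/dfl-port.0 node name is an assumption, since the actual name depends on how the dfl-afu driver enumerates the port.

#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fpga-dfl.h>

int main(void)
{
	struct dfl_fpga_port_info info;
	int fd, ret;

	fd = open("/dev/dfl-port.0", O_RDWR);	/* assumed node name */
	if (fd < 0) {
		perror("open");
		return 1;
	}

	memset(&info, 0, sizeof(info));
	info.argsz = sizeof(info);		/* extensibility: pass our size */

	ret = ioctl(fd, DFL_FPGA_PORT_GET_INFO, &info);
	if (ret == 0)
		printf("regions=%u umsgs=%u\n",
		       info.num_regions, info.num_umsgs);

	/* DFL_FPGA_PORT_RESET takes no argument */
	ioctl(fd, DFL_FPGA_PORT_RESET);

	close(fd);
	return ret ? 1 : 0;
}
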
diff --git a/include/uapi/linux/fsi.h b/include/uapi/linux/fsi.h
new file mode 100644
index 000000000000..da577ecd90e7
--- /dev/null
+++ b/include/uapi/linux/fsi.h
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
+#ifndef _UAPI_LINUX_FSI_H
+#define _UAPI_LINUX_FSI_H
+
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
+/*
+ * /dev/scom "raw" ioctl interface
+ *
+ * The driver supports a high level "read/write" interface which
+ * handles retries and converts the status to Linux error codes,
+ * however, low-level tools and debuggers need to access the "raw"
+ * HW status information and interpret it themselves, so this
+ * ioctl interface is also provided for their use case.
+ */
+
+/* Structure for SCOM read/write */
+struct scom_access {
+ __u64 addr; /* SCOM address, supports indirect */
+ __u64 data; /* SCOM data (in for write, out for read) */
+ __u64 mask; /* Data mask for writes */
+ __u32 intf_errors; /* Interface error flags */
+#define SCOM_INTF_ERR_PARITY 0x00000001 /* Parity error */
+#define SCOM_INTF_ERR_PROTECTION 0x00000002 /* Blocked by secure boot */
+#define SCOM_INTF_ERR_ABORT 0x00000004 /* PIB reset during access */
+#define SCOM_INTF_ERR_UNKNOWN 0x80000000 /* Unknown error */
+ /*
+ * Note: Any other bit set in intf_errors needs to be considered an
+ * error. Future implementations may define new error conditions. The
+ * pib_status below is only valid if intf_errors is 0.
+ */
+ __u8 pib_status; /* 3-bit PIB status */
+#define SCOM_PIB_SUCCESS 0 /* Access successful */
+#define SCOM_PIB_BLOCKED 1 /* PIB blocked, pls retry */
+#define SCOM_PIB_OFFLINE 2 /* Chiplet offline */
+#define SCOM_PIB_PARTIAL 3 /* Partial good */
+#define SCOM_PIB_BAD_ADDR 4 /* Invalid address */
+#define SCOM_PIB_CLK_ERR 5 /* Clock error */
+#define SCOM_PIB_PARITY_ERR 6 /* Parity error on the PIB bus */
+#define SCOM_PIB_TIMEOUT 7 /* Bus timeout */
+ __u8 pad;
+};
+
+/* Flags for SCOM check */
+#define SCOM_CHECK_SUPPORTED 0x00000001 /* Interface supported */
+#define SCOM_CHECK_PROTECTED 0x00000002 /* Interface blocked by secure boot */
+
+/* Flags for SCOM reset */
+#define SCOM_RESET_INTF 0x00000001 /* Reset interface */
+#define SCOM_RESET_PIB 0x00000002 /* Reset PIB */
+
+#define FSI_SCOM_CHECK _IOR('s', 0x00, __u32)
+#define FSI_SCOM_READ _IOWR('s', 0x01, struct scom_access)
+#define FSI_SCOM_WRITE _IOWR('s', 0x02, struct scom_access)
+#define FSI_SCOM_RESET _IOW('s', 0x03, __u32)
+
+#endif /* _UAPI_LINUX_FSI_H */
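
A hedged sketch of a raw SCOM read through the new ioctl; /dev/scom1 is an assumed node name (the header only says the interface lives under /dev/scom).

#include <string.h>
#include <sys/ioctl.h>
#include <linux/fsi.h>

static int scom_read_raw(int fd, __u64 addr, __u64 *val)
{
	struct scom_access acc;

	memset(&acc, 0, sizeof(acc));
	acc.addr = addr;

	if (ioctl(fd, FSI_SCOM_READ, &acc) < 0)
		return -1;

	/* Any interface error means pib_status is not valid */
	if (acc.intf_errors || acc.pib_status != SCOM_PIB_SUCCESS)
		return -1;

	*val = acc.data;
	return 0;
}
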
diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h
index cf01b6824244..43391e2d1153 100644
--- a/include/uapi/linux/if_link.h
+++ b/include/uapi/linux/if_link.h
@@ -164,6 +164,8 @@ enum {
IFLA_CARRIER_UP_COUNT,
IFLA_CARRIER_DOWN_COUNT,
IFLA_NEW_IFINDEX,
+ IFLA_MIN_MTU,
+ IFLA_MAX_MTU,
__IFLA_MAX
};
@@ -334,6 +336,7 @@ enum {
IFLA_BRPORT_GROUP_FWD_MASK,
IFLA_BRPORT_NEIGH_SUPPRESS,
IFLA_BRPORT_ISOLATED,
+ IFLA_BRPORT_BACKUP_PORT,
__IFLA_BRPORT_MAX
};
#define IFLA_BRPORT_MAX (__IFLA_BRPORT_MAX - 1)
@@ -459,6 +462,16 @@ enum {
#define IFLA_MACSEC_MAX (__IFLA_MACSEC_MAX - 1)
+/* XFRM section */
+enum {
+ IFLA_XFRM_UNSPEC,
+ IFLA_XFRM_LINK,
+ IFLA_XFRM_IF_ID,
+ __IFLA_XFRM_MAX
+};
+
+#define IFLA_XFRM_MAX (__IFLA_XFRM_MAX - 1)
+
enum macsec_validation_type {
MACSEC_VALIDATE_DISABLED = 0,
MACSEC_VALIDATE_CHECK = 1,
@@ -920,6 +933,7 @@ enum {
XDP_ATTACHED_DRV,
XDP_ATTACHED_SKB,
XDP_ATTACHED_HW,
+ XDP_ATTACHED_MULTI,
};
enum {
@@ -928,6 +942,9 @@ enum {
IFLA_XDP_ATTACHED,
IFLA_XDP_FLAGS,
IFLA_XDP_PROG_ID,
+ IFLA_XDP_DRV_PROG_ID,
+ IFLA_XDP_SKB_PROG_ID,
+ IFLA_XDP_HW_PROG_ID,
__IFLA_XDP_MAX,
};
diff --git a/include/uapi/linux/iio/types.h b/include/uapi/linux/iio/types.h
index 4213cdf88e3c..92baabc103ac 100644
--- a/include/uapi/linux/iio/types.h
+++ b/include/uapi/linux/iio/types.h
@@ -44,6 +44,8 @@ enum iio_chan_type {
IIO_COUNT,
IIO_INDEX,
IIO_GRAVITY,
+ IIO_POSITIONRELATIVE,
+ IIO_PHASE,
};
enum iio_modifier {
@@ -84,6 +86,7 @@ enum iio_modifier {
IIO_MOD_CO2,
IIO_MOD_VOC,
IIO_MOD_LIGHT_UV,
+ IIO_MOD_LIGHT_DUV,
};
enum iio_event_type {
diff --git a/include/uapi/linux/ila.h b/include/uapi/linux/ila.h
index 483b77af4eb8..db45d3e49a12 100644
--- a/include/uapi/linux/ila.h
+++ b/include/uapi/linux/ila.h
@@ -30,6 +30,7 @@ enum {
ILA_CMD_ADD,
ILA_CMD_DEL,
ILA_CMD_GET,
+ ILA_CMD_FLUSH,
__ILA_CMD_MAX,
};
diff --git a/include/uapi/linux/inotify.h b/include/uapi/linux/inotify.h
index 4800bf2a531d..884b4846b630 100644
--- a/include/uapi/linux/inotify.h
+++ b/include/uapi/linux/inotify.h
@@ -53,6 +53,7 @@ struct inotify_event {
#define IN_ONLYDIR 0x01000000 /* only watch the path if it is a directory */
#define IN_DONT_FOLLOW 0x02000000 /* don't follow a sym link */
#define IN_EXCL_UNLINK 0x04000000 /* exclude events on unlinked objects */
+#define IN_MASK_CREATE 0x10000000 /* only create watches */
#define IN_MASK_ADD 0x20000000 /* add to the mask of an already existing watch */
#define IN_ISDIR 0x40000000 /* event occurred against dir */
#define IN_ONESHOT 0x80000000 /* only send event once */
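
A small illustrative use of the new IN_MASK_CREATE flag; the EEXIST behaviour is taken from the corresponding kernel change rather than this header, and the fallback define only covers C libraries that do not yet carry the flag.

#include <stdio.h>
#include <errno.h>
#include <sys/inotify.h>

#ifndef IN_MASK_CREATE
#define IN_MASK_CREATE 0x10000000	/* value from the hunk above */
#endif

int add_exclusive_watch(int ifd, const char *path)
{
	/* Only create a new watch; fail instead of modifying an existing one. */
	int wd = inotify_add_watch(ifd, path, IN_MODIFY | IN_MASK_CREATE);

	if (wd < 0 && errno == EEXIST)
		fprintf(stderr, "%s is already being watched\n", path);

	return wd;
}
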
diff --git a/include/uapi/linux/input.h b/include/uapi/linux/input.h
index 7288a7c573cc..fb78f6f500f3 100644
--- a/include/uapi/linux/input.h
+++ b/include/uapi/linux/input.h
@@ -270,10 +270,11 @@ struct input_mask {
/*
* MT_TOOL types
*/
-#define MT_TOOL_FINGER 0
-#define MT_TOOL_PEN 1
-#define MT_TOOL_PALM 2
-#define MT_TOOL_MAX 2
+#define MT_TOOL_FINGER 0x00
+#define MT_TOOL_PEN 0x01
+#define MT_TOOL_PALM 0x02
+#define MT_TOOL_DIAL 0x0a
+#define MT_TOOL_MAX 0x0f
/*
* Values describing the status of a force-feedback effect
diff --git a/include/uapi/linux/ip.h b/include/uapi/linux/ip.h
index b24a742beae5..e42d13b55cf3 100644
--- a/include/uapi/linux/ip.h
+++ b/include/uapi/linux/ip.h
@@ -168,6 +168,7 @@ enum
IPV4_DEVCONF_IGNORE_ROUTES_WITH_LINKDOWN,
IPV4_DEVCONF_DROP_UNICAST_IN_L2_MULTICAST,
IPV4_DEVCONF_DROP_GRATUITOUS_ARP,
+ IPV4_DEVCONF_BC_FORWARDING,
__IPV4_DEVCONF_MAX
};
diff --git a/include/uapi/linux/joystick.h b/include/uapi/linux/joystick.h
index 64aabb84a66d..192bf2cf182d 100644
--- a/include/uapi/linux/joystick.h
+++ b/include/uapi/linux/joystick.h
@@ -18,10 +18,6 @@
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- *
- * Should you need to contact me, the author, you can do so either by
- * e-mail - mail your message to <vojtech@suse.cz>, or by paper mail:
- * Vojtech Pavlik, Ucitelska 1576, Prague 8, 182 00 Czech Republic
*/
#ifndef _UAPI_LINUX_JOYSTICK_H
#define _UAPI_LINUX_JOYSTICK_H
diff --git a/include/uapi/linux/keyboard.h b/include/uapi/linux/keyboard.h
index ab4108c83186..4846716e7c5c 100644
--- a/include/uapi/linux/keyboard.h
+++ b/include/uapi/linux/keyboard.h
@@ -357,8 +357,29 @@
#define K_DTILDE K(KT_DEAD,3)
#define K_DDIERE K(KT_DEAD,4)
#define K_DCEDIL K(KT_DEAD,5)
+#define K_DMACRON K(KT_DEAD,6)
+#define K_DBREVE K(KT_DEAD,7)
+#define K_DABDOT K(KT_DEAD,8)
+#define K_DABRING K(KT_DEAD,9)
+#define K_DDBACUTE K(KT_DEAD,10)
+#define K_DCARON K(KT_DEAD,11)
+#define K_DOGONEK K(KT_DEAD,12)
+#define K_DIOTA K(KT_DEAD,13)
+#define K_DVOICED K(KT_DEAD,14)
+#define K_DSEMVOICED K(KT_DEAD,15)
+#define K_DBEDOT K(KT_DEAD,16)
+#define K_DHOOK K(KT_DEAD,17)
+#define K_DHORN K(KT_DEAD,18)
+#define K_DSTROKE K(KT_DEAD,19)
+#define K_DABCOMMA K(KT_DEAD,20)
+#define K_DABREVCOMMA K(KT_DEAD,21)
+#define K_DDBGRAVE K(KT_DEAD,22)
+#define K_DINVBREVE K(KT_DEAD,23)
+#define K_DBECOMMA K(KT_DEAD,24)
+#define K_DCURRENCY K(KT_DEAD,25)
+#define K_DGREEK K(KT_DEAD,26)
-#define NR_DEAD 6
+#define NR_DEAD 27
#define K_DOWN K(KT_CUR,0)
#define K_LEFT K(KT_CUR,1)
diff --git a/include/uapi/linux/kfd_ioctl.h b/include/uapi/linux/kfd_ioctl.h
index b4f5073dbac2..01674b56e14f 100644
--- a/include/uapi/linux/kfd_ioctl.h
+++ b/include/uapi/linux/kfd_ioctl.h
@@ -76,6 +76,12 @@ struct kfd_ioctl_update_queue_args {
__u32 queue_priority; /* to KFD */
};
+struct kfd_ioctl_set_cu_mask_args {
+ __u32 queue_id; /* to KFD */
+ __u32 num_cu_mask; /* to KFD */
+ __u64 cu_mask_ptr; /* to KFD */
+};
+
/* For kfd_ioctl_set_memory_policy_args.default_policy and alternate_policy */
#define KFD_IOC_CACHE_POLICY_COHERENT 0
#define KFD_IOC_CACHE_POLICY_NONCOHERENT 1
@@ -189,6 +195,15 @@ struct kfd_ioctl_dbg_wave_control_args {
#define KFD_SIGNAL_EVENT_LIMIT 4096
+/* For kfd_event_data.hw_exception_data.reset_type. */
+#define KFD_HW_EXCEPTION_WHOLE_GPU_RESET 0
+#define KFD_HW_EXCEPTION_PER_ENGINE_RESET 1
+
+/* For kfd_event_data.hw_exception_data.reset_cause. */
+#define KFD_HW_EXCEPTION_GPU_HANG 0
+#define KFD_HW_EXCEPTION_ECC 1
+
+
struct kfd_ioctl_create_event_args {
__u64 event_page_offset; /* from KFD */
__u32 event_trigger_data; /* from KFD - signal events only */
@@ -219,7 +234,7 @@ struct kfd_memory_exception_failure {
__u32 NotPresent; /* Page not present or supervisor privilege */
__u32 ReadOnly; /* Write access to a read-only page */
__u32 NoExecute; /* Execute access to a page marked NX */
- __u32 pad;
+ __u32 imprecise; /* Can't determine the exact fault address */
};
/* memory exception data*/
@@ -230,10 +245,19 @@ struct kfd_hsa_memory_exception_data {
__u32 pad;
};
-/* Event data*/
+/* hw exception data */
+struct kfd_hsa_hw_exception_data {
+ uint32_t reset_type;
+ uint32_t reset_cause;
+ uint32_t memory_lost;
+ uint32_t gpu_id;
+};
+
+/* Event data */
struct kfd_event_data {
union {
struct kfd_hsa_memory_exception_data memory_exception_data;
+ struct kfd_hsa_hw_exception_data hw_exception_data;
}; /* From KFD */
__u64 kfd_event_data_ext; /* pointer to an extension structure
for future exception types */
@@ -448,7 +472,10 @@ struct kfd_ioctl_unmap_memory_from_gpu_args {
#define AMDKFD_IOC_UNMAP_MEMORY_FROM_GPU \
AMDKFD_IOWR(0x19, struct kfd_ioctl_unmap_memory_from_gpu_args)
+#define AMDKFD_IOC_SET_CU_MASK \
+ AMDKFD_IOW(0x1A, struct kfd_ioctl_set_cu_mask_args)
+
#define AMDKFD_COMMAND_START 0x01
-#define AMDKFD_COMMAND_END 0x1A
+#define AMDKFD_COMMAND_END 0x1B
#endif
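
A hedged sketch of the new AMDKFD_IOC_SET_CU_MASK ioctl; treating num_cu_mask as a bit count is an assumption based on the field naming, so consult the KFD documentation before relying on it.

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kfd_ioctl.h>

static int set_cu_mask(int kfd_fd, __u32 queue_id, __u32 *mask, __u32 nbits)
{
	struct kfd_ioctl_set_cu_mask_args args = {
		.queue_id    = queue_id,
		.num_cu_mask = nbits,	/* assumed: number of CU-mask bits */
		.cu_mask_ptr = (__u64)(uintptr_t)mask,
	};

	return ioctl(kfd_fd, AMDKFD_IOC_SET_CU_MASK, &args);
}
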
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index b6270a3b38e9..07548de5c988 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -949,6 +949,9 @@ struct kvm_ppc_resize_hpt {
#define KVM_CAP_GET_MSR_FEATURES 153
#define KVM_CAP_HYPERV_EVENTFD 154
#define KVM_CAP_HYPERV_TLBFLUSH 155
+#define KVM_CAP_S390_HPAGE_1M 156
+#define KVM_CAP_NESTED_STATE 157
+#define KVM_CAP_ARM_INJECT_SERROR_ESR 158
#ifdef KVM_CAP_IRQ_ROUTING
@@ -1391,6 +1394,9 @@ struct kvm_enc_region {
/* Available with KVM_CAP_HYPERV_EVENTFD */
#define KVM_HYPERV_EVENTFD _IOW(KVMIO, 0xbd, struct kvm_hyperv_eventfd)
+/* Available with KVM_CAP_NESTED_STATE */
+#define KVM_GET_NESTED_STATE _IOWR(KVMIO, 0xbe, struct kvm_nested_state)
+#define KVM_SET_NESTED_STATE _IOW(KVMIO, 0xbf, struct kvm_nested_state)
/* Secure Encrypted Virtualization command */
enum sev_cmd_id {
diff --git a/include/uapi/linux/kvm_para.h b/include/uapi/linux/kvm_para.h
index dcf629dd2889..6c0ce49931e5 100644
--- a/include/uapi/linux/kvm_para.h
+++ b/include/uapi/linux/kvm_para.h
@@ -13,6 +13,7 @@
/* Return values for hypercalls */
#define KVM_ENOSYS 1000
#define KVM_EFAULT EFAULT
+#define KVM_EINVAL EINVAL
#define KVM_E2BIG E2BIG
#define KVM_EPERM EPERM
#define KVM_EOPNOTSUPP 95
@@ -26,6 +27,7 @@
#define KVM_HC_MIPS_EXIT_VM 7
#define KVM_HC_MIPS_CONSOLE_OUTPUT 8
#define KVM_HC_CLOCK_PAIRING 9
+#define KVM_HC_SEND_IPI 10
/*
* hypercalls use architecture specific
diff --git a/include/uapi/linux/l2tp.h b/include/uapi/linux/l2tp.h
index 7d570c7bd117..61158f5a1a5b 100644
--- a/include/uapi/linux/l2tp.h
+++ b/include/uapi/linux/l2tp.h
@@ -60,14 +60,14 @@ struct sockaddr_l2tpip6 {
/*
* Commands.
* Valid TLVs of each command are:-
- * TUNNEL_CREATE - CONN_ID, pw_type, netns, ifname, ipinfo, udpinfo, udpcsum, vlanid
+ * TUNNEL_CREATE - CONN_ID, pw_type, netns, ifname, ipinfo, udpinfo, udpcsum
* TUNNEL_DELETE - CONN_ID
* TUNNEL_MODIFY - CONN_ID, udpcsum
* TUNNEL_GETSTATS - CONN_ID, (stats)
* TUNNEL_GET - CONN_ID, (...)
- * SESSION_CREATE - SESSION_ID, PW_TYPE, data_seq, cookie, peer_cookie, l2spec
+ * SESSION_CREATE - SESSION_ID, PW_TYPE, cookie, peer_cookie, l2spec
* SESSION_DELETE - SESSION_ID
- * SESSION_MODIFY - SESSION_ID, data_seq
+ * SESSION_MODIFY - SESSION_ID
* SESSION_GET - SESSION_ID, (...)
* SESSION_GETSTATS - SESSION_ID, (stats)
*
@@ -95,7 +95,7 @@ enum {
L2TP_ATTR_PW_TYPE, /* u16, enum l2tp_pwtype */
L2TP_ATTR_ENCAP_TYPE, /* u16, enum l2tp_encap_type */
L2TP_ATTR_OFFSET, /* u16 (not used) */
- L2TP_ATTR_DATA_SEQ, /* u16 */
+ L2TP_ATTR_DATA_SEQ, /* u16 (not used) */
L2TP_ATTR_L2SPEC_TYPE, /* u8, enum l2tp_l2spec_type */
L2TP_ATTR_L2SPEC_LEN, /* u8 (not used) */
L2TP_ATTR_PROTO_VERSION, /* u8 */
@@ -105,7 +105,7 @@ enum {
L2TP_ATTR_SESSION_ID, /* u32 */
L2TP_ATTR_PEER_SESSION_ID, /* u32 */
L2TP_ATTR_UDP_CSUM, /* u8 */
- L2TP_ATTR_VLAN_ID, /* u16 */
+ L2TP_ATTR_VLAN_ID, /* u16 (not used) */
L2TP_ATTR_COOKIE, /* 0, 4 or 8 bytes */
L2TP_ATTR_PEER_COOKIE, /* 0, 4 or 8 bytes */
L2TP_ATTR_DEBUG, /* u32, enum l2tp_debug_flags */
@@ -119,8 +119,8 @@ enum {
L2TP_ATTR_IP_DADDR, /* u32 */
L2TP_ATTR_UDP_SPORT, /* u16 */
L2TP_ATTR_UDP_DPORT, /* u16 */
- L2TP_ATTR_MTU, /* u16 */
- L2TP_ATTR_MRU, /* u16 */
+ L2TP_ATTR_MTU, /* u16 (not used) */
+ L2TP_ATTR_MRU, /* u16 (not used) */
L2TP_ATTR_STATS, /* nested */
L2TP_ATTR_IP6_SADDR, /* struct in6_addr */
L2TP_ATTR_IP6_DADDR, /* struct in6_addr */
@@ -169,6 +169,7 @@ enum l2tp_encap_type {
L2TP_ENCAPTYPE_IP,
};
+/* For L2TP_ATTR_DATA_SEQ. Unused. */
enum l2tp_seqmode {
L2TP_SEQ_NONE = 0,
L2TP_SEQ_IP = 1,
diff --git a/include/uapi/linux/media-bus-format.h b/include/uapi/linux/media-bus-format.h
index 9e3511742fdc..d6a5a3bfe6c4 100644
--- a/include/uapi/linux/media-bus-format.h
+++ b/include/uapi/linux/media-bus-format.h
@@ -62,7 +62,7 @@
#define MEDIA_BUS_FMT_RGB121212_1X36 0x1019
#define MEDIA_BUS_FMT_RGB161616_1X48 0x101a
-/* YUV (including grey) - next is 0x202c */
+/* YUV (including grey) - next is 0x202d */
#define MEDIA_BUS_FMT_Y8_1X8 0x2001
#define MEDIA_BUS_FMT_UV8_1X8 0x2015
#define MEDIA_BUS_FMT_UYVY8_1_5X8 0x2002
@@ -74,6 +74,7 @@
#define MEDIA_BUS_FMT_YUYV8_2X8 0x2008
#define MEDIA_BUS_FMT_YVYU8_2X8 0x2009
#define MEDIA_BUS_FMT_Y10_1X10 0x200a
+#define MEDIA_BUS_FMT_Y10_2X8_PADHI_LE 0x202c
#define MEDIA_BUS_FMT_UYVY10_2X10 0x2018
#define MEDIA_BUS_FMT_VYUY10_2X10 0x2019
#define MEDIA_BUS_FMT_YUYV10_2X10 0x200b
diff --git a/include/uapi/linux/media.h b/include/uapi/linux/media.h
index c7e9a5cba24e..36f76e777ef9 100644
--- a/include/uapi/linux/media.h
+++ b/include/uapi/linux/media.h
@@ -25,7 +25,6 @@
#endif
#include <linux/ioctl.h>
#include <linux/types.h>
-#include <linux/version.h>
struct media_device_info {
char driver[16];
@@ -90,12 +89,6 @@ struct media_device_info {
#define MEDIA_ENT_F_LENS (MEDIA_ENT_F_OLD_SUBDEV_BASE + 3)
/*
- * Video decoder functions
- */
-#define MEDIA_ENT_F_ATV_DECODER (MEDIA_ENT_F_OLD_SUBDEV_BASE + 4)
-#define MEDIA_ENT_F_DTV_DECODER (MEDIA_ENT_F_BASE + 0x6001)
-
-/*
* Digital TV, analog TV, radio and/or software defined radio tuner functions.
*
* It is a responsibility of the master/bridge drivers to add connectors
@@ -132,6 +125,8 @@ struct media_device_info {
#define MEDIA_ENT_F_PROC_VIDEO_LUT (MEDIA_ENT_F_BASE + 0x4004)
#define MEDIA_ENT_F_PROC_VIDEO_SCALER (MEDIA_ENT_F_BASE + 0x4005)
#define MEDIA_ENT_F_PROC_VIDEO_STATISTICS (MEDIA_ENT_F_BASE + 0x4006)
+#define MEDIA_ENT_F_PROC_VIDEO_ENCODER (MEDIA_ENT_F_BASE + 0x4007)
+#define MEDIA_ENT_F_PROC_VIDEO_DECODER (MEDIA_ENT_F_BASE + 0x4008)
/*
* Switch and bridge entity functions
@@ -139,6 +134,13 @@ struct media_device_info {
#define MEDIA_ENT_F_VID_MUX (MEDIA_ENT_F_BASE + 0x5001)
#define MEDIA_ENT_F_VID_IF_BRIDGE (MEDIA_ENT_F_BASE + 0x5002)
+/*
+ * Video decoder/encoder functions
+ */
+#define MEDIA_ENT_F_ATV_DECODER (MEDIA_ENT_F_OLD_SUBDEV_BASE + 4)
+#define MEDIA_ENT_F_DV_DECODER (MEDIA_ENT_F_BASE + 0x6001)
+#define MEDIA_ENT_F_DV_ENCODER (MEDIA_ENT_F_BASE + 0x6002)
+
/* Entity flags */
#define MEDIA_ENT_FL_DEFAULT (1 << 0)
#define MEDIA_ENT_FL_CONNECTOR (1 << 1)
@@ -280,11 +282,21 @@ struct media_links_enum {
* MC next gen API definitions
*/
+/*
+ * Appeared in 4.19.0.
+ *
+ * The media_version argument comes from the media_version field in
+ * struct media_device_info.
+ */
+#define MEDIA_V2_ENTITY_HAS_FLAGS(media_version) \
+ ((media_version) >= ((4 << 16) | (19 << 8) | 0))
+
struct media_v2_entity {
__u32 id;
char name[64];
__u32 function; /* Main function of the entity */
- __u32 reserved[6];
+ __u32 flags;
+ __u32 reserved[5];
} __attribute__ ((packed));
/* Should match the specific fields at media_intf_devnode */
@@ -305,11 +317,21 @@ struct media_v2_interface {
};
} __attribute__ ((packed));
+/*
+ * Appeared in 4.19.0.
+ *
+ * The media_version argument comes from the media_version field in
+ * struct media_device_info.
+ */
+#define MEDIA_V2_PAD_HAS_INDEX(media_version) \
+ ((media_version) >= ((4 << 16) | (19 << 8) | 0))
+
struct media_v2_pad {
__u32 id;
__u32 entity_id;
__u32 flags;
- __u32 reserved[5];
+ __u32 index;
+ __u32 reserved[4];
} __attribute__ ((packed));
struct media_v2_link {
@@ -348,7 +370,7 @@ struct media_v2_topology {
#define MEDIA_IOC_SETUP_LINK _IOWR('|', 0x03, struct media_link_desc)
#define MEDIA_IOC_G_TOPOLOGY _IOWR('|', 0x04, struct media_v2_topology)
-#if !defined(__KERNEL__) || defined(__NEED_MEDIA_LEGACY_API)
+#ifndef __KERNEL__
/*
* Legacy symbols used to avoid userspace compilation breakages.
@@ -380,6 +402,8 @@ struct media_v2_topology {
#define MEDIA_ENT_T_V4L2_SUBDEV_DECODER MEDIA_ENT_F_ATV_DECODER
#define MEDIA_ENT_T_V4L2_SUBDEV_TUNER MEDIA_ENT_F_TUNER
+#define MEDIA_ENT_F_DTV_DECODER MEDIA_ENT_F_DV_DECODER
+
/*
* There is still no ALSA support in the media controller. These
* defines should not have been added and we leave them here only
@@ -396,7 +420,7 @@ struct media_v2_topology {
#define MEDIA_INTF_T_ALSA_TIMER (MEDIA_INTF_T_ALSA_BASE + 7)
/* Obsolete symbol for media_version, no longer used in the kernel */
-#define MEDIA_API_VERSION KERNEL_VERSION(0, 1, 0)
+#define MEDIA_API_VERSION ((0 << 16) | (1 << 8) | 0)
#endif
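
A hedged sketch of the version checks added above; MEDIA_IOC_DEVICE_INFO is defined elsewhere in this header, and /dev/media0 is an assumed node name.

#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/media.h>

int main(void)
{
	struct media_device_info info;
	int fd = open("/dev/media0", O_RDWR);

	memset(&info, 0, sizeof(info));
	if (fd < 0 || ioctl(fd, MEDIA_IOC_DEVICE_INFO, &info) < 0) {
		perror("media device");
		return 1;
	}

	/* Only trust the new fields on kernels that actually fill them in. */
	if (MEDIA_V2_ENTITY_HAS_FLAGS(info.media_version))
		printf("entity flags are valid on this kernel\n");

	if (MEDIA_V2_PAD_HAS_INDEX(info.media_version))
		printf("pad index is valid on this kernel\n");

	close(fd);
	return 0;
}
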
diff --git a/include/uapi/linux/mii.h b/include/uapi/linux/mii.h
index b5c2fdcf23fd..a506216591d6 100644
--- a/include/uapi/linux/mii.h
+++ b/include/uapi/linux/mii.h
@@ -136,6 +136,7 @@
#define CTL1000_ENABLE_MASTER 0x1000
/* 1000BASE-T Status register */
+#define LPA_1000MSFAIL 0x8000 /* Master/Slave resolution failure */
#define LPA_1000LOCALRXOK 0x2000 /* Link partner local receiver status */
#define LPA_1000REMRXOK 0x1000 /* Link partner remote receiver status */
#define LPA_1000FULL 0x0800 /* Link partner 1000BASE-T full duplex */
diff --git a/include/uapi/linux/mroute.h b/include/uapi/linux/mroute.h
index 10f9ff9426a2..5d37a9ccce63 100644
--- a/include/uapi/linux/mroute.h
+++ b/include/uapi/linux/mroute.h
@@ -120,6 +120,7 @@ enum {
IPMRA_TABLE_MROUTE_DO_ASSERT,
IPMRA_TABLE_MROUTE_DO_PIM,
IPMRA_TABLE_VIFS,
+ IPMRA_TABLE_MROUTE_DO_WRVIFWHOLE,
__IPMRA_TABLE_MAX
};
#define IPMRA_TABLE_MAX (__IPMRA_TABLE_MAX - 1)
@@ -173,5 +174,6 @@ enum {
#define IGMPMSG_NOCACHE 1 /* Kern cache fill request to mrouted */
#define IGMPMSG_WRONGVIF 2 /* For PIM assert processing (unused) */
#define IGMPMSG_WHOLEPKT 3 /* For PIM Register processing */
+#define IGMPMSG_WRVIFWHOLE 4 /* For PIM Register and assert processing */
#endif /* _UAPI__LINUX_MROUTE_H */
diff --git a/include/uapi/linux/nbd.h b/include/uapi/linux/nbd.h
index 85a3fb65e40a..20d6cc91435d 100644
--- a/include/uapi/linux/nbd.h
+++ b/include/uapi/linux/nbd.h
@@ -53,6 +53,9 @@ enum {
/* These are client behavior specific flags. */
#define NBD_CFLAG_DESTROY_ON_DISCONNECT (1 << 0) /* delete the nbd device on
disconnect. */
+#define NBD_CFLAG_DISCONNECT_ON_CLOSE (1 << 1) /* disconnect the nbd device on
+ * close by last opener.
+ */
/* userspace doesn't need the nbd_device structure */
diff --git a/include/uapi/linux/net_tstamp.h b/include/uapi/linux/net_tstamp.h
index 4fe104b2411f..97ff3c17ec4d 100644
--- a/include/uapi/linux/net_tstamp.h
+++ b/include/uapi/linux/net_tstamp.h
@@ -141,4 +141,22 @@ struct scm_ts_pktinfo {
__u32 reserved[2];
};
+/*
+ * SO_TXTIME gets a struct sock_txtime with flags being an integer bit
+ * field comprised of these values.
+ */
+enum txtime_flags {
+ SOF_TXTIME_DEADLINE_MODE = (1 << 0),
+ SOF_TXTIME_REPORT_ERRORS = (1 << 1),
+
+ SOF_TXTIME_FLAGS_LAST = SOF_TXTIME_REPORT_ERRORS,
+ SOF_TXTIME_FLAGS_MASK = (SOF_TXTIME_FLAGS_LAST - 1) |
+ SOF_TXTIME_FLAGS_LAST
+};
+
+struct sock_txtime {
+ clockid_t clockid; /* reference clockid */
+ __u32 flags; /* as defined by enum txtime_flags */
+};
+
#endif /* _NET_TIMESTAMPING_H */
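
A hedged sketch of enabling the new SO_TXTIME option; the option number itself comes from asm-generic/socket.h rather than this header, and per-packet transmit times would then be attached to sendmsg() via an SCM_TXTIME control message (not shown).

#include <string.h>
#include <time.h>
#include <sys/socket.h>
#include <linux/net_tstamp.h>

static int enable_txtime(int sock)
{
	struct sock_txtime cfg;

	memset(&cfg, 0, sizeof(cfg));
	cfg.clockid = CLOCK_TAI;			/* reference clock */
	cfg.flags   = SOF_TXTIME_REPORT_ERRORS;		/* errors via error queue */

	return setsockopt(sock, SOL_SOCKET, SO_TXTIME, &cfg, sizeof(cfg));
}
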
diff --git a/include/uapi/linux/netconf.h b/include/uapi/linux/netconf.h
index c84fcdfca862..fac4edd55379 100644
--- a/include/uapi/linux/netconf.h
+++ b/include/uapi/linux/netconf.h
@@ -18,6 +18,7 @@ enum {
NETCONFA_PROXY_NEIGH,
NETCONFA_IGNORE_ROUTES_WITH_LINKDOWN,
NETCONFA_INPUT,
+ NETCONFA_BC_FORWARDING,
__NETCONFA_MAX
};
#define NETCONFA_MAX (__NETCONFA_MAX - 1)
diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h
index 89438e68dc03..e23290ffdc77 100644
--- a/include/uapi/linux/netfilter/nf_tables.h
+++ b/include/uapi/linux/netfilter/nf_tables.h
@@ -8,6 +8,7 @@
#define NFT_SET_MAXNAMELEN NFT_NAME_MAXLEN
#define NFT_OBJ_MAXNAMELEN NFT_NAME_MAXLEN
#define NFT_USERDATA_MAXLEN 256
+#define NFT_OSF_MAXGENRELEN 16
/**
* enum nft_registers - nf_tables registers
@@ -921,10 +922,12 @@ enum nft_socket_attributes {
/*
* enum nft_socket_keys - nf_tables socket expression keys
*
- * @NFT_SOCKET_TRANSPARENT: Value of the IP(V6)_TRANSPARENT socket option_
+ * @NFT_SOCKET_TRANSPARENT: Value of the IP(V6)_TRANSPARENT socket option
+ * @NFT_SOCKET_MARK: Value of the socket mark
*/
enum nft_socket_keys {
NFT_SOCKET_TRANSPARENT,
+ NFT_SOCKET_MARK,
__NFT_SOCKET_MAX
};
#define NFT_SOCKET_MAX (__NFT_SOCKET_MAX - 1)
@@ -955,6 +958,7 @@ enum nft_socket_keys {
* @NFT_CT_DST_IP: conntrack layer 3 protocol destination (IPv4 address)
* @NFT_CT_SRC_IP6: conntrack layer 3 protocol source (IPv6 address)
* @NFT_CT_DST_IP6: conntrack layer 3 protocol destination (IPv6 address)
+ * @NFT_CT_TIMEOUT: connection tracking timeout policy assigned to conntrack
*/
enum nft_ct_keys {
NFT_CT_STATE,
@@ -980,6 +984,7 @@ enum nft_ct_keys {
NFT_CT_DST_IP,
NFT_CT_SRC_IP6,
NFT_CT_DST_IP6,
+ NFT_CT_TIMEOUT,
__NFT_CT_MAX
};
#define NFT_CT_MAX (__NFT_CT_MAX - 1)
@@ -1251,6 +1256,22 @@ enum nft_nat_attributes {
#define NFTA_NAT_MAX (__NFTA_NAT_MAX - 1)
/**
+ * enum nft_tproxy_attributes - nf_tables tproxy expression netlink attributes
+ *
+ * NFTA_TPROXY_FAMILY: Target address family (NLA_U32: nft_registers)
+ * NFTA_TPROXY_REG_ADDR: Target address register (NLA_U32: nft_registers)
+ * NFTA_TPROXY_REG_PORT: Target port register (NLA_U32: nft_registers)
+ */
+enum nft_tproxy_attributes {
+ NFTA_TPROXY_UNSPEC,
+ NFTA_TPROXY_FAMILY,
+ NFTA_TPROXY_REG_ADDR,
+ NFTA_TPROXY_REG_PORT,
+ __NFTA_TPROXY_MAX
+};
+#define NFTA_TPROXY_MAX (__NFTA_TPROXY_MAX - 1)
+
+/**
* enum nft_masq_attributes - nf_tables masquerade expression attributes
*
* @NFTA_MASQ_FLAGS: NAT flags (see NF_NAT_RANGE_* in linux/netfilter/nf_nat.h) (NLA_U32)
@@ -1392,13 +1413,24 @@ enum nft_ct_helper_attributes {
};
#define NFTA_CT_HELPER_MAX (__NFTA_CT_HELPER_MAX - 1)
+enum nft_ct_timeout_timeout_attributes {
+ NFTA_CT_TIMEOUT_UNSPEC,
+ NFTA_CT_TIMEOUT_L3PROTO,
+ NFTA_CT_TIMEOUT_L4PROTO,
+ NFTA_CT_TIMEOUT_DATA,
+ __NFTA_CT_TIMEOUT_MAX,
+};
+#define NFTA_CT_TIMEOUT_MAX (__NFTA_CT_TIMEOUT_MAX - 1)
+
#define NFT_OBJECT_UNSPEC 0
#define NFT_OBJECT_COUNTER 1
#define NFT_OBJECT_QUOTA 2
#define NFT_OBJECT_CT_HELPER 3
#define NFT_OBJECT_LIMIT 4
#define NFT_OBJECT_CONNLIMIT 5
-#define __NFT_OBJECT_MAX 6
+#define NFT_OBJECT_TUNNEL 6
+#define NFT_OBJECT_CT_TIMEOUT 7
+#define __NFT_OBJECT_MAX 8
#define NFT_OBJECT_MAX (__NFT_OBJECT_MAX - 1)
/**
@@ -1461,6 +1493,13 @@ enum nft_flowtable_hook_attributes {
};
#define NFTA_FLOWTABLE_HOOK_MAX (__NFTA_FLOWTABLE_HOOK_MAX - 1)
+enum nft_osf_attributes {
+ NFTA_OSF_UNSPEC,
+ NFTA_OSF_DREG,
+ __NFTA_OSF_MAX,
+};
+#define NFTA_OSF_MAX (__NFTA_OSF_MAX - 1)
+
/**
* enum nft_device_attributes - nf_tables device netlink attributes
*
@@ -1555,4 +1594,85 @@ enum nft_ng_types {
};
#define NFT_NG_MAX (__NFT_NG_MAX - 1)
+enum nft_tunnel_key_ip_attributes {
+ NFTA_TUNNEL_KEY_IP_UNSPEC,
+ NFTA_TUNNEL_KEY_IP_SRC,
+ NFTA_TUNNEL_KEY_IP_DST,
+ __NFTA_TUNNEL_KEY_IP_MAX
+};
+#define NFTA_TUNNEL_KEY_IP_MAX (__NFTA_TUNNEL_KEY_IP_MAX - 1)
+
+enum nft_tunnel_ip6_attributes {
+ NFTA_TUNNEL_KEY_IP6_UNSPEC,
+ NFTA_TUNNEL_KEY_IP6_SRC,
+ NFTA_TUNNEL_KEY_IP6_DST,
+ NFTA_TUNNEL_KEY_IP6_FLOWLABEL,
+ __NFTA_TUNNEL_KEY_IP6_MAX
+};
+#define NFTA_TUNNEL_KEY_IP6_MAX (__NFTA_TUNNEL_KEY_IP6_MAX - 1)
+
+enum nft_tunnel_opts_attributes {
+ NFTA_TUNNEL_KEY_OPTS_UNSPEC,
+ NFTA_TUNNEL_KEY_OPTS_VXLAN,
+ NFTA_TUNNEL_KEY_OPTS_ERSPAN,
+ __NFTA_TUNNEL_KEY_OPTS_MAX
+};
+#define NFTA_TUNNEL_KEY_OPTS_MAX (__NFTA_TUNNEL_KEY_OPTS_MAX - 1)
+
+enum nft_tunnel_opts_vxlan_attributes {
+ NFTA_TUNNEL_KEY_VXLAN_UNSPEC,
+ NFTA_TUNNEL_KEY_VXLAN_GBP,
+ __NFTA_TUNNEL_KEY_VXLAN_MAX
+};
+#define NFTA_TUNNEL_KEY_VXLAN_MAX (__NFTA_TUNNEL_KEY_VXLAN_MAX - 1)
+
+enum nft_tunnel_opts_erspan_attributes {
+ NFTA_TUNNEL_KEY_ERSPAN_UNSPEC,
+ NFTA_TUNNEL_KEY_ERSPAN_VERSION,
+ NFTA_TUNNEL_KEY_ERSPAN_V1_INDEX,
+ NFTA_TUNNEL_KEY_ERSPAN_V2_HWID,
+ NFTA_TUNNEL_KEY_ERSPAN_V2_DIR,
+ __NFTA_TUNNEL_KEY_ERSPAN_MAX
+};
+#define NFTA_TUNNEL_KEY_ERSPAN_MAX (__NFTA_TUNNEL_KEY_ERSPAN_MAX - 1)
+
+enum nft_tunnel_flags {
+ NFT_TUNNEL_F_ZERO_CSUM_TX = (1 << 0),
+ NFT_TUNNEL_F_DONT_FRAGMENT = (1 << 1),
+ NFT_TUNNEL_F_SEQ_NUMBER = (1 << 2),
+};
+#define NFT_TUNNEL_F_MASK (NFT_TUNNEL_F_ZERO_CSUM_TX | \
+ NFT_TUNNEL_F_DONT_FRAGMENT | \
+ NFT_TUNNEL_F_SEQ_NUMBER)
+
+enum nft_tunnel_key_attributes {
+ NFTA_TUNNEL_KEY_UNSPEC,
+ NFTA_TUNNEL_KEY_ID,
+ NFTA_TUNNEL_KEY_IP,
+ NFTA_TUNNEL_KEY_IP6,
+ NFTA_TUNNEL_KEY_FLAGS,
+ NFTA_TUNNEL_KEY_TOS,
+ NFTA_TUNNEL_KEY_TTL,
+ NFTA_TUNNEL_KEY_SPORT,
+ NFTA_TUNNEL_KEY_DPORT,
+ NFTA_TUNNEL_KEY_OPTS,
+ __NFTA_TUNNEL_KEY_MAX
+};
+#define NFTA_TUNNEL_KEY_MAX (__NFTA_TUNNEL_KEY_MAX - 1)
+
+enum nft_tunnel_keys {
+ NFT_TUNNEL_PATH,
+ NFT_TUNNEL_ID,
+ __NFT_TUNNEL_MAX
+};
+#define NFT_TUNNEL_MAX (__NFT_TUNNEL_MAX - 1)
+
+enum nft_tunnel_attributes {
+ NFTA_TUNNEL_UNSPEC,
+ NFTA_TUNNEL_KEY,
+ NFTA_TUNNEL_DREG,
+ __NFTA_TUNNEL_MAX
+};
+#define NFTA_TUNNEL_MAX (__NFTA_TUNNEL_MAX - 1)
+
#endif /* _LINUX_NF_TABLES_H */
diff --git a/include/uapi/linux/netfilter/nf_osf.h b/include/uapi/linux/netfilter/nfnetlink_osf.h
index 8f2f2f403183..272bc3195f2d 100644
--- a/include/uapi/linux/netfilter/nf_osf.h
+++ b/include/uapi/linux/netfilter/nfnetlink_osf.h
@@ -2,6 +2,8 @@
#define _NF_OSF_H
#include <linux/types.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
#define MAXGENRELEN 32
@@ -16,9 +18,14 @@
#define NF_OSF_TTL_TRUE 0 /* True ip and fingerprint TTL comparison */
+/* Check if ip TTL is less than fingerprint one */
+#define NF_OSF_TTL_LESS 1
+
/* Do not compare ip and fingerprint TTL at all */
#define NF_OSF_TTL_NOCHECK 2
+#define NF_OSF_FLAGMASK (NF_OSF_GENRE | NF_OSF_TTL | \
+ NF_OSF_LOG | NF_OSF_INVERT)
/* Wildcard MSS (kind of).
* It is used to implement a state machine for the different wildcard values
* of the MSS and window sizes.
@@ -83,4 +90,31 @@ enum iana_options {
OSFOPT_EMPTY = 255,
};
+/* Initial window size option state machine: multiple of mss, mtu or
+ * plain numeric value. It can also be a plain numeric value that is
+ * not a multiple of the specified value.
+ */
+enum nf_osf_window_size_options {
+ OSF_WSS_PLAIN = 0,
+ OSF_WSS_MSS,
+ OSF_WSS_MTU,
+ OSF_WSS_MODULO,
+ OSF_WSS_MAX,
+};
+
+enum nf_osf_attr_type {
+ OSF_ATTR_UNSPEC,
+ OSF_ATTR_FINGER,
+ OSF_ATTR_MAX,
+};
+
+/*
+ * Add/remove fingerprint from the kernel.
+ */
+enum nf_osf_msg_types {
+ OSF_MSG_ADD,
+ OSF_MSG_REMOVE,
+ OSF_MSG_MAX,
+};
+
#endif /* _NF_OSF_H */
diff --git a/include/uapi/linux/netfilter/xt_osf.h b/include/uapi/linux/netfilter/xt_osf.h
index 72956eceeb09..6e466236ca4b 100644
--- a/include/uapi/linux/netfilter/xt_osf.h
+++ b/include/uapi/linux/netfilter/xt_osf.h
@@ -21,9 +21,7 @@
#define _XT_OSF_H
#include <linux/types.h>
-#include <linux/ip.h>
-#include <linux/tcp.h>
-#include <linux/netfilter/nf_osf.h>
+#include <linux/netfilter/nfnetlink_osf.h>
#define XT_OSF_GENRE NF_OSF_GENRE
#define XT_OSF_INVERT NF_OSF_INVERT
@@ -37,8 +35,7 @@
#define XT_OSF_TTL_TRUE NF_OSF_TTL_TRUE
#define XT_OSF_TTL_NOCHECK NF_OSF_TTL_NOCHECK
-
-#define XT_OSF_TTL_LESS 1 /* Check if ip TTL is less than fingerprint one */
+#define XT_OSF_TTL_LESS NF_OSF_TTL_LESS
#define xt_osf_wc nf_osf_wc
#define xt_osf_opt nf_osf_opt
@@ -47,19 +44,8 @@
#define xt_osf_finger nf_osf_finger
#define xt_osf_nlmsg nf_osf_nlmsg
-/*
- * Add/remove fingerprint from the kernel.
- */
-enum xt_osf_msg_types {
- OSF_MSG_ADD,
- OSF_MSG_REMOVE,
- OSF_MSG_MAX,
-};
-
-enum xt_osf_attr_type {
- OSF_ATTR_UNSPEC,
- OSF_ATTR_FINGER,
- OSF_ATTR_MAX,
-};
+#define xt_osf_window_size_options nf_osf_window_size_options
+#define xt_osf_attr_type nf_osf_attr_type
+#define xt_osf_msg_types nf_osf_msg_types
#endif /* _XT_OSF_H */
diff --git a/include/uapi/linux/netfilter_bridge.h b/include/uapi/linux/netfilter_bridge.h
index 12fb77633f83..156ccd089df1 100644
--- a/include/uapi/linux/netfilter_bridge.h
+++ b/include/uapi/linux/netfilter_bridge.h
@@ -26,4 +26,15 @@
#define NF_BR_BROUTING 5
#define NF_BR_NUMHOOKS 6
+enum nf_br_hook_priorities {
+ NF_BR_PRI_FIRST = INT_MIN,
+ NF_BR_PRI_NAT_DST_BRIDGED = -300,
+ NF_BR_PRI_FILTER_BRIDGED = -200,
+ NF_BR_PRI_BRNF = 0,
+ NF_BR_PRI_NAT_DST_OTHER = 100,
+ NF_BR_PRI_FILTER_OTHER = 200,
+ NF_BR_PRI_NAT_SRC = 300,
+ NF_BR_PRI_LAST = INT_MAX,
+};
+
#endif /* _UAPI__LINUX_BRIDGE_NETFILTER_H */
diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h
index 27e4e441caac..7acc16f34942 100644
--- a/include/uapi/linux/nl80211.h
+++ b/include/uapi/linux/nl80211.h
@@ -2237,6 +2237,9 @@ enum nl80211_commands {
* enforced.
* @NL80211_ATTR_TXQ_QUANTUM: TXQ scheduler quantum (bytes). Number of bytes
* a flow is assigned on each round of the DRR scheduler.
+ * @NL80211_ATTR_HE_CAPABILITY: HE Capability information element (from
+ * association request when used with NL80211_CMD_NEW_STATION). Can be set
+ * only if %NL80211_STA_FLAG_WME is set.
*
* @NUM_NL80211_ATTR: total number of nl80211_attrs available
* @NL80211_ATTR_MAX: highest attribute number currently defined
@@ -2677,6 +2680,8 @@ enum nl80211_attrs {
NL80211_ATTR_TXQ_MEMORY_LIMIT,
NL80211_ATTR_TXQ_QUANTUM,
+ NL80211_ATTR_HE_CAPABILITY,
+
/* add attributes here, update the policy in nl80211.c */
__NL80211_ATTR_AFTER_LAST,
@@ -2726,7 +2731,8 @@ enum nl80211_attrs {
#define NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY 24
#define NL80211_HT_CAPABILITY_LEN 26
#define NL80211_VHT_CAPABILITY_LEN 12
-
+#define NL80211_HE_MIN_CAPABILITY_LEN 16
+#define NL80211_HE_MAX_CAPABILITY_LEN 51
#define NL80211_MAX_NR_CIPHER_SUITES 5
#define NL80211_MAX_NR_AKM_SUITES 2
@@ -2854,6 +2860,38 @@ struct nl80211_sta_flag_update {
} __attribute__((packed));
/**
+ * enum nl80211_he_gi - HE guard interval
+ * @NL80211_RATE_INFO_HE_GI_0_8: 0.8 usec
+ * @NL80211_RATE_INFO_HE_GI_1_6: 1.6 usec
+ * @NL80211_RATE_INFO_HE_GI_3_2: 3.2 usec
+ */
+enum nl80211_he_gi {
+ NL80211_RATE_INFO_HE_GI_0_8,
+ NL80211_RATE_INFO_HE_GI_1_6,
+ NL80211_RATE_INFO_HE_GI_3_2,
+};
+
+/**
+ * enum nl80211_he_ru_alloc - HE RU allocation values
+ * @NL80211_RATE_INFO_HE_RU_ALLOC_26: 26-tone RU allocation
+ * @NL80211_RATE_INFO_HE_RU_ALLOC_52: 52-tone RU allocation
+ * @NL80211_RATE_INFO_HE_RU_ALLOC_106: 106-tone RU allocation
+ * @NL80211_RATE_INFO_HE_RU_ALLOC_242: 242-tone RU allocation
+ * @NL80211_RATE_INFO_HE_RU_ALLOC_484: 484-tone RU allocation
+ * @NL80211_RATE_INFO_HE_RU_ALLOC_996: 996-tone RU allocation
+ * @NL80211_RATE_INFO_HE_RU_ALLOC_2x996: 2x996-tone RU allocation
+ */
+enum nl80211_he_ru_alloc {
+ NL80211_RATE_INFO_HE_RU_ALLOC_26,
+ NL80211_RATE_INFO_HE_RU_ALLOC_52,
+ NL80211_RATE_INFO_HE_RU_ALLOC_106,
+ NL80211_RATE_INFO_HE_RU_ALLOC_242,
+ NL80211_RATE_INFO_HE_RU_ALLOC_484,
+ NL80211_RATE_INFO_HE_RU_ALLOC_996,
+ NL80211_RATE_INFO_HE_RU_ALLOC_2x996,
+};
+
+/**
* enum nl80211_rate_info - bitrate information
*
* These attribute types are used with %NL80211_STA_INFO_TXRATE
@@ -2885,6 +2923,13 @@ struct nl80211_sta_flag_update {
* @NL80211_RATE_INFO_5_MHZ_WIDTH: 5 MHz width - note that this is
* a legacy rate and will be reported as the actual bitrate, i.e.
* a quarter of the base (20 MHz) rate
+ * @NL80211_RATE_INFO_HE_MCS: HE MCS index (u8, 0-11)
+ * @NL80211_RATE_INFO_HE_NSS: HE NSS value (u8, 1-8)
+ * @NL80211_RATE_INFO_HE_GI: HE guard interval identifier
+ * (u8, see &enum nl80211_he_gi)
+ * @NL80211_RATE_INFO_HE_DCM: HE DCM value (u8, 0/1)
+ * @NL80211_RATE_INFO_RU_ALLOC: HE RU allocation, if not present then
+ * non-OFDMA was used (u8, see &enum nl80211_he_ru_alloc)
* @__NL80211_RATE_INFO_AFTER_LAST: internal use
*/
enum nl80211_rate_info {
@@ -2901,6 +2946,11 @@ enum nl80211_rate_info {
NL80211_RATE_INFO_160_MHZ_WIDTH,
NL80211_RATE_INFO_10_MHZ_WIDTH,
NL80211_RATE_INFO_5_MHZ_WIDTH,
+ NL80211_RATE_INFO_HE_MCS,
+ NL80211_RATE_INFO_HE_NSS,
+ NL80211_RATE_INFO_HE_GI,
+ NL80211_RATE_INFO_HE_DCM,
+ NL80211_RATE_INFO_HE_RU_ALLOC,
/* keep last */
__NL80211_RATE_INFO_AFTER_LAST,
@@ -3167,6 +3217,38 @@ enum nl80211_mpath_info {
};
/**
+ * enum nl80211_band_iftype_attr - Interface type data attributes
+ *
+ * @__NL80211_BAND_IFTYPE_ATTR_INVALID: attribute number 0 is reserved
+ * @NL80211_BAND_IFTYPE_ATTR_IFTYPES: nested attribute containing a flag attribute
+ * for each interface type that supports the band data
+ * @NL80211_BAND_IFTYPE_ATTR_HE_CAP_MAC: HE MAC capabilities as in HE
+ * capabilities IE
+ * @NL80211_BAND_IFTYPE_ATTR_HE_CAP_PHY: HE PHY capabilities as in HE
+ * capabilities IE
+ * @NL80211_BAND_IFTYPE_ATTR_HE_CAP_MCS_SET: HE supported NSS/MCS as in HE
+ * capabilities IE
+ * @NL80211_BAND_IFTYPE_ATTR_HE_CAP_PPE: HE PPE thresholds information as
+ * defined in HE capabilities IE
+ * @NL80211_BAND_IFTYPE_ATTR_MAX: highest band HE capability attribute currently
+ * defined
+ * @__NL80211_BAND_IFTYPE_ATTR_AFTER_LAST: internal use
+ */
+enum nl80211_band_iftype_attr {
+ __NL80211_BAND_IFTYPE_ATTR_INVALID,
+
+ NL80211_BAND_IFTYPE_ATTR_IFTYPES,
+ NL80211_BAND_IFTYPE_ATTR_HE_CAP_MAC,
+ NL80211_BAND_IFTYPE_ATTR_HE_CAP_PHY,
+ NL80211_BAND_IFTYPE_ATTR_HE_CAP_MCS_SET,
+ NL80211_BAND_IFTYPE_ATTR_HE_CAP_PPE,
+
+ /* keep last */
+ __NL80211_BAND_IFTYPE_ATTR_AFTER_LAST,
+ NL80211_BAND_IFTYPE_ATTR_MAX = __NL80211_BAND_IFTYPE_ATTR_AFTER_LAST - 1
+};
+
+/**
* enum nl80211_band_attr - band attributes
* @__NL80211_BAND_ATTR_INVALID: attribute number 0 is reserved
* @NL80211_BAND_ATTR_FREQS: supported frequencies in this band,
@@ -3181,6 +3263,8 @@ enum nl80211_mpath_info {
* @NL80211_BAND_ATTR_VHT_MCS_SET: 32-byte attribute containing the MCS set as
* defined in 802.11ac
* @NL80211_BAND_ATTR_VHT_CAPA: VHT capabilities, as in the HT information IE
+ * @NL80211_BAND_ATTR_IFTYPE_DATA: nested array attribute, with each entry using
+ * attributes from &enum nl80211_band_iftype_attr
* @NL80211_BAND_ATTR_MAX: highest band attribute currently defined
* @__NL80211_BAND_ATTR_AFTER_LAST: internal use
*/
@@ -3196,6 +3280,7 @@ enum nl80211_band_attr {
NL80211_BAND_ATTR_VHT_MCS_SET,
NL80211_BAND_ATTR_VHT_CAPA,
+ NL80211_BAND_ATTR_IFTYPE_DATA,
/* keep last */
__NL80211_BAND_ATTR_AFTER_LAST,
@@ -5133,6 +5218,11 @@ enum nl80211_feature_flags {
* support to nl80211.
* @NL80211_EXT_FEATURE_TXQS: Driver supports FQ-CoDel-enabled intermediate
* TXQs.
+ * @NL80211_EXT_FEATURE_SCAN_RANDOM_SN: Driver/device supports randomizing the
+ * SN in probe request frames if requested by %NL80211_SCAN_FLAG_RANDOM_SN.
+ * @NL80211_EXT_FEATURE_SCAN_MIN_PREQ_CONTENT: Driver/device can omit all data
+ * except for supported rates from the probe request content if requested
+ * by the %NL80211_SCAN_FLAG_MIN_PREQ_CONTENT flag.
*
* @NUM_NL80211_EXT_FEATURES: number of extended features.
* @MAX_NL80211_EXT_FEATURES: highest extended feature index.
@@ -5167,6 +5257,8 @@ enum nl80211_ext_feature_index {
NL80211_EXT_FEATURE_CONTROL_PORT_OVER_NL80211,
NL80211_EXT_FEATURE_DATA_ACK_SIGNAL_SUPPORT,
NL80211_EXT_FEATURE_TXQS,
+ NL80211_EXT_FEATURE_SCAN_RANDOM_SN,
+ NL80211_EXT_FEATURE_SCAN_MIN_PREQ_CONTENT,
/* add new features before the definition below */
NUM_NL80211_EXT_FEATURES,
@@ -5272,6 +5364,12 @@ enum nl80211_timeout_reason {
* possible scan results. This flag hints the driver to use the best
* possible scan configuration to improve the accuracy in scanning.
* Latency and power use may get impacted with this flag.
+ * @NL80211_SCAN_FLAG_RANDOM_SN: randomize the sequence number in probe
+ * request frames from this scan to avoid correlation/tracking being
+ * possible.
+ * @NL80211_SCAN_FLAG_MIN_PREQ_CONTENT: minimize probe request content to
+ * only have supported rates and no additional capabilities (unless
+ * added by userspace explicitly).
*/
enum nl80211_scan_flags {
NL80211_SCAN_FLAG_LOW_PRIORITY = 1<<0,
@@ -5285,6 +5383,8 @@ enum nl80211_scan_flags {
NL80211_SCAN_FLAG_LOW_SPAN = 1<<8,
NL80211_SCAN_FLAG_LOW_POWER = 1<<9,
NL80211_SCAN_FLAG_HIGH_ACCURACY = 1<<10,
+ NL80211_SCAN_FLAG_RANDOM_SN = 1<<11,
+ NL80211_SCAN_FLAG_MIN_PREQ_CONTENT = 1<<12,
};
/**
diff --git a/include/uapi/linux/openvswitch.h b/include/uapi/linux/openvswitch.h
index 863aabaa5cc9..dbe0cbe4f1b7 100644
--- a/include/uapi/linux/openvswitch.h
+++ b/include/uapi/linux/openvswitch.h
@@ -840,6 +840,8 @@ struct ovs_action_push_eth {
* @OVS_ACTION_ATTR_POP_NSH: pop the outermost NSH header off the packet.
* @OVS_ACTION_ATTR_METER: Run packet through a meter, which may drop the
* packet, or modify the packet (e.g., change the DSCP field).
+ * @OVS_ACTION_ATTR_CLONE: make a copy of the packet and execute a list of
+ * actions without affecting the original packet and key.
*
* Only a single header can be set with a single %OVS_ACTION_ATTR_SET. Not all
* fields within a header are modifiable, e.g. the IPv4 protocol and fragment
@@ -873,6 +875,7 @@ enum ovs_action_attr {
OVS_ACTION_ATTR_PUSH_NSH, /* Nested OVS_NSH_KEY_ATTR_*. */
OVS_ACTION_ATTR_POP_NSH, /* No argument. */
OVS_ACTION_ATTR_METER, /* u32 meter ID. */
+ OVS_ACTION_ATTR_CLONE, /* Nested OVS_CLONE_ATTR_*. */
__OVS_ACTION_ATTR_MAX, /* Nothing past this will be accepted
* from userspace. */
diff --git a/include/uapi/linux/pci_regs.h b/include/uapi/linux/pci_regs.h
index 4da87e2ef8a8..ee556ccc93f4 100644
--- a/include/uapi/linux/pci_regs.h
+++ b/include/uapi/linux/pci_regs.h
@@ -636,6 +636,7 @@
#define PCI_EXP_DEVCAP2_OBFF_MASK 0x000c0000 /* OBFF support mechanism */
#define PCI_EXP_DEVCAP2_OBFF_MSG 0x00040000 /* New message signaling */
#define PCI_EXP_DEVCAP2_OBFF_WAKE 0x00080000 /* Re-use WAKE# for OBFF */
+#define PCI_EXP_DEVCAP2_EE_PREFIX 0x00200000 /* End-End TLP Prefix */
#define PCI_EXP_DEVCTL2 40 /* Device Control 2 */
#define PCI_EXP_DEVCTL2_COMP_TIMEOUT 0x000f /* Completion Timeout Value */
#define PCI_EXP_DEVCTL2_COMP_TMOUT_DIS 0x0010 /* Completion Timeout Disable */
@@ -960,8 +961,9 @@
#define PCI_REBAR_CTRL 8 /* control register */
#define PCI_REBAR_CTRL_BAR_IDX 0x00000007 /* BAR index */
#define PCI_REBAR_CTRL_NBAR_MASK 0x000000E0 /* # of resizable BARs */
-#define PCI_REBAR_CTRL_NBAR_SHIFT 5 /* shift for # of BARs */
+#define PCI_REBAR_CTRL_NBAR_SHIFT 5 /* shift for # of BARs */
#define PCI_REBAR_CTRL_BAR_SIZE 0x00001F00 /* BAR size */
+#define PCI_REBAR_CTRL_BAR_SHIFT 8 /* shift for BAR size */
/* Dynamic Power Allocation */
#define PCI_DPA_CAP 4 /* capability register */
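PCI_REBAR_CTRL_BAR_SHIFT gives the shift that pairs with the existing PCI_REBAR_CTRL_BAR_SIZE mask, so the encoded BAR size can be extracted without an open-coded constant. A minimal sketch, assuming the usual Resizable BAR encoding where the field value is log2 of the size in MB (that interpretation comes from the PCIe spec, not from this header):

#include <stdint.h>
#include <linux/pci_regs.h>

/* Decode the currently selected BAR size (in bytes) from a Resizable
 * BAR control register value.  The "1 MB << encoded value" mapping is
 * taken from the PCIe encoding, not defined by this header. */
static uint64_t rebar_current_size(uint32_t ctrl)
{
	uint32_t enc = (ctrl & PCI_REBAR_CTRL_BAR_SIZE) >> PCI_REBAR_CTRL_BAR_SHIFT;

	return (uint64_t)1 << (20 + enc);
}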
diff --git a/include/uapi/linux/pcitest.h b/include/uapi/linux/pcitest.h
index 953cf036cb26..cbf422e56696 100644
--- a/include/uapi/linux/pcitest.h
+++ b/include/uapi/linux/pcitest.h
@@ -16,5 +16,8 @@
#define PCITEST_WRITE _IOW('P', 0x4, unsigned long)
#define PCITEST_READ _IOW('P', 0x5, unsigned long)
#define PCITEST_COPY _IOW('P', 0x6, unsigned long)
+#define PCITEST_MSIX _IOW('P', 0x7, int)
+#define PCITEST_SET_IRQTYPE _IOW('P', 0x8, int)
+#define PCITEST_GET_IRQTYPE _IO('P', 0x9)
#endif /* __UAPI_LINUX_PCITEST_H */
diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h
index b8e288a1f740..eeb787b1c53c 100644
--- a/include/uapi/linux/perf_event.h
+++ b/include/uapi/linux/perf_event.h
@@ -143,6 +143,8 @@ enum perf_event_sample_format {
PERF_SAMPLE_PHYS_ADDR = 1U << 19,
PERF_SAMPLE_MAX = 1U << 20, /* non-ABI */
+
+ __PERF_SAMPLE_CALLCHAIN_EARLY = 1ULL << 63,
};
/*
diff --git a/include/uapi/linux/pkt_cls.h b/include/uapi/linux/pkt_cls.h
index 84e4c1d0f874..be382fb0592d 100644
--- a/include/uapi/linux/pkt_cls.h
+++ b/include/uapi/linux/pkt_cls.h
@@ -45,6 +45,7 @@ enum {
* the skb and act like everything
* is alright.
*/
+#define TC_ACT_VALUE_MAX TC_ACT_TRAP
/* There is a special kind of actions called "extended actions",
* which need a value parameter. These have a local opcode located in
@@ -55,11 +56,12 @@ enum {
#define __TC_ACT_EXT_SHIFT 28
#define __TC_ACT_EXT(local) ((local) << __TC_ACT_EXT_SHIFT)
#define TC_ACT_EXT_VAL_MASK ((1 << __TC_ACT_EXT_SHIFT) - 1)
-#define TC_ACT_EXT_CMP(combined, opcode) \
- (((combined) & (~TC_ACT_EXT_VAL_MASK)) == opcode)
+#define TC_ACT_EXT_OPCODE(combined) ((combined) & (~TC_ACT_EXT_VAL_MASK))
+#define TC_ACT_EXT_CMP(combined, opcode) (TC_ACT_EXT_OPCODE(combined) == opcode)
#define TC_ACT_JUMP __TC_ACT_EXT(1)
#define TC_ACT_GOTO_CHAIN __TC_ACT_EXT(2)
+#define TC_ACT_EXT_OPCODE_MAX TC_ACT_GOTO_CHAIN
/* Action type identifiers*/
enum {
@@ -469,12 +471,47 @@ enum {
TCA_FLOWER_KEY_IP_TTL, /* u8 */
TCA_FLOWER_KEY_IP_TTL_MASK, /* u8 */
+ TCA_FLOWER_KEY_CVLAN_ID, /* be16 */
+ TCA_FLOWER_KEY_CVLAN_PRIO, /* u8 */
+ TCA_FLOWER_KEY_CVLAN_ETH_TYPE, /* be16 */
+
+ TCA_FLOWER_KEY_ENC_IP_TOS, /* u8 */
+ TCA_FLOWER_KEY_ENC_IP_TOS_MASK, /* u8 */
+ TCA_FLOWER_KEY_ENC_IP_TTL, /* u8 */
+ TCA_FLOWER_KEY_ENC_IP_TTL_MASK, /* u8 */
+
+ TCA_FLOWER_KEY_ENC_OPTS,
+ TCA_FLOWER_KEY_ENC_OPTS_MASK,
+
__TCA_FLOWER_MAX,
};
#define TCA_FLOWER_MAX (__TCA_FLOWER_MAX - 1)
enum {
+ TCA_FLOWER_KEY_ENC_OPTS_UNSPEC,
+ TCA_FLOWER_KEY_ENC_OPTS_GENEVE, /* Nested
+ * TCA_FLOWER_KEY_ENC_OPT_GENEVE_
+ * attributes
+ */
+ __TCA_FLOWER_KEY_ENC_OPTS_MAX,
+};
+
+#define TCA_FLOWER_KEY_ENC_OPTS_MAX (__TCA_FLOWER_KEY_ENC_OPTS_MAX - 1)
+
+enum {
+ TCA_FLOWER_KEY_ENC_OPT_GENEVE_UNSPEC,
+ TCA_FLOWER_KEY_ENC_OPT_GENEVE_CLASS, /* u16 */
+ TCA_FLOWER_KEY_ENC_OPT_GENEVE_TYPE, /* u8 */
+ TCA_FLOWER_KEY_ENC_OPT_GENEVE_DATA, /* 4 to 128 bytes */
+
+ __TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX,
+};
+
+#define TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX \
+ (__TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX - 1)
+
+enum {
TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT = (1 << 0),
TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST = (1 << 1),
};
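TC_ACT_EXT_OPCODE() factors the opcode extraction out of TC_ACT_EXT_CMP(), and TC_ACT_VALUE_MAX / TC_ACT_EXT_OPCODE_MAX bound the plain and extended action ranges. A small illustrative helper (not from the patch) showing how a verdict might be validated with these macros:

#include <stdbool.h>
#include <linux/pkt_cls.h>

/* Illustrative check: accept either a plain action code or a known
 * extended opcode such as TC_ACT_GOTO_CHAIN with its chain index. */
static bool tc_action_valid(int verdict)
{
	if (verdict <= TC_ACT_VALUE_MAX)
		return true;

	return TC_ACT_EXT_OPCODE(verdict) != 0 &&
	       TC_ACT_EXT_OPCODE(verdict) <= TC_ACT_EXT_OPCODE_MAX;
}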
diff --git a/include/uapi/linux/pkt_sched.h b/include/uapi/linux/pkt_sched.h
index 37b5096ae97b..8975fd1a1421 100644
--- a/include/uapi/linux/pkt_sched.h
+++ b/include/uapi/linux/pkt_sched.h
@@ -124,6 +124,21 @@ struct tc_fifo_qopt {
__u32 limit; /* Queue length: bytes for bfifo, packets for pfifo */
};
+/* SKBPRIO section */
+
+/*
+ * Priorities go from zero to (SKBPRIO_MAX_PRIORITY - 1).
+ * SKBPRIO_MAX_PRIORITY should be at least 64 so that skbprio can map
+ * the DS field of IPv4 and IPv6 headers one-to-one.
+ * Memory allocation grows linearly with SKBPRIO_MAX_PRIORITY.
+ */
+
+#define SKBPRIO_MAX_PRIORITY 64
+
+struct tc_skbprio_qopt {
+ __u32 limit; /* Queue length in packets. */
+};
+
/* PRIO section */
#define TCQ_PRIO_BANDS 16
@@ -539,6 +554,7 @@ enum {
TCA_NETEM_LATENCY64,
TCA_NETEM_JITTER64,
TCA_NETEM_SLOT,
+ TCA_NETEM_SLOT_DIST,
__TCA_NETEM_MAX,
};
@@ -581,6 +597,8 @@ struct tc_netem_slot {
__s64 max_delay;
__s32 max_packets;
__s32 max_bytes;
+ __s64 dist_delay; /* nsec */
+ __s64 dist_jitter; /* nsec */
};
enum {
@@ -934,4 +952,136 @@ enum {
#define TCA_CBS_MAX (__TCA_CBS_MAX - 1)
+
+/* ETF */
+struct tc_etf_qopt {
+ __s32 delta;
+ __s32 clockid;
+ __u32 flags;
+#define TC_ETF_DEADLINE_MODE_ON BIT(0)
+#define TC_ETF_OFFLOAD_ON BIT(1)
+};
+
+enum {
+ TCA_ETF_UNSPEC,
+ TCA_ETF_PARMS,
+ __TCA_ETF_MAX,
+};
+
+#define TCA_ETF_MAX (__TCA_ETF_MAX - 1)
+
+
+/* CAKE */
+enum {
+ TCA_CAKE_UNSPEC,
+ TCA_CAKE_PAD,
+ TCA_CAKE_BASE_RATE64,
+ TCA_CAKE_DIFFSERV_MODE,
+ TCA_CAKE_ATM,
+ TCA_CAKE_FLOW_MODE,
+ TCA_CAKE_OVERHEAD,
+ TCA_CAKE_RTT,
+ TCA_CAKE_TARGET,
+ TCA_CAKE_AUTORATE,
+ TCA_CAKE_MEMORY,
+ TCA_CAKE_NAT,
+ TCA_CAKE_RAW,
+ TCA_CAKE_WASH,
+ TCA_CAKE_MPU,
+ TCA_CAKE_INGRESS,
+ TCA_CAKE_ACK_FILTER,
+ TCA_CAKE_SPLIT_GSO,
+ __TCA_CAKE_MAX
+};
+#define TCA_CAKE_MAX (__TCA_CAKE_MAX - 1)
+
+enum {
+ __TCA_CAKE_STATS_INVALID,
+ TCA_CAKE_STATS_PAD,
+ TCA_CAKE_STATS_CAPACITY_ESTIMATE64,
+ TCA_CAKE_STATS_MEMORY_LIMIT,
+ TCA_CAKE_STATS_MEMORY_USED,
+ TCA_CAKE_STATS_AVG_NETOFF,
+ TCA_CAKE_STATS_MIN_NETLEN,
+ TCA_CAKE_STATS_MAX_NETLEN,
+ TCA_CAKE_STATS_MIN_ADJLEN,
+ TCA_CAKE_STATS_MAX_ADJLEN,
+ TCA_CAKE_STATS_TIN_STATS,
+ TCA_CAKE_STATS_DEFICIT,
+ TCA_CAKE_STATS_COBALT_COUNT,
+ TCA_CAKE_STATS_DROPPING,
+ TCA_CAKE_STATS_DROP_NEXT_US,
+ TCA_CAKE_STATS_P_DROP,
+ TCA_CAKE_STATS_BLUE_TIMER_US,
+ __TCA_CAKE_STATS_MAX
+};
+#define TCA_CAKE_STATS_MAX (__TCA_CAKE_STATS_MAX - 1)
+
+enum {
+ __TCA_CAKE_TIN_STATS_INVALID,
+ TCA_CAKE_TIN_STATS_PAD,
+ TCA_CAKE_TIN_STATS_SENT_PACKETS,
+ TCA_CAKE_TIN_STATS_SENT_BYTES64,
+ TCA_CAKE_TIN_STATS_DROPPED_PACKETS,
+ TCA_CAKE_TIN_STATS_DROPPED_BYTES64,
+ TCA_CAKE_TIN_STATS_ACKS_DROPPED_PACKETS,
+ TCA_CAKE_TIN_STATS_ACKS_DROPPED_BYTES64,
+ TCA_CAKE_TIN_STATS_ECN_MARKED_PACKETS,
+ TCA_CAKE_TIN_STATS_ECN_MARKED_BYTES64,
+ TCA_CAKE_TIN_STATS_BACKLOG_PACKETS,
+ TCA_CAKE_TIN_STATS_BACKLOG_BYTES,
+ TCA_CAKE_TIN_STATS_THRESHOLD_RATE64,
+ TCA_CAKE_TIN_STATS_TARGET_US,
+ TCA_CAKE_TIN_STATS_INTERVAL_US,
+ TCA_CAKE_TIN_STATS_WAY_INDIRECT_HITS,
+ TCA_CAKE_TIN_STATS_WAY_MISSES,
+ TCA_CAKE_TIN_STATS_WAY_COLLISIONS,
+ TCA_CAKE_TIN_STATS_PEAK_DELAY_US,
+ TCA_CAKE_TIN_STATS_AVG_DELAY_US,
+ TCA_CAKE_TIN_STATS_BASE_DELAY_US,
+ TCA_CAKE_TIN_STATS_SPARSE_FLOWS,
+ TCA_CAKE_TIN_STATS_BULK_FLOWS,
+ TCA_CAKE_TIN_STATS_UNRESPONSIVE_FLOWS,
+ TCA_CAKE_TIN_STATS_MAX_SKBLEN,
+ TCA_CAKE_TIN_STATS_FLOW_QUANTUM,
+ __TCA_CAKE_TIN_STATS_MAX
+};
+#define TCA_CAKE_TIN_STATS_MAX (__TCA_CAKE_TIN_STATS_MAX - 1)
+#define TC_CAKE_MAX_TINS (8)
+
+enum {
+ CAKE_FLOW_NONE = 0,
+ CAKE_FLOW_SRC_IP,
+ CAKE_FLOW_DST_IP,
+ CAKE_FLOW_HOSTS, /* = CAKE_FLOW_SRC_IP | CAKE_FLOW_DST_IP */
+ CAKE_FLOW_FLOWS,
+ CAKE_FLOW_DUAL_SRC, /* = CAKE_FLOW_SRC_IP | CAKE_FLOW_FLOWS */
+ CAKE_FLOW_DUAL_DST, /* = CAKE_FLOW_DST_IP | CAKE_FLOW_FLOWS */
+ CAKE_FLOW_TRIPLE, /* = CAKE_FLOW_HOSTS | CAKE_FLOW_FLOWS */
+ CAKE_FLOW_MAX,
+};
+
+enum {
+ CAKE_DIFFSERV_DIFFSERV3 = 0,
+ CAKE_DIFFSERV_DIFFSERV4,
+ CAKE_DIFFSERV_DIFFSERV8,
+ CAKE_DIFFSERV_BESTEFFORT,
+ CAKE_DIFFSERV_PRECEDENCE,
+ CAKE_DIFFSERV_MAX
+};
+
+enum {
+ CAKE_ACK_NONE = 0,
+ CAKE_ACK_FILTER,
+ CAKE_ACK_AGGRESSIVE,
+ CAKE_ACK_MAX
+};
+
+enum {
+ CAKE_ATM_NONE = 0,
+ CAKE_ATM_ATM,
+ CAKE_ATM_PTM,
+ CAKE_ATM_MAX
+};
+
#endif
diff --git a/include/uapi/linux/pmu.h b/include/uapi/linux/pmu.h
index 89cb1acea93a..97256f90e6df 100644
--- a/include/uapi/linux/pmu.h
+++ b/include/uapi/linux/pmu.h
@@ -93,8 +93,8 @@ enum {
PMU_HEATHROW_BASED, /* PowerBook G3 series */
PMU_PADDINGTON_BASED, /* 1999 PowerBook G3 */
PMU_KEYLARGO_BASED, /* Core99 motherboard (PMU99) */
- PMU_68K_V1, /* 68K PMU, version 1 */
- PMU_68K_V2, /* 68K PMU, version 2 */
+ PMU_68K_V1, /* Unused/deprecated */
+ PMU_68K_V2, /* Unused/deprecated */
};
/* PMU PMU_POWER_EVENTS commands */
diff --git a/include/uapi/linux/ppp-ioctl.h b/include/uapi/linux/ppp-ioctl.h
index 784c2e3e572e..88b5f9990320 100644
--- a/include/uapi/linux/ppp-ioctl.h
+++ b/include/uapi/linux/ppp-ioctl.h
@@ -68,7 +68,7 @@ struct ppp_option_data {
struct pppol2tp_ioc_stats {
__u16 tunnel_id; /* redundant */
__u16 session_id; /* if zero, get tunnel stats */
- __u32 using_ipsec:1; /* valid only for session_id == 0 */
+ __u32 using_ipsec:1;
__aligned_u64 tx_packets;
__aligned_u64 tx_bytes;
__aligned_u64 tx_errors;
diff --git a/include/uapi/linux/rds.h b/include/uapi/linux/rds.h
index 20c6bd0b0007..dc520e1a4123 100644
--- a/include/uapi/linux/rds.h
+++ b/include/uapi/linux/rds.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR Linux-OpenIB) */
/*
- * Copyright (c) 2008 Oracle. All rights reserved.
+ * Copyright (c) 2008, 2018 Oracle and/or its affiliates. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -118,7 +118,17 @@
#define RDS_INFO_IB_CONNECTIONS 10008
#define RDS_INFO_CONNECTION_STATS 10009
#define RDS_INFO_IWARP_CONNECTIONS 10010
-#define RDS_INFO_LAST 10010
+
+/* PF_RDS6 options */
+#define RDS6_INFO_CONNECTIONS 10011
+#define RDS6_INFO_SEND_MESSAGES 10012
+#define RDS6_INFO_RETRANS_MESSAGES 10013
+#define RDS6_INFO_RECV_MESSAGES 10014
+#define RDS6_INFO_SOCKETS 10015
+#define RDS6_INFO_TCP_SOCKETS 10016
+#define RDS6_INFO_IB_CONNECTIONS 10017
+
+#define RDS_INFO_LAST 10017
struct rds_info_counter {
__u8 name[32];
@@ -140,6 +150,15 @@ struct rds_info_connection {
__u8 flags;
} __attribute__((packed));
+struct rds6_info_connection {
+ __u64 next_tx_seq;
+ __u64 next_rx_seq;
+ struct in6_addr laddr;
+ struct in6_addr faddr;
+ __u8 transport[TRANSNAMSIZ]; /* null term ascii */
+ __u8 flags;
+} __attribute__((packed));
+
#define RDS_INFO_MESSAGE_FLAG_ACK 0x01
#define RDS_INFO_MESSAGE_FLAG_FAST_ACK 0x02
@@ -153,6 +172,17 @@ struct rds_info_message {
__u8 flags;
} __attribute__((packed));
+struct rds6_info_message {
+ __u64 seq;
+ __u32 len;
+ struct in6_addr laddr;
+ struct in6_addr faddr;
+ __be16 lport;
+ __be16 fport;
+ __u8 flags;
+ __u8 tos;
+} __attribute__((packed));
+
struct rds_info_socket {
__u32 sndbuf;
__be32 bound_addr;
@@ -163,6 +193,16 @@ struct rds_info_socket {
__u64 inum;
} __attribute__((packed));
+struct rds6_info_socket {
+ __u32 sndbuf;
+ struct in6_addr bound_addr;
+ struct in6_addr connected_addr;
+ __be16 bound_port;
+ __be16 connected_port;
+ __u32 rcvbuf;
+ __u64 inum;
+} __attribute__((packed));
+
struct rds_info_tcp_socket {
__be32 local_addr;
__be16 local_port;
@@ -175,6 +215,18 @@ struct rds_info_tcp_socket {
__u32 last_seen_una;
} __attribute__((packed));
+struct rds6_info_tcp_socket {
+ struct in6_addr local_addr;
+ __be16 local_port;
+ struct in6_addr peer_addr;
+ __be16 peer_port;
+ __u64 hdr_rem;
+ __u64 data_rem;
+ __u32 last_sent_nxt;
+ __u32 last_expected_una;
+ __u32 last_seen_una;
+} __attribute__((packed));
+
#define RDS_IB_GID_LEN 16
struct rds_info_rdma_connection {
__be32 src_addr;
@@ -189,6 +241,19 @@ struct rds_info_rdma_connection {
__u32 rdma_mr_size;
};
+struct rds6_info_rdma_connection {
+ struct in6_addr src_addr;
+ struct in6_addr dst_addr;
+ __u8 src_gid[RDS_IB_GID_LEN];
+ __u8 dst_gid[RDS_IB_GID_LEN];
+
+ __u32 max_send_wr;
+ __u32 max_recv_wr;
+ __u32 max_send_sge;
+ __u32 rdma_mr_max;
+ __u32 rdma_mr_size;
+};
+
/* RDS message Receive Path Latency points */
enum rds_message_rxpath_latency {
RDS_MSG_RX_HDR_TO_DGRAM_START = 0,
diff --git a/include/uapi/linux/rseq.h b/include/uapi/linux/rseq.h
index d620fa43756c..9a402fdb60e9 100644
--- a/include/uapi/linux/rseq.h
+++ b/include/uapi/linux/rseq.h
@@ -10,13 +10,8 @@
* Copyright (c) 2015-2018 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
*/
-#ifdef __KERNEL__
-# include <linux/types.h>
-#else
-# include <stdint.h>
-#endif
-
-#include <linux/types_32_64.h>
+#include <linux/types.h>
+#include <asm/byteorder.h>
enum rseq_cpu_id_state {
RSEQ_CPU_ID_UNINITIALIZED = -1,
@@ -52,10 +47,10 @@ struct rseq_cs {
__u32 version;
/* enum rseq_cs_flags */
__u32 flags;
- LINUX_FIELD_u32_u64(start_ip);
+ __u64 start_ip;
/* Offset from start_ip. */
- LINUX_FIELD_u32_u64(post_commit_offset);
- LINUX_FIELD_u32_u64(abort_ip);
+ __u64 post_commit_offset;
+ __u64 abort_ip;
} __attribute__((aligned(4 * sizeof(__u64))));
/*
@@ -67,28 +62,30 @@ struct rseq_cs {
struct rseq {
/*
* Restartable sequences cpu_id_start field. Updated by the
- * kernel, and read by user-space with single-copy atomicity
- * semantics. Aligned on 32-bit. Always contains a value in the
- * range of possible CPUs, although the value may not be the
- * actual current CPU (e.g. if rseq is not initialized). This
- * CPU number value should always be compared against the value
- * of the cpu_id field before performing a rseq commit or
- * returning a value read from a data structure indexed using
- * the cpu_id_start value.
+ * kernel. Read by user-space with single-copy atomicity
+ * semantics. This field should only be read by the thread which
+ * registered this data structure. Aligned on 32-bit. Always
+ * contains a value in the range of possible CPUs, although the
+ * value may not be the actual current CPU (e.g. if rseq is not
+ * initialized). This CPU number value should always be compared
+ * against the value of the cpu_id field before performing a rseq
+ * commit or returning a value read from a data structure indexed
+ * using the cpu_id_start value.
*/
__u32 cpu_id_start;
/*
- * Restartable sequences cpu_id field. Updated by the kernel,
- * and read by user-space with single-copy atomicity semantics.
- * Aligned on 32-bit. Values RSEQ_CPU_ID_UNINITIALIZED and
- * RSEQ_CPU_ID_REGISTRATION_FAILED have a special semantic: the
- * former means "rseq uninitialized", and latter means "rseq
- * initialization failed". This value is meant to be read within
- * rseq critical sections and compared with the cpu_id_start
- * value previously read, before performing the commit instruction,
- * or read and compared with the cpu_id_start value before returning
- * a value loaded from a data structure indexed using the
- * cpu_id_start value.
+ * Restartable sequences cpu_id field. Updated by the kernel.
+ * Read by user-space with single-copy atomicity semantics. This
+ * field should only be read by the thread which registered this
+ * data structure. Aligned on 32-bit. Values
+ * RSEQ_CPU_ID_UNINITIALIZED and RSEQ_CPU_ID_REGISTRATION_FAILED
+ * have a special semantic: the former means "rseq uninitialized",
+ * and latter means "rseq initialization failed". This value is
+ * meant to be read within rseq critical sections and compared
+ * with the cpu_id_start value previously read, before performing
+ * the commit instruction, or read and compared with the
+ * cpu_id_start value before returning a value loaded from a data
+ * structure indexed using the cpu_id_start value.
*/
__u32 cpu_id;
/*
@@ -105,27 +102,44 @@ struct rseq {
* targeted by the rseq_cs. Also needs to be set to NULL by user-space
* before reclaiming memory that contains the targeted struct rseq_cs.
*
- * Read and set by the kernel with single-copy atomicity semantics.
- * Set by user-space with single-copy atomicity semantics. Aligned
- * on 64-bit.
+ * Read and set by the kernel. Set by user-space with single-copy
+ * atomicity semantics. This field should only be updated by the
+ * thread which registered this data structure. Aligned on 64-bit.
*/
- LINUX_FIELD_u32_u64(rseq_cs);
+ union {
+ __u64 ptr64;
+#ifdef __LP64__
+ __u64 ptr;
+#else
+ struct {
+#if (defined(__BYTE_ORDER) && (__BYTE_ORDER == __BIG_ENDIAN)) || defined(__BIG_ENDIAN)
+ __u32 padding; /* Initialized to zero. */
+ __u32 ptr32;
+#else /* LITTLE */
+ __u32 ptr32;
+ __u32 padding; /* Initialized to zero. */
+#endif /* ENDIAN */
+ } ptr;
+#endif
+ } rseq_cs;
+
/*
- * - RSEQ_DISABLE flag:
+ * Restartable sequences flags field.
+ *
+ * This field should only be updated by the thread which
+ * registered this data structure. Read by the kernel.
+ * Mainly used for single-stepping through rseq critical sections
+ * with debuggers.
*
- * Fallback fast-track flag for single-stepping.
- * Set by user-space if lack of progress is detected.
- * Cleared by user-space after rseq finish.
- * Read by the kernel.
* - RSEQ_CS_FLAG_NO_RESTART_ON_PREEMPT
- * Inhibit instruction sequence block restart and event
- * counter increment on preemption for this thread.
+ * Inhibit instruction sequence block restart on preemption
+ * for this thread.
* - RSEQ_CS_FLAG_NO_RESTART_ON_SIGNAL
- * Inhibit instruction sequence block restart and event
- * counter increment on signal delivery for this thread.
+ * Inhibit instruction sequence block restart on signal
+ * delivery for this thread.
* - RSEQ_CS_FLAG_NO_RESTART_ON_MIGRATE
- * Inhibit instruction sequence block restart and event
- * counter increment on migration for this thread.
+ * Inhibit instruction sequence block restart on migration for
+ * this thread.
*/
__u32 flags;
} __attribute__((aligned(4 * sizeof(__u64))));
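With LINUX_FIELD_u32_u64() gone, user space writes the rseq_cs descriptor address through the union directly, zeroing the padding word on 32-bit. A minimal sketch of that assignment, assuming the caller passes its own registered struct rseq (single-copy-atomicity requirements on the store are glossed over here):

#include <stdint.h>
#include <linux/rseq.h>

/* Point the kernel at the critical-section descriptor "cs" for the
 * calling thread's registered struct rseq.  On LP64 the 64-bit ptr
 * member is written directly; on 32-bit only ptr32 carries the
 * address and the padding word stays zero. */
static void rseq_set_cs(struct rseq *rs, struct rseq_cs *cs)
{
#ifdef __LP64__
	rs->rseq_cs.ptr = (uintptr_t)cs;
#else
	rs->rseq_cs.ptr.padding = 0;
	rs->rseq_cs.ptr.ptr32 = (uintptr_t)cs;
#endif
}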
diff --git a/include/uapi/linux/rtnetlink.h b/include/uapi/linux/rtnetlink.h
index 7d8502313c99..46399367627f 100644
--- a/include/uapi/linux/rtnetlink.h
+++ b/include/uapi/linux/rtnetlink.h
@@ -150,6 +150,13 @@ enum {
RTM_NEWCACHEREPORT = 96,
#define RTM_NEWCACHEREPORT RTM_NEWCACHEREPORT
+ RTM_NEWCHAIN = 100,
+#define RTM_NEWCHAIN RTM_NEWCHAIN
+ RTM_DELCHAIN,
+#define RTM_DELCHAIN RTM_DELCHAIN
+ RTM_GETCHAIN,
+#define RTM_GETCHAIN RTM_GETCHAIN
+
__RTM_MAX,
#define RTM_MAX (((__RTM_MAX + 3) & ~3) - 1)
};
diff --git a/include/uapi/linux/sctp.h b/include/uapi/linux/sctp.h
index b64d583bf053..b479db5c71d9 100644
--- a/include/uapi/linux/sctp.h
+++ b/include/uapi/linux/sctp.h
@@ -100,6 +100,7 @@ typedef __s32 sctp_assoc_t;
#define SCTP_RECVNXTINFO 33
#define SCTP_DEFAULT_SNDINFO 34
#define SCTP_AUTH_DEACTIVATE_KEY 35
+#define SCTP_REUSE_PORT 36
/* Internal Socket Options. Some of the sctp library functions are
* implemented using these socket options.
@@ -762,6 +763,8 @@ enum sctp_spp_flags {
SPP_SACKDELAY_DISABLE = 1<<6, /*Disable SACK*/
SPP_SACKDELAY = SPP_SACKDELAY_ENABLE | SPP_SACKDELAY_DISABLE,
SPP_HB_TIME_IS_ZERO = 1<<7, /* Set HB delay to 0 */
+ SPP_IPV6_FLOWLABEL = 1<<8,
+ SPP_DSCP = 1<<9,
};
struct sctp_paddrparams {
@@ -772,6 +775,8 @@ struct sctp_paddrparams {
__u32 spp_pathmtu;
__u32 spp_sackdelay;
__u32 spp_flags;
+ __u32 spp_ipv6_flowlabel;
+ __u8 spp_dscp;
} __attribute__((packed, aligned(4)));
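SPP_IPV6_FLOWLABEL and SPP_DSCP travel through the same SCTP_PEER_ADDR_PARAMS option as the existing spp_flags. A hedged sketch of requesting a per-association DSCP; the option-level usage is standard SCTP socket API, but the exact spp_dscp encoding (6-bit codepoint vs. shifted TOS byte) is an assumption worth confirming:

#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/sctp.h>

/* Request DSCP 46 (EF) for all paths of "assoc"; spp_address stays
 * zeroed so the setting applies association-wide.  The shifted-TOS
 * form of spp_dscp used here is an assumption. */
static int sctp_set_dscp(int sk, sctp_assoc_t assoc)
{
	struct sctp_paddrparams p;

	memset(&p, 0, sizeof(p));
	p.spp_assoc_id = assoc;
	p.spp_flags = SPP_DSCP;
	p.spp_dscp = 46 << 2;

	return setsockopt(sk, IPPROTO_SCTP, SCTP_PEER_ADDR_PARAMS,
			  &p, sizeof(p));
}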
/*
diff --git a/include/uapi/linux/smc_diag.h b/include/uapi/linux/smc_diag.h
index 0ae5d4685ba3..ac9e8c96d9bd 100644
--- a/include/uapi/linux/smc_diag.h
+++ b/include/uapi/linux/smc_diag.h
@@ -20,7 +20,7 @@ struct smc_diag_req {
struct smc_diag_msg {
__u8 diag_family;
__u8 diag_state;
- __u8 diag_fallback;
+ __u8 diag_mode;
__u8 diag_shutdown;
struct inet_diag_sockid id;
@@ -28,6 +28,13 @@ struct smc_diag_msg {
__u64 diag_inode;
};
+/* Mode of a connection */
+enum {
+ SMC_DIAG_MODE_SMCR,
+ SMC_DIAG_MODE_FALLBACK_TCP,
+ SMC_DIAG_MODE_SMCD,
+};
+
/* Extensions */
enum {
@@ -35,6 +42,8 @@ enum {
SMC_DIAG_CONNINFO,
SMC_DIAG_LGRINFO,
SMC_DIAG_SHUTDOWN,
+ SMC_DIAG_DMBINFO,
+ SMC_DIAG_FALLBACK,
__SMC_DIAG_MAX,
};
@@ -83,4 +92,18 @@ struct smc_diag_lgrinfo {
struct smc_diag_linkinfo lnk[1];
__u8 role;
};
+
+struct smc_diag_fallback {
+ __u32 reason;
+ __u32 peer_diagnosis;
+};
+
+struct smcd_diag_dmbinfo { /* SMC-D Socket internals */
+ __u32 linkid; /* Link identifier */
+ __u64 peer_gid; /* Peer GID */
+ __u64 my_gid; /* My GID */
+ __u64 token; /* Token of DMB */
+ __u64 peer_token; /* Token of remote DMBE */
+};
+
#endif /* _UAPI_SMC_DIAG_H_ */
diff --git a/include/uapi/linux/snmp.h b/include/uapi/linux/snmp.h
index 750d89120335..f80135e5feaa 100644
--- a/include/uapi/linux/snmp.h
+++ b/include/uapi/linux/snmp.h
@@ -56,6 +56,7 @@ enum
IPSTATS_MIB_ECT1PKTS, /* InECT1Pkts */
IPSTATS_MIB_ECT0PKTS, /* InECT0Pkts */
IPSTATS_MIB_CEPKTS, /* InCEPkts */
+ IPSTATS_MIB_REASM_OVERLAPS, /* ReasmOverlaps */
__IPSTATS_MIB_MAX
};
@@ -279,6 +280,8 @@ enum
LINUX_MIB_TCPDELIVERED, /* TCPDelivered */
LINUX_MIB_TCPDELIVEREDCE, /* TCPDeliveredCE */
LINUX_MIB_TCPACKCOMPRESSED, /* TCPAckCompressed */
+ LINUX_MIB_TCPZEROWINDOWDROP, /* TCPZeroWindowDrop */
+ LINUX_MIB_TCPRCVQDROP, /* TCPRcvQDrop */
__LINUX_MIB_MAX
};
diff --git a/include/uapi/linux/sysctl.h b/include/uapi/linux/sysctl.h
index 6b58371b1f0d..d71013fffaf6 100644
--- a/include/uapi/linux/sysctl.h
+++ b/include/uapi/linux/sysctl.h
@@ -575,7 +575,8 @@ enum {
/* /proc/sys/net/ipv6/icmp */
enum {
- NET_IPV6_ICMP_RATELIMIT=1
+ NET_IPV6_ICMP_RATELIMIT = 1,
+ NET_IPV6_ICMP_ECHO_IGNORE_ALL = 2
};
/* /proc/sys/net/<protocol>/neigh/<dev> */
diff --git a/include/uapi/linux/target_core_user.h b/include/uapi/linux/target_core_user.h
index 6e299349b158..b7b57967d90f 100644
--- a/include/uapi/linux/target_core_user.h
+++ b/include/uapi/linux/target_core_user.h
@@ -44,6 +44,7 @@
#define TCMU_MAILBOX_VERSION 2
#define ALIGN_SIZE 64 /* Should be enough for most CPUs */
#define TCMU_MAILBOX_FLAG_CAP_OOOC (1 << 0) /* Out-of-order completions */
+#define TCMU_MAILBOX_FLAG_CAP_READ_LEN (1 << 1) /* Read data length */
struct tcmu_mailbox {
__u16 version;
@@ -71,6 +72,7 @@ struct tcmu_cmd_entry_hdr {
__u16 cmd_id;
__u8 kflags;
#define TCMU_UFLAG_UNKNOWN_OP 0x1
+#define TCMU_UFLAG_READ_LEN 0x2
__u8 uflags;
} __packed;
@@ -119,7 +121,7 @@ struct tcmu_cmd_entry {
__u8 scsi_status;
__u8 __pad1;
__u16 __pad2;
- __u32 __pad3;
+ __u32 read_len;
char sense_buffer[TCMU_SENSE_BUFFERSIZE];
} rsp;
};
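TCMU_UFLAG_READ_LEN pairs the new mailbox capability bit with the re-purposed read_len response word: when the flag is set, read_len reports how many bytes of the READ buffer are actually valid. A small sketch of how a userspace handler might consume it (the helper and the __packed guard are illustrative):

#ifndef __packed
#define __packed __attribute__((packed))	/* the UAPI header relies on this */
#endif
#include <linux/target_core_user.h>

/* Number of valid READ bytes for a completed command entry.  When the
 * device did not advertise TCMU_MAILBOX_FLAG_CAP_READ_LEN, the full
 * requested length is assumed. */
static __u32 tcmu_valid_read_len(const struct tcmu_cmd_entry *ent,
				 __u32 requested)
{
	if (ent->hdr.uflags & TCMU_UFLAG_READ_LEN)
		return ent->rsp.read_len;

	return requested;
}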
diff --git a/include/uapi/linux/tc_act/tc_pedit.h b/include/uapi/linux/tc_act/tc_pedit.h
index 162d1094c41c..24ec792dacc1 100644
--- a/include/uapi/linux/tc_act/tc_pedit.h
+++ b/include/uapi/linux/tc_act/tc_pedit.h
@@ -17,13 +17,15 @@ enum {
TCA_PEDIT_KEY_EX,
__TCA_PEDIT_MAX
};
+
#define TCA_PEDIT_MAX (__TCA_PEDIT_MAX - 1)
-
+
enum {
TCA_PEDIT_KEY_EX_HTYPE = 1,
TCA_PEDIT_KEY_EX_CMD = 2,
__TCA_PEDIT_KEY_EX_MAX
};
+
#define TCA_PEDIT_KEY_EX_MAX (__TCA_PEDIT_KEY_EX_MAX - 1)
/* TCA_PEDIT_KEY_EX_HDR_TYPE_NETWROK is a special case for legacy users. It
@@ -38,6 +40,7 @@ enum pedit_header_type {
TCA_PEDIT_KEY_EX_HDR_TYPE_UDP = 5,
__PEDIT_HDR_TYPE_MAX,
};
+
#define TCA_PEDIT_HDR_TYPE_MAX (__PEDIT_HDR_TYPE_MAX - 1)
enum pedit_cmd {
@@ -45,6 +48,7 @@ enum pedit_cmd {
TCA_PEDIT_KEY_EX_CMD_ADD = 1,
__PEDIT_CMD_MAX,
};
+
#define TCA_PEDIT_CMD_MAX (__PEDIT_CMD_MAX - 1)
struct tc_pedit_key {
@@ -55,13 +59,14 @@ struct tc_pedit_key {
__u32 offmask;
__u32 shift;
};
-
+
struct tc_pedit_sel {
tc_gen;
unsigned char nkeys;
unsigned char flags;
struct tc_pedit_key keys[0];
};
+
#define tc_pedit tc_pedit_sel
#endif
diff --git a/include/uapi/linux/tc_act/tc_skbedit.h b/include/uapi/linux/tc_act/tc_skbedit.h
index fbcfe27a4e6c..6de6071ebed6 100644
--- a/include/uapi/linux/tc_act/tc_skbedit.h
+++ b/include/uapi/linux/tc_act/tc_skbedit.h
@@ -30,6 +30,7 @@
#define SKBEDIT_F_MARK 0x4
#define SKBEDIT_F_PTYPE 0x8
#define SKBEDIT_F_MASK 0x10
+#define SKBEDIT_F_INHERITDSFIELD 0x20
struct tc_skbedit {
tc_gen;
@@ -45,6 +46,7 @@ enum {
TCA_SKBEDIT_PAD,
TCA_SKBEDIT_PTYPE,
TCA_SKBEDIT_MASK,
+ TCA_SKBEDIT_FLAGS,
__TCA_SKBEDIT_MAX
};
#define TCA_SKBEDIT_MAX (__TCA_SKBEDIT_MAX - 1)
diff --git a/include/uapi/linux/tc_act/tc_tunnel_key.h b/include/uapi/linux/tc_act/tc_tunnel_key.h
index 72bbefe5d1d1..be384d63e1b5 100644
--- a/include/uapi/linux/tc_act/tc_tunnel_key.h
+++ b/include/uapi/linux/tc_act/tc_tunnel_key.h
@@ -36,9 +36,37 @@ enum {
TCA_TUNNEL_KEY_PAD,
TCA_TUNNEL_KEY_ENC_DST_PORT, /* be16 */
TCA_TUNNEL_KEY_NO_CSUM, /* u8 */
+ TCA_TUNNEL_KEY_ENC_OPTS, /* Nested TCA_TUNNEL_KEY_ENC_OPTS_
+ * attributes
+ */
+ TCA_TUNNEL_KEY_ENC_TOS, /* u8 */
+ TCA_TUNNEL_KEY_ENC_TTL, /* u8 */
__TCA_TUNNEL_KEY_MAX,
};
#define TCA_TUNNEL_KEY_MAX (__TCA_TUNNEL_KEY_MAX - 1)
+enum {
+ TCA_TUNNEL_KEY_ENC_OPTS_UNSPEC,
+ TCA_TUNNEL_KEY_ENC_OPTS_GENEVE, /* Nested
+ * TCA_TUNNEL_KEY_ENC_OPTS_
+ * attributes
+ */
+ __TCA_TUNNEL_KEY_ENC_OPTS_MAX,
+};
+
+#define TCA_TUNNEL_KEY_ENC_OPTS_MAX (__TCA_TUNNEL_KEY_ENC_OPTS_MAX - 1)
+
+enum {
+ TCA_TUNNEL_KEY_ENC_OPT_GENEVE_UNSPEC,
+ TCA_TUNNEL_KEY_ENC_OPT_GENEVE_CLASS, /* be16 */
+ TCA_TUNNEL_KEY_ENC_OPT_GENEVE_TYPE, /* u8 */
+ TCA_TUNNEL_KEY_ENC_OPT_GENEVE_DATA, /* 4 to 128 bytes */
+
+ __TCA_TUNNEL_KEY_ENC_OPT_GENEVE_MAX,
+};
+
+#define TCA_TUNNEL_KEY_ENC_OPT_GENEVE_MAX \
+ (__TCA_TUNNEL_KEY_ENC_OPT_GENEVE_MAX - 1)
+
#endif
diff --git a/include/uapi/linux/tcp.h b/include/uapi/linux/tcp.h
index 29eb659aa77a..e02d31986ff9 100644
--- a/include/uapi/linux/tcp.h
+++ b/include/uapi/linux/tcp.h
@@ -127,6 +127,10 @@ enum {
#define TCP_CM_INQ TCP_INQ
+#define TCP_REPAIR_ON 1
+#define TCP_REPAIR_OFF 0
+#define TCP_REPAIR_OFF_NO_WP -1 /* Turn off without window probes */
+
struct tcp_repair_opt {
__u32 opt_code;
__u32 opt_val;
@@ -231,6 +235,11 @@ struct tcp_info {
__u32 tcpi_delivered;
__u32 tcpi_delivered_ce;
+
+ __u64 tcpi_bytes_sent; /* RFC4898 tcpEStatsPerfHCDataOctetsOut */
+ __u64 tcpi_bytes_retrans; /* RFC4898 tcpEStatsPerfOctetsRetrans */
+ __u32 tcpi_dsack_dups; /* RFC4898 tcpEStatsStackDSACKDups */
+ __u32 tcpi_reord_seen; /* reordering events seen */
};
/* netlink attributes types for SCM_TIMESTAMPING_OPT_STATS */
@@ -253,7 +262,10 @@ enum {
TCP_NLA_SND_SSTHRESH, /* Slow start size threshold */
TCP_NLA_DELIVERED, /* Data pkts delivered incl. out-of-order */
TCP_NLA_DELIVERED_CE, /* Like above but only ones w/ CE marks */
-
+ TCP_NLA_BYTES_SENT, /* Data bytes sent including retransmission */
+ TCP_NLA_BYTES_RETRANS, /* Data bytes retransmitted */
+ TCP_NLA_DSACK_DUPS, /* DSACK blocks received */
+ TCP_NLA_REORD_SEEN, /* reordering events seen */
};
/* for TCP_MD5SIG socket option */
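TCP_REPAIR_OFF_NO_WP complements the existing on/off values: leaving repair mode with -1 skips the window probes that a plain TCP_REPAIR_OFF would trigger. A minimal checkpoint/restore-style sketch (requires CAP_NET_ADMIN; the helper itself is illustrative):

#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/tcp.h>

/* Enter or leave TCP repair mode; "mode" is TCP_REPAIR_ON,
 * TCP_REPAIR_OFF or TCP_REPAIR_OFF_NO_WP, the last of which leaves
 * repair mode without sending window probes to the peer. */
static int tcp_set_repair(int sk, int mode)
{
	return setsockopt(sk, IPPROTO_TCP, TCP_REPAIR, &mode, sizeof(mode));
}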
diff --git a/include/uapi/linux/time.h b/include/uapi/linux/time.h
index fcf936656493..6b56a2208be7 100644
--- a/include/uapi/linux/time.h
+++ b/include/uapi/linux/time.h
@@ -49,6 +49,13 @@ struct __kernel_timespec {
};
#endif
+#ifndef __kernel_itimerspec
+struct __kernel_itimerspec {
+ struct __kernel_timespec it_interval; /* timer period */
+ struct __kernel_timespec it_value; /* timer expiration */
+};
+#endif
+
/*
* legacy timeval structure, only embedded in structures that
* traditionally used 'timeval' to pass time intervals (not absolute
diff --git a/include/uapi/linux/tipc_netlink.h b/include/uapi/linux/tipc_netlink.h
index 85c11982c89b..0ebe02ef1a86 100644
--- a/include/uapi/linux/tipc_netlink.h
+++ b/include/uapi/linux/tipc_netlink.h
@@ -121,6 +121,7 @@ enum {
TIPC_NLA_SOCK_TIPC_STATE, /* u32 */
TIPC_NLA_SOCK_COOKIE, /* u64 */
TIPC_NLA_SOCK_PAD, /* flag */
+ TIPC_NLA_SOCK_GROUP, /* nest */
__TIPC_NLA_SOCK_MAX,
TIPC_NLA_SOCK_MAX = __TIPC_NLA_SOCK_MAX - 1
@@ -233,6 +234,19 @@ enum {
TIPC_NLA_MON_PEER_MAX = __TIPC_NLA_MON_PEER_MAX - 1
};
+/* Nest, socket group info */
+enum {
+ TIPC_NLA_SOCK_GROUP_ID, /* u32 */
+ TIPC_NLA_SOCK_GROUP_OPEN, /* flag */
+ TIPC_NLA_SOCK_GROUP_NODE_SCOPE, /* flag */
+ TIPC_NLA_SOCK_GROUP_CLUSTER_SCOPE, /* flag */
+ TIPC_NLA_SOCK_GROUP_INSTANCE, /* u32 */
+ TIPC_NLA_SOCK_GROUP_BC_SEND_NEXT, /* u32 */
+
+ __TIPC_NLA_SOCK_GROUP_MAX,
+ TIPC_NLA_SOCK_GROUP_MAX = __TIPC_NLA_SOCK_GROUP_MAX - 1
+};
+
/* Nest, connection info */
enum {
TIPC_NLA_CON_UNSPEC,
diff --git a/include/uapi/linux/types_32_64.h b/include/uapi/linux/types_32_64.h
deleted file mode 100644
index 0a87ace34a57..000000000000
--- a/include/uapi/linux/types_32_64.h
+++ /dev/null
@@ -1,50 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
-#ifndef _UAPI_LINUX_TYPES_32_64_H
-#define _UAPI_LINUX_TYPES_32_64_H
-
-/*
- * linux/types_32_64.h
- *
- * Integer type declaration for pointers across 32-bit and 64-bit systems.
- *
- * Copyright (c) 2015-2018 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
- */
-
-#ifdef __KERNEL__
-# include <linux/types.h>
-#else
-# include <stdint.h>
-#endif
-
-#include <asm/byteorder.h>
-
-#ifdef __BYTE_ORDER
-# if (__BYTE_ORDER == __BIG_ENDIAN)
-# define LINUX_BYTE_ORDER_BIG_ENDIAN
-# else
-# define LINUX_BYTE_ORDER_LITTLE_ENDIAN
-# endif
-#else
-# ifdef __BIG_ENDIAN
-# define LINUX_BYTE_ORDER_BIG_ENDIAN
-# else
-# define LINUX_BYTE_ORDER_LITTLE_ENDIAN
-# endif
-#endif
-
-#ifdef __LP64__
-# define LINUX_FIELD_u32_u64(field) __u64 field
-# define LINUX_FIELD_u32_u64_INIT_ONSTACK(field, v) field = (intptr_t)v
-#else
-# ifdef LINUX_BYTE_ORDER_BIG_ENDIAN
-# define LINUX_FIELD_u32_u64(field) __u32 field ## _padding, field
-# define LINUX_FIELD_u32_u64_INIT_ONSTACK(field, v) \
- field ## _padding = 0, field = (intptr_t)v
-# else
-# define LINUX_FIELD_u32_u64(field) __u32 field, field ## _padding
-# define LINUX_FIELD_u32_u64_INIT_ONSTACK(field, v) \
- field = (intptr_t)v, field ## _padding = 0
-# endif
-#endif
-
-#endif /* _UAPI_LINUX_TYPES_32_64_H */
diff --git a/include/uapi/linux/usb/audio.h b/include/uapi/linux/usb/audio.h
index 74e520fb944f..ddc5396800aa 100644
--- a/include/uapi/linux/usb/audio.h
+++ b/include/uapi/linux/usb/audio.h
@@ -390,33 +390,64 @@ static inline __u8 uac_processing_unit_iChannelNames(struct uac_processing_unit_
static inline __u8 uac_processing_unit_bControlSize(struct uac_processing_unit_descriptor *desc,
int protocol)
{
- return (protocol == UAC_VERSION_1) ?
- desc->baSourceID[desc->bNrInPins + 4] :
- 2; /* in UAC2, this value is constant */
+ switch (protocol) {
+ case UAC_VERSION_1:
+ return desc->baSourceID[desc->bNrInPins + 4];
+ case UAC_VERSION_2:
+ return 2; /* in UAC2, this value is constant */
+ case UAC_VERSION_3:
+ return 4; /* in UAC3, this value is constant */
+ default:
+ return 1;
+ }
}
static inline __u8 *uac_processing_unit_bmControls(struct uac_processing_unit_descriptor *desc,
int protocol)
{
- return (protocol == UAC_VERSION_1) ?
- &desc->baSourceID[desc->bNrInPins + 5] :
- &desc->baSourceID[desc->bNrInPins + 6];
+ switch (protocol) {
+ case UAC_VERSION_1:
+ return &desc->baSourceID[desc->bNrInPins + 5];
+ case UAC_VERSION_2:
+ return &desc->baSourceID[desc->bNrInPins + 6];
+ case UAC_VERSION_3:
+ return &desc->baSourceID[desc->bNrInPins + 2];
+ default:
+ return NULL;
+ }
}
static inline __u8 uac_processing_unit_iProcessing(struct uac_processing_unit_descriptor *desc,
int protocol)
{
__u8 control_size = uac_processing_unit_bControlSize(desc, protocol);
- return *(uac_processing_unit_bmControls(desc, protocol)
- + control_size);
+
+ switch (protocol) {
+ case UAC_VERSION_1:
+ case UAC_VERSION_2:
+ default:
+ return *(uac_processing_unit_bmControls(desc, protocol)
+ + control_size);
+ case UAC_VERSION_3:
+ return 0; /* UAC3 does not have this field */
+ }
}
static inline __u8 *uac_processing_unit_specific(struct uac_processing_unit_descriptor *desc,
int protocol)
{
__u8 control_size = uac_processing_unit_bControlSize(desc, protocol);
- return uac_processing_unit_bmControls(desc, protocol)
+
+ switch (protocol) {
+ case UAC_VERSION_1:
+ case UAC_VERSION_2:
+ default:
+ return uac_processing_unit_bmControls(desc, protocol)
+ control_size + 1;
+ case UAC_VERSION_3:
+ return uac_processing_unit_bmControls(desc, protocol)
+ + control_size;
+ }
}
/* 4.5.2 Class-Specific AS Interface Descriptor */
diff --git a/include/uapi/linux/usb/g_uvc.h b/include/uapi/linux/usb/g_uvc.h
new file mode 100644
index 000000000000..3c9ee3020cbb
--- /dev/null
+++ b/include/uapi/linux/usb/g_uvc.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * g_uvc.h -- USB Video Class Gadget driver API
+ *
+ * Copyright (C) 2009-2010 Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+ */
+
+#ifndef __LINUX_USB_G_UVC_H
+#define __LINUX_USB_G_UVC_H
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+#include <linux/usb/ch9.h>
+
+#define UVC_EVENT_FIRST (V4L2_EVENT_PRIVATE_START + 0)
+#define UVC_EVENT_CONNECT (V4L2_EVENT_PRIVATE_START + 0)
+#define UVC_EVENT_DISCONNECT (V4L2_EVENT_PRIVATE_START + 1)
+#define UVC_EVENT_STREAMON (V4L2_EVENT_PRIVATE_START + 2)
+#define UVC_EVENT_STREAMOFF (V4L2_EVENT_PRIVATE_START + 3)
+#define UVC_EVENT_SETUP (V4L2_EVENT_PRIVATE_START + 4)
+#define UVC_EVENT_DATA (V4L2_EVENT_PRIVATE_START + 5)
+#define UVC_EVENT_LAST (V4L2_EVENT_PRIVATE_START + 5)
+
+struct uvc_request_data {
+ __s32 length;
+ __u8 data[60];
+};
+
+struct uvc_event {
+ union {
+ enum usb_device_speed speed;
+ struct usb_ctrlrequest req;
+ struct uvc_request_data data;
+ };
+};
+
+#define UVCIOC_SEND_RESPONSE _IOW('U', 1, struct uvc_request_data)
+
+#endif /* __LINUX_USB_G_UVC_H */
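The gadget events reuse the regular V4L2 event machinery: userspace subscribes to the UVC_EVENT_* codes and answers control traffic with UVCIOC_SEND_RESPONSE. A condensed, hedged sketch of the subscribe/respond path (error handling and the zero-length reply are placeholders):

#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>	/* provides V4L2_EVENT_PRIVATE_START */
#include <linux/usb/g_uvc.h>

/* Subscribe to gadget control events on the uvc function's video node. */
static int uvc_gadget_subscribe(int fd)
{
	struct v4l2_event_subscription sub;

	memset(&sub, 0, sizeof(sub));
	sub.type = UVC_EVENT_SETUP;
	if (ioctl(fd, VIDIOC_SUBSCRIBE_EVENT, &sub) < 0)
		return -1;
	sub.type = UVC_EVENT_DATA;
	return ioctl(fd, VIDIOC_SUBSCRIBE_EVENT, &sub);
}

/* Dequeue one event and answer it with an empty reply; a real handler
 * would inspect the embedded usb_ctrlrequest first. */
static int uvc_gadget_handle_event(int fd)
{
	struct v4l2_event ev;
	struct uvc_request_data resp;

	if (ioctl(fd, VIDIOC_DQEVENT, &ev) < 0)
		return -1;

	memset(&resp, 0, sizeof(resp));
	return ioctl(fd, UVCIOC_SEND_RESPONSE, &resp);
}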
diff --git a/include/uapi/linux/usb/tmc.h b/include/uapi/linux/usb/tmc.h
index 03f6adc8f35b..729af2f861a4 100644
--- a/include/uapi/linux/usb/tmc.h
+++ b/include/uapi/linux/usb/tmc.h
@@ -16,6 +16,8 @@
#ifndef __LINUX_USB_TMC_H
#define __LINUX_USB_TMC_H
+#include <linux/types.h> /* __u8 etc */
+
/* USB TMC status values */
#define USBTMC_STATUS_SUCCESS 0x01
#define USBTMC_STATUS_PENDING 0x02
@@ -38,6 +40,11 @@
#define USBTMC488_REQUEST_GOTO_LOCAL 161
#define USBTMC488_REQUEST_LOCAL_LOCKOUT 162
+struct usbtmc_termchar {
+ __u8 term_char;
+ __u8 term_char_enabled;
+} __attribute__ ((packed));
+
/* Request values for USBTMC driver's ioctl entry point */
#define USBTMC_IOC_NR 91
#define USBTMC_IOCTL_INDICATOR_PULSE _IO(USBTMC_IOC_NR, 1)
@@ -46,11 +53,17 @@
#define USBTMC_IOCTL_ABORT_BULK_IN _IO(USBTMC_IOC_NR, 4)
#define USBTMC_IOCTL_CLEAR_OUT_HALT _IO(USBTMC_IOC_NR, 6)
#define USBTMC_IOCTL_CLEAR_IN_HALT _IO(USBTMC_IOC_NR, 7)
+#define USBTMC_IOCTL_GET_TIMEOUT _IOR(USBTMC_IOC_NR, 9, __u32)
+#define USBTMC_IOCTL_SET_TIMEOUT _IOW(USBTMC_IOC_NR, 10, __u32)
+#define USBTMC_IOCTL_EOM_ENABLE _IOW(USBTMC_IOC_NR, 11, __u8)
+#define USBTMC_IOCTL_CONFIG_TERMCHAR _IOW(USBTMC_IOC_NR, 12, struct usbtmc_termchar)
+
#define USBTMC488_IOCTL_GET_CAPS _IOR(USBTMC_IOC_NR, 17, unsigned char)
#define USBTMC488_IOCTL_READ_STB _IOR(USBTMC_IOC_NR, 18, unsigned char)
#define USBTMC488_IOCTL_REN_CONTROL _IOW(USBTMC_IOC_NR, 19, unsigned char)
#define USBTMC488_IOCTL_GOTO_LOCAL _IO(USBTMC_IOC_NR, 20)
#define USBTMC488_IOCTL_LOCAL_LOCKOUT _IO(USBTMC_IOC_NR, 21)
+#define USBTMC488_IOCTL_TRIGGER _IO(USBTMC_IOC_NR, 22)
/* Driver encoded usb488 capabilities */
#define USBTMC488_CAPABILITY_TRIGGER 1
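The new ioctls make per-file-handle transfer attributes adjustable after open. A short sketch against a /dev/usbtmcN node (the device path is an assumption):

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/usb/tmc.h>

int main(void)
{
	struct usbtmc_termchar tc = { .term_char = '\n', .term_char_enabled = 1 };
	__u32 timeout_ms = 3000;
	int fd = open("/dev/usbtmc0", O_RDWR);

	if (fd < 0)
		return 1;

	/* Terminate reads on newline, cap transfers at 3 s, then send an
	 * IEEE-488 trigger to the instrument. */
	ioctl(fd, USBTMC_IOCTL_CONFIG_TERMCHAR, &tc);
	ioctl(fd, USBTMC_IOCTL_SET_TIMEOUT, &timeout_ms);
	ioctl(fd, USBTMC488_IOCTL_TRIGGER);

	close(fd);
	return 0;
}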
diff --git a/include/uapi/linux/uvcvideo.h b/include/uapi/linux/uvcvideo.h
index 020714d2c5bd..f80f05b3c423 100644
--- a/include/uapi/linux/uvcvideo.h
+++ b/include/uapi/linux/uvcvideo.h
@@ -28,6 +28,8 @@
#define UVC_CTRL_FLAG_RESTORE (1 << 6)
/* Control can be updated by the camera. */
#define UVC_CTRL_FLAG_AUTO_UPDATE (1 << 7)
+/* Control supports asynchronous reporting */
+#define UVC_CTRL_FLAG_ASYNCHRONOUS (1 << 8)
#define UVC_CTRL_FLAG_GET_RANGE \
(UVC_CTRL_FLAG_GET_CUR | UVC_CTRL_FLAG_GET_MIN | \
diff --git a/include/uapi/linux/v4l2-controls.h b/include/uapi/linux/v4l2-controls.h
index 8d473c979b61..e4ee10ee917d 100644
--- a/include/uapi/linux/v4l2-controls.h
+++ b/include/uapi/linux/v4l2-controls.h
@@ -188,7 +188,7 @@ enum v4l2_colorfx {
/* The base for the imx driver controls.
* We reserve 16 controls for this driver. */
-#define V4L2_CID_USER_IMX_BASE (V4L2_CID_USER_BASE + 0x1090)
+#define V4L2_CID_USER_IMX_BASE (V4L2_CID_USER_BASE + 0x10b0)
/* MPEG-class control IDs */
/* The MPEG controls are applicable to all codec controls
@@ -587,7 +587,23 @@ enum v4l2_vp8_golden_frame_sel {
#define V4L2_CID_MPEG_VIDEO_VPX_MAX_QP (V4L2_CID_MPEG_BASE+508)
#define V4L2_CID_MPEG_VIDEO_VPX_I_FRAME_QP (V4L2_CID_MPEG_BASE+509)
#define V4L2_CID_MPEG_VIDEO_VPX_P_FRAME_QP (V4L2_CID_MPEG_BASE+510)
-#define V4L2_CID_MPEG_VIDEO_VPX_PROFILE (V4L2_CID_MPEG_BASE+511)
+
+#define V4L2_CID_MPEG_VIDEO_VP8_PROFILE (V4L2_CID_MPEG_BASE+511)
+enum v4l2_mpeg_video_vp8_profile {
+ V4L2_MPEG_VIDEO_VP8_PROFILE_0 = 0,
+ V4L2_MPEG_VIDEO_VP8_PROFILE_1 = 1,
+ V4L2_MPEG_VIDEO_VP8_PROFILE_2 = 2,
+ V4L2_MPEG_VIDEO_VP8_PROFILE_3 = 3,
+};
+/* Deprecated alias for compatibility reasons. */
+#define V4L2_CID_MPEG_VIDEO_VPX_PROFILE V4L2_CID_MPEG_VIDEO_VP8_PROFILE
+#define V4L2_CID_MPEG_VIDEO_VP9_PROFILE (V4L2_CID_MPEG_BASE+512)
+enum v4l2_mpeg_video_vp9_profile {
+ V4L2_MPEG_VIDEO_VP9_PROFILE_0 = 0,
+ V4L2_MPEG_VIDEO_VP9_PROFILE_1 = 1,
+ V4L2_MPEG_VIDEO_VP9_PROFILE_2 = 2,
+ V4L2_MPEG_VIDEO_VP9_PROFILE_3 = 3,
+};
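V4L2_CID_MPEG_VIDEO_VPX_PROFILE survives only as a compatibility alias; new code is expected to use the per-codec VP8/VP9 controls and their enum values. A minimal sketch of selecting VP9 profile 2 on an encoder node (the device path is illustrative):

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

int main(void)
{
	struct v4l2_control ctrl = {
		.id = V4L2_CID_MPEG_VIDEO_VP9_PROFILE,
		.value = V4L2_MPEG_VIDEO_VP9_PROFILE_2,
	};
	int fd = open("/dev/video0", O_RDWR);

	if (fd < 0)
		return 1;

	/* Menu controls reject profiles the encoder cannot produce. */
	if (ioctl(fd, VIDIOC_S_CTRL, &ctrl) < 0) {
		close(fd);
		return 1;
	}

	close(fd);
	return 0;
}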
/* CIDs for HEVC encoding. */
diff --git a/include/uapi/linux/v4l2-subdev.h b/include/uapi/linux/v4l2-subdev.h
index c95a53e6743c..03970ce30741 100644
--- a/include/uapi/linux/v4l2-subdev.h
+++ b/include/uapi/linux/v4l2-subdev.h
@@ -170,8 +170,12 @@ struct v4l2_subdev_selection {
#define VIDIOC_SUBDEV_G_SELECTION _IOWR('V', 61, struct v4l2_subdev_selection)
#define VIDIOC_SUBDEV_S_SELECTION _IOWR('V', 62, struct v4l2_subdev_selection)
/* The following ioctls are identical to the ioctls in videodev2.h */
+#define VIDIOC_SUBDEV_G_STD _IOR('V', 23, v4l2_std_id)
+#define VIDIOC_SUBDEV_S_STD _IOW('V', 24, v4l2_std_id)
+#define VIDIOC_SUBDEV_ENUMSTD _IOWR('V', 25, struct v4l2_standard)
#define VIDIOC_SUBDEV_G_EDID _IOWR('V', 40, struct v4l2_edid)
#define VIDIOC_SUBDEV_S_EDID _IOWR('V', 41, struct v4l2_edid)
+#define VIDIOC_SUBDEV_QUERYSTD _IOR('V', 63, v4l2_std_id)
#define VIDIOC_SUBDEV_S_DV_TIMINGS _IOWR('V', 87, struct v4l2_dv_timings)
#define VIDIOC_SUBDEV_G_DV_TIMINGS _IOWR('V', 88, struct v4l2_dv_timings)
#define VIDIOC_SUBDEV_ENUM_DV_TIMINGS _IOWR('V', 98, struct v4l2_enum_dv_timings)
diff --git a/include/uapi/linux/vhost.h b/include/uapi/linux/vhost.h
index c51f8e5cc608..b1e22c40c4b6 100644
--- a/include/uapi/linux/vhost.h
+++ b/include/uapi/linux/vhost.h
@@ -65,6 +65,7 @@ struct vhost_iotlb_msg {
};
#define VHOST_IOTLB_MSG 0x1
+#define VHOST_IOTLB_MSG_V2 0x2
struct vhost_msg {
int type;
@@ -74,6 +75,15 @@ struct vhost_msg {
};
};
+struct vhost_msg_v2 {
+ __u32 type;
+ __u32 reserved;
+ union {
+ struct vhost_iotlb_msg iotlb;
+ __u8 padding[64];
+ };
+};
+
struct vhost_memory_region {
__u64 guest_phys_addr;
__u64 memory_size; /* bytes */
@@ -160,6 +170,14 @@ struct vhost_memory {
#define VHOST_GET_VRING_BUSYLOOP_TIMEOUT _IOW(VHOST_VIRTIO, 0x24, \
struct vhost_vring_state)
+/* Set or get vhost backend capability */
+
+/* Use message type V2 */
+#define VHOST_BACKEND_F_IOTLB_MSG_V2 0x1
+
+#define VHOST_SET_BACKEND_FEATURES _IOW(VHOST_VIRTIO, 0x25, __u64)
+#define VHOST_GET_BACKEND_FEATURES _IOW(VHOST_VIRTIO, 0x26, __u64)
+
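VHOST_GET/SET_BACKEND_FEATURES negotiate backend-only capabilities such as the v2 IOTLB message layout. A hedged sketch of that negotiation on an already-open vhost device fd:

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/vhost.h>

/* Negotiate the v2 IOTLB message layout so later device-IOTLB updates
 * use struct vhost_msg_v2 instead of struct vhost_msg. */
static int vhost_enable_iotlb_v2(int vhost_fd)
{
	uint64_t features = 0;

	if (ioctl(vhost_fd, VHOST_GET_BACKEND_FEATURES, &features) < 0)
		return -1;
	if (!(features & VHOST_BACKEND_F_IOTLB_MSG_V2))
		return -1;	/* backend predates the v2 format */

	features = VHOST_BACKEND_F_IOTLB_MSG_V2;
	return ioctl(vhost_fd, VHOST_SET_BACKEND_FEATURES, &features);
}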
/* VHOST_NET specific defines */
/* Attach virtio net ring to a raw socket, or tap device.
diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h
index 600877be5c22..5d1a3685bea9 100644
--- a/include/uapi/linux/videodev2.h
+++ b/include/uapi/linux/videodev2.h
@@ -522,6 +522,7 @@ struct v4l2_pix_format {
/* Grey bit-packed formats */
#define V4L2_PIX_FMT_Y10BPACK v4l2_fourcc('Y', '1', '0', 'B') /* 10 Greyscale bit-packed */
+#define V4L2_PIX_FMT_Y10P v4l2_fourcc('Y', '1', '0', 'P') /* 10 Greyscale, MIPI RAW10 packed */
/* Palette formats */
#define V4L2_PIX_FMT_PAL8 v4l2_fourcc('P', 'A', 'L', '8') /* 8 8-bit palette */
@@ -609,6 +610,11 @@ struct v4l2_pix_format {
#define V4L2_PIX_FMT_SGBRG12P v4l2_fourcc('p', 'G', 'C', 'C')
#define V4L2_PIX_FMT_SGRBG12P v4l2_fourcc('p', 'g', 'C', 'C')
#define V4L2_PIX_FMT_SRGGB12P v4l2_fourcc('p', 'R', 'C', 'C')
+ /* 14bit raw bayer packed, 7 bytes for every 4 pixels */
+#define V4L2_PIX_FMT_SBGGR14P v4l2_fourcc('p', 'B', 'E', 'E')
+#define V4L2_PIX_FMT_SGBRG14P v4l2_fourcc('p', 'G', 'E', 'E')
+#define V4L2_PIX_FMT_SGRBG14P v4l2_fourcc('p', 'g', 'E', 'E')
+#define V4L2_PIX_FMT_SRGGB14P v4l2_fourcc('p', 'R', 'E', 'E')
#define V4L2_PIX_FMT_SBGGR16 v4l2_fourcc('B', 'Y', 'R', '2') /* 16 BGBG.. GRGR.. */
#define V4L2_PIX_FMT_SGBRG16 v4l2_fourcc('G', 'B', '1', '6') /* 16 GBGB.. RGRG.. */
#define V4L2_PIX_FMT_SGRBG16 v4l2_fourcc('G', 'R', '1', '6') /* 16 GRGR.. BGBG.. */
@@ -636,6 +642,7 @@ struct v4l2_pix_format {
#define V4L2_PIX_FMT_VP8 v4l2_fourcc('V', 'P', '8', '0') /* VP8 */
#define V4L2_PIX_FMT_VP9 v4l2_fourcc('V', 'P', '9', '0') /* VP9 */
#define V4L2_PIX_FMT_HEVC v4l2_fourcc('H', 'E', 'V', 'C') /* HEVC aka H.265 */
+#define V4L2_PIX_FMT_FWHT v4l2_fourcc('F', 'W', 'H', 'T') /* Fast Walsh Hadamard Transform (vicodec) */
/* Vendor-specific formats */
#define V4L2_PIX_FMT_CPIA1 v4l2_fourcc('C', 'P', 'I', 'A') /* cpia1 YUV */
@@ -2310,7 +2317,6 @@ struct v4l2_create_buffers {
*
*/
#define VIDIOC_QUERYCAP _IOR('V', 0, struct v4l2_capability)
-#define VIDIOC_RESERVED _IO('V', 1)
#define VIDIOC_ENUM_FMT _IOWR('V', 2, struct v4l2_fmtdesc)
#define VIDIOC_G_FMT _IOWR('V', 4, struct v4l2_format)
#define VIDIOC_S_FMT _IOWR('V', 5, struct v4l2_format)
diff --git a/include/uapi/linux/xfrm.h b/include/uapi/linux/xfrm.h
index e3af2859188b..5f3b9fec7b5f 100644
--- a/include/uapi/linux/xfrm.h
+++ b/include/uapi/linux/xfrm.h
@@ -305,9 +305,12 @@ enum xfrm_attr_type_t {
XFRMA_ADDRESS_FILTER, /* struct xfrm_address_filter */
XFRMA_PAD,
XFRMA_OFFLOAD_DEV, /* struct xfrm_state_offload */
- XFRMA_OUTPUT_MARK, /* __u32 */
+ XFRMA_SET_MARK, /* __u32 */
+ XFRMA_SET_MARK_MASK, /* __u32 */
+ XFRMA_IF_ID, /* __u32 */
__XFRMA_MAX
+#define XFRMA_OUTPUT_MARK XFRMA_SET_MARK /* Compatibility */
#define XFRMA_MAX (__XFRMA_MAX - 1)
};
diff --git a/include/uapi/mtd/ubi-user.h b/include/uapi/mtd/ubi-user.h
index 5b04a494d139..aad3b6201fc0 100644
--- a/include/uapi/mtd/ubi-user.h
+++ b/include/uapi/mtd/ubi-user.h
@@ -285,6 +285,20 @@ struct ubi_attach_req {
__s8 padding[10];
};
+/*
+ * UBI volume flags.
+ *
+ * @UBI_VOL_SKIP_CRC_CHECK_FLG: skip the CRC check done on a static volume at
+ * open time. Only valid for static volumes and
+ * should only be used if the volume user has a
+ * way to verify data integrity
+ */
+enum {
+ UBI_VOL_SKIP_CRC_CHECK_FLG = 0x1,
+};
+
+#define UBI_VOL_VALID_FLGS (UBI_VOL_SKIP_CRC_CHECK_FLG)
+
/**
* struct ubi_mkvol_req - volume description data structure used in
* volume creation requests.
@@ -292,7 +306,7 @@ struct ubi_attach_req {
* @alignment: volume alignment
* @bytes: volume size in bytes
* @vol_type: volume type (%UBI_DYNAMIC_VOLUME or %UBI_STATIC_VOLUME)
- * @padding1: reserved for future, not used, has to be zeroed
+ * @flags: volume flags (%UBI_VOL_SKIP_CRC_CHECK_FLG)
* @name_len: volume name length
* @padding2: reserved for future, not used, has to be zeroed
* @name: volume name
@@ -321,7 +335,7 @@ struct ubi_mkvol_req {
__s32 alignment;
__s64 bytes;
__s8 vol_type;
- __s8 padding1;
+ __u8 flags;
__s16 name_len;
__s8 padding2[4];
char name[UBI_MAX_VOLUME_NAME + 1];
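Re-purposing padding1 as flags lets volume creation opt out of the open-time CRC check on static volumes. A sketch of a creation request using the new flag; UBI_IOCMKVOL and the other constants come from the same header, while the volume parameters are illustrative:

#include <string.h>
#include <sys/ioctl.h>
#include <mtd/ubi-user.h>

/* Create a 1 MiB static volume whose open-time CRC check is skipped;
 * only sensible when the user verifies the data by other means. */
static int ubi_make_nocrc_volume(int ubi_fd)
{
	struct ubi_mkvol_req req;

	memset(&req, 0, sizeof(req));
	req.vol_id = UBI_VOL_NUM_AUTO;
	req.alignment = 1;
	req.bytes = 1024 * 1024;
	req.vol_type = UBI_STATIC_VOLUME;
	req.flags = UBI_VOL_SKIP_CRC_CHECK_FLG;
	req.name_len = (__s16)strlen("blob");
	strncpy(req.name, "blob", UBI_MAX_VOLUME_NAME);

	return ioctl(ubi_fd, UBI_IOCMKVOL, &req);
}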
diff --git a/include/uapi/rdma/cxgb4-abi.h b/include/uapi/rdma/cxgb4-abi.h
index a159ba8dcf8f..f85ec1a3f727 100644
--- a/include/uapi/rdma/cxgb4-abi.h
+++ b/include/uapi/rdma/cxgb4-abi.h
@@ -44,6 +44,16 @@
* In particular do not use pointer types -- pass pointers in __aligned_u64
* instead.
*/
+
+enum {
+ C4IW_64B_CQE = (1 << 0)
+};
+
+struct c4iw_create_cq {
+ __u32 flags;
+ __u32 reserved;
+};
+
struct c4iw_create_cq_resp {
__aligned_u64 key;
__aligned_u64 gts_key;
@@ -51,11 +61,12 @@ struct c4iw_create_cq_resp {
__u32 cqid;
__u32 size;
__u32 qid_mask;
- __u32 reserved; /* explicit padding (optional for i386) */
+ __u32 flags;
};
enum {
- C4IW_QPF_ONCHIP = (1 << 0)
+ C4IW_QPF_ONCHIP = (1 << 0),
+ C4IW_QPF_WRITE_W_IMM = (1 << 1)
};
struct c4iw_create_qp_resp {
@@ -74,6 +85,23 @@ struct c4iw_create_qp_resp {
__u32 flags;
};
+struct c4iw_create_srq_resp {
+ __aligned_u64 srq_key;
+ __aligned_u64 srq_db_gts_key;
+ __aligned_u64 srq_memsize;
+ __u32 srqid;
+ __u32 srq_size;
+ __u32 rqt_abs_idx;
+ __u32 qid_mask;
+ __u32 flags;
+ __u32 reserved; /* explicit padding */
+};
+
+enum {
+ /* HW supports SRQ_LIMIT_REACHED event */
+ T4_SRQ_LIMIT_SUPPORT = 1 << 0,
+};
+
struct c4iw_alloc_ucontext_resp {
__aligned_u64 status_page_key;
__u32 status_page_size;
diff --git a/include/uapi/rdma/hns-abi.h b/include/uapi/rdma/hns-abi.h
index 78613b609fa8..c1f87735514f 100644
--- a/include/uapi/rdma/hns-abi.h
+++ b/include/uapi/rdma/hns-abi.h
@@ -53,6 +53,7 @@ struct hns_roce_ib_create_qp {
__u8 log_sq_stride;
__u8 sq_no_prefetch;
__u8 reserved[5];
+ __aligned_u64 sdb_addr;
};
struct hns_roce_ib_create_qp_resp {
diff --git a/include/uapi/rdma/ib_user_ioctl_cmds.h b/include/uapi/rdma/ib_user_ioctl_cmds.h
index 888ac5975a6c..2c881aaf05c2 100644
--- a/include/uapi/rdma/ib_user_ioctl_cmds.h
+++ b/include/uapi/rdma/ib_user_ioctl_cmds.h
@@ -79,7 +79,7 @@ enum uverbs_attrs_destroy_cq_cmd_attr_ids {
};
enum uverbs_attrs_create_flow_action_esp {
- UVERBS_ATTR_FLOW_ACTION_ESP_HANDLE,
+ UVERBS_ATTR_CREATE_FLOW_ACTION_ESP_HANDLE,
UVERBS_ATTR_FLOW_ACTION_ESP_ATTRS,
UVERBS_ATTR_FLOW_ACTION_ESP_ESN,
UVERBS_ATTR_FLOW_ACTION_ESP_KEYMAT,
@@ -87,6 +87,11 @@ enum uverbs_attrs_create_flow_action_esp {
UVERBS_ATTR_FLOW_ACTION_ESP_ENCAP,
};
+enum uverbs_attrs_modify_flow_action_esp {
+ UVERBS_ATTR_MODIFY_FLOW_ACTION_ESP_HANDLE =
+ UVERBS_ATTR_CREATE_FLOW_ACTION_ESP_HANDLE,
+};
+
enum uverbs_attrs_destroy_flow_action_esp {
UVERBS_ATTR_DESTROY_FLOW_ACTION_HANDLE,
};
diff --git a/include/uapi/rdma/ib_user_ioctl_verbs.h b/include/uapi/rdma/ib_user_ioctl_verbs.h
index 625545d862d7..6cdf192070a2 100644
--- a/include/uapi/rdma/ib_user_ioctl_verbs.h
+++ b/include/uapi/rdma/ib_user_ioctl_verbs.h
@@ -40,6 +40,59 @@
#define RDMA_UAPI_PTR(_type, _name) __aligned_u64 _name
#endif
+enum ib_uverbs_access_flags {
+ IB_UVERBS_ACCESS_LOCAL_WRITE = 1 << 0,
+ IB_UVERBS_ACCESS_REMOTE_WRITE = 1 << 1,
+ IB_UVERBS_ACCESS_REMOTE_READ = 1 << 2,
+ IB_UVERBS_ACCESS_REMOTE_ATOMIC = 1 << 3,
+ IB_UVERBS_ACCESS_MW_BIND = 1 << 4,
+ IB_UVERBS_ACCESS_ZERO_BASED = 1 << 5,
+ IB_UVERBS_ACCESS_ON_DEMAND = 1 << 6,
+ IB_UVERBS_ACCESS_HUGETLB = 1 << 7,
+};
+
+enum ib_uverbs_query_port_cap_flags {
+ IB_UVERBS_PCF_SM = 1 << 1,
+ IB_UVERBS_PCF_NOTICE_SUP = 1 << 2,
+ IB_UVERBS_PCF_TRAP_SUP = 1 << 3,
+ IB_UVERBS_PCF_OPT_IPD_SUP = 1 << 4,
+ IB_UVERBS_PCF_AUTO_MIGR_SUP = 1 << 5,
+ IB_UVERBS_PCF_SL_MAP_SUP = 1 << 6,
+ IB_UVERBS_PCF_MKEY_NVRAM = 1 << 7,
+ IB_UVERBS_PCF_PKEY_NVRAM = 1 << 8,
+ IB_UVERBS_PCF_LED_INFO_SUP = 1 << 9,
+ IB_UVERBS_PCF_SM_DISABLED = 1 << 10,
+ IB_UVERBS_PCF_SYS_IMAGE_GUID_SUP = 1 << 11,
+ IB_UVERBS_PCF_PKEY_SW_EXT_PORT_TRAP_SUP = 1 << 12,
+ IB_UVERBS_PCF_EXTENDED_SPEEDS_SUP = 1 << 14,
+ IB_UVERBS_PCF_CM_SUP = 1 << 16,
+ IB_UVERBS_PCF_SNMP_TUNNEL_SUP = 1 << 17,
+ IB_UVERBS_PCF_REINIT_SUP = 1 << 18,
+ IB_UVERBS_PCF_DEVICE_MGMT_SUP = 1 << 19,
+ IB_UVERBS_PCF_VENDOR_CLASS_SUP = 1 << 20,
+ IB_UVERBS_PCF_DR_NOTICE_SUP = 1 << 21,
+ IB_UVERBS_PCF_CAP_MASK_NOTICE_SUP = 1 << 22,
+ IB_UVERBS_PCF_BOOT_MGMT_SUP = 1 << 23,
+ IB_UVERBS_PCF_LINK_LATENCY_SUP = 1 << 24,
+ IB_UVERBS_PCF_CLIENT_REG_SUP = 1 << 25,
+ /*
+ * IsOtherLocalChangesNoticeSupported is aliased by IP_BASED_GIDS and
+ * is inaccessible
+ */
+ IB_UVERBS_PCF_LINK_SPEED_WIDTH_TABLE_SUP = 1 << 27,
+ IB_UVERBS_PCF_VENDOR_SPECIFIC_MADS_TABLE_SUP = 1 << 28,
+ IB_UVERBS_PCF_MCAST_PKEY_TRAP_SUPPRESSION_SUP = 1 << 29,
+ IB_UVERBS_PCF_MCAST_FDB_TOP_SUP = 1 << 30,
+ IB_UVERBS_PCF_HIERARCHY_INFO_SUP = 1ULL << 31,
+
+ /* NOTE this is an internal flag, not an IBA flag */
+ IB_UVERBS_PCF_IP_BASED_GIDS = 1 << 26,
+};
+
+enum ib_uverbs_query_port_flags {
+ IB_UVERBS_QPF_GRH_REQUIRED = 1 << 0,
+};
+
enum ib_uverbs_flow_action_esp_keymat {
IB_UVERBS_FLOW_ACTION_ESP_KEYMAT_AES_GCM,
};
@@ -99,4 +152,9 @@ struct ib_uverbs_flow_action_esp {
__aligned_u64 hard_limit_pkts;
};
+enum ib_uverbs_read_counters_flags {
+ /* prefer read values from driver cache */
+ IB_UVERBS_READ_COUNTERS_PREFER_CACHED = 1 << 0,
+};
+
#endif
diff --git a/include/uapi/rdma/ib_user_verbs.h b/include/uapi/rdma/ib_user_verbs.h
index 4f9991de8e3a..25a16760de2a 100644
--- a/include/uapi/rdma/ib_user_verbs.h
+++ b/include/uapi/rdma/ib_user_verbs.h
@@ -279,7 +279,7 @@ struct ib_uverbs_query_port {
};
struct ib_uverbs_query_port_resp {
- __u32 port_cap_flags;
+ __u32 port_cap_flags; /* see ib_uverbs_query_port_cap_flags */
__u32 max_msg_sz;
__u32 bad_pkey_cntr;
__u32 qkey_viol_cntr;
@@ -299,7 +299,8 @@ struct ib_uverbs_query_port_resp {
__u8 active_speed;
__u8 phys_state;
__u8 link_layer;
- __u8 reserved[2];
+ __u8 flags; /* see ib_uverbs_query_port_flags */
+ __u8 reserved;
};
struct ib_uverbs_alloc_pd {
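
A small sketch of consuming the new bits from a query-port response (the response itself is assumed to have been obtained through the usual query-port command path):

#include <stdbool.h>
#include <rdma/ib_user_verbs.h>
#include <rdma/ib_user_ioctl_verbs.h>

static bool port_needs_grh(const struct ib_uverbs_query_port_resp *resp)
{
	/* New per-port flag byte, see ib_uverbs_query_port_flags. */
	return resp->flags & IB_UVERBS_QPF_GRH_REQUIRED;
}

static bool port_has_ip_based_gids(const struct ib_uverbs_query_port_resp *resp)
{
	/* Capability bits now have UAPI names, see ib_uverbs_query_port_cap_flags. */
	return resp->port_cap_flags & IB_UVERBS_PCF_IP_BASED_GIDS;
}
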
diff --git a/include/uapi/rdma/mlx5-abi.h b/include/uapi/rdma/mlx5-abi.h
index 8daec1fa49cf..addbb9c4529e 100644
--- a/include/uapi/rdma/mlx5-abi.h
+++ b/include/uapi/rdma/mlx5-abi.h
@@ -76,6 +76,9 @@ enum mlx5_lib_caps {
MLX5_LIB_CAP_4K_UAR = (__u64)1 << 0,
};
+enum mlx5_ib_alloc_uctx_v2_flags {
+ MLX5_IB_ALLOC_UCTX_DEVX = 1 << 0,
+};
struct mlx5_ib_alloc_ucontext_req_v2 {
__u32 total_num_bfregs;
__u32 num_low_latency_bfregs;
@@ -90,6 +93,7 @@ struct mlx5_ib_alloc_ucontext_req_v2 {
enum mlx5_ib_alloc_ucontext_resp_mask {
MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_CORE_CLOCK_OFFSET = 1UL << 0,
+ MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_DUMP_FILL_MKEY = 1UL << 1,
};
enum mlx5_user_cmds_supp_uhw {
@@ -138,7 +142,7 @@ struct mlx5_ib_alloc_ucontext_resp {
__u32 log_uar_size;
__u32 num_uars_per_page;
__u32 num_dyn_bfregs;
- __u32 reserved3;
+ __u32 dump_fill_mkey;
};
struct mlx5_ib_alloc_pd_resp {
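
A hedged sketch of how userspace might request DEVX at context allocation and pick up the new dump/fill MKEY; the command/response exchange itself (ibv_cmd_get_context in rdma-core) is assumed to happen elsewhere, and the bfreg count is illustrative:

#include <string.h>
#include <rdma/mlx5-abi.h>

static void prepare_ucontext_req(struct mlx5_ib_alloc_ucontext_req_v2 *req)
{
	memset(req, 0, sizeof(*req));
	req->total_num_bfregs = 8;		/* illustrative value */
	req->flags = MLX5_IB_ALLOC_UCTX_DEVX;	/* ask for DEVX support */
}

static int get_dump_fill_mkey(const struct mlx5_ib_alloc_ucontext_resp *resp,
			      __u32 *mkey)
{
	/* Only valid when the kernel set the corresponding comp_mask bit. */
	if (!(resp->comp_mask &
	      MLX5_IB_ALLOC_UCONTEXT_RESP_MASK_DUMP_FILL_MKEY))
		return -1;

	*mkey = resp->dump_fill_mkey;
	return 0;
}
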
diff --git a/include/uapi/rdma/mlx5_user_ioctl_cmds.h b/include/uapi/rdma/mlx5_user_ioctl_cmds.h
index f7d685ef2d1f..9c51801b9e64 100644
--- a/include/uapi/rdma/mlx5_user_ioctl_cmds.h
+++ b/include/uapi/rdma/mlx5_user_ioctl_cmds.h
@@ -33,6 +33,7 @@
#ifndef MLX5_USER_IOCTL_CMDS_H
#define MLX5_USER_IOCTL_CMDS_H
+#include <linux/types.h>
#include <rdma/ib_user_ioctl_cmds.h>
enum mlx5_ib_create_flow_action_attrs {
@@ -45,4 +46,124 @@ enum mlx5_ib_alloc_dm_attrs {
MLX5_IB_ATTR_ALLOC_DM_RESP_PAGE_INDEX,
};
+enum mlx5_ib_devx_methods {
+ MLX5_IB_METHOD_DEVX_OTHER = (1U << UVERBS_ID_NS_SHIFT),
+ MLX5_IB_METHOD_DEVX_QUERY_UAR,
+ MLX5_IB_METHOD_DEVX_QUERY_EQN,
+};
+
+enum mlx5_ib_devx_other_attrs {
+ MLX5_IB_ATTR_DEVX_OTHER_CMD_IN = (1U << UVERBS_ID_NS_SHIFT),
+ MLX5_IB_ATTR_DEVX_OTHER_CMD_OUT,
+};
+
+enum mlx5_ib_devx_obj_create_attrs {
+ MLX5_IB_ATTR_DEVX_OBJ_CREATE_HANDLE = (1U << UVERBS_ID_NS_SHIFT),
+ MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_IN,
+ MLX5_IB_ATTR_DEVX_OBJ_CREATE_CMD_OUT,
+};
+
+enum mlx5_ib_devx_query_uar_attrs {
+ MLX5_IB_ATTR_DEVX_QUERY_UAR_USER_IDX = (1U << UVERBS_ID_NS_SHIFT),
+ MLX5_IB_ATTR_DEVX_QUERY_UAR_DEV_IDX,
+};
+
+enum mlx5_ib_devx_obj_destroy_attrs {
+ MLX5_IB_ATTR_DEVX_OBJ_DESTROY_HANDLE = (1U << UVERBS_ID_NS_SHIFT),
+};
+
+enum mlx5_ib_devx_obj_modify_attrs {
+ MLX5_IB_ATTR_DEVX_OBJ_MODIFY_HANDLE = (1U << UVERBS_ID_NS_SHIFT),
+ MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_IN,
+ MLX5_IB_ATTR_DEVX_OBJ_MODIFY_CMD_OUT,
+};
+
+enum mlx5_ib_devx_obj_query_attrs {
+ MLX5_IB_ATTR_DEVX_OBJ_QUERY_HANDLE = (1U << UVERBS_ID_NS_SHIFT),
+ MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_IN,
+ MLX5_IB_ATTR_DEVX_OBJ_QUERY_CMD_OUT,
+};
+
+enum mlx5_ib_devx_query_eqn_attrs {
+ MLX5_IB_ATTR_DEVX_QUERY_EQN_USER_VEC = (1U << UVERBS_ID_NS_SHIFT),
+ MLX5_IB_ATTR_DEVX_QUERY_EQN_DEV_EQN,
+};
+
+enum mlx5_ib_devx_obj_methods {
+ MLX5_IB_METHOD_DEVX_OBJ_CREATE = (1U << UVERBS_ID_NS_SHIFT),
+ MLX5_IB_METHOD_DEVX_OBJ_DESTROY,
+ MLX5_IB_METHOD_DEVX_OBJ_MODIFY,
+ MLX5_IB_METHOD_DEVX_OBJ_QUERY,
+};
+
+enum mlx5_ib_devx_umem_reg_attrs {
+ MLX5_IB_ATTR_DEVX_UMEM_REG_HANDLE = (1U << UVERBS_ID_NS_SHIFT),
+ MLX5_IB_ATTR_DEVX_UMEM_REG_ADDR,
+ MLX5_IB_ATTR_DEVX_UMEM_REG_LEN,
+ MLX5_IB_ATTR_DEVX_UMEM_REG_ACCESS,
+ MLX5_IB_ATTR_DEVX_UMEM_REG_OUT_ID,
+};
+
+enum mlx5_ib_devx_umem_dereg_attrs {
+ MLX5_IB_ATTR_DEVX_UMEM_DEREG_HANDLE = (1U << UVERBS_ID_NS_SHIFT),
+};
+
+enum mlx5_ib_devx_umem_methods {
+ MLX5_IB_METHOD_DEVX_UMEM_REG = (1U << UVERBS_ID_NS_SHIFT),
+ MLX5_IB_METHOD_DEVX_UMEM_DEREG,
+};
+
+enum mlx5_ib_objects {
+ MLX5_IB_OBJECT_DEVX = (1U << UVERBS_ID_NS_SHIFT),
+ MLX5_IB_OBJECT_DEVX_OBJ,
+ MLX5_IB_OBJECT_DEVX_UMEM,
+ MLX5_IB_OBJECT_FLOW_MATCHER,
+};
+
+enum mlx5_ib_flow_matcher_create_attrs {
+ MLX5_IB_ATTR_FLOW_MATCHER_CREATE_HANDLE = (1U << UVERBS_ID_NS_SHIFT),
+ MLX5_IB_ATTR_FLOW_MATCHER_MATCH_MASK,
+ MLX5_IB_ATTR_FLOW_MATCHER_FLOW_TYPE,
+ MLX5_IB_ATTR_FLOW_MATCHER_MATCH_CRITERIA,
+};
+
+enum mlx5_ib_flow_matcher_destroy_attrs {
+ MLX5_IB_ATTR_FLOW_MATCHER_DESTROY_HANDLE = (1U << UVERBS_ID_NS_SHIFT),
+};
+
+enum mlx5_ib_flow_matcher_methods {
+ MLX5_IB_METHOD_FLOW_MATCHER_CREATE = (1U << UVERBS_ID_NS_SHIFT),
+ MLX5_IB_METHOD_FLOW_MATCHER_DESTROY,
+};
+
+#define MLX5_IB_DW_MATCH_PARAM 0x80
+
+struct mlx5_ib_match_params {
+ __u32 match_params[MLX5_IB_DW_MATCH_PARAM];
+};
+
+enum mlx5_ib_flow_type {
+ MLX5_IB_FLOW_TYPE_NORMAL,
+ MLX5_IB_FLOW_TYPE_SNIFFER,
+ MLX5_IB_FLOW_TYPE_ALL_DEFAULT,
+ MLX5_IB_FLOW_TYPE_MC_DEFAULT,
+};
+
+enum mlx5_ib_create_flow_attrs {
+ MLX5_IB_ATTR_CREATE_FLOW_HANDLE = (1U << UVERBS_ID_NS_SHIFT),
+ MLX5_IB_ATTR_CREATE_FLOW_MATCH_VALUE,
+ MLX5_IB_ATTR_CREATE_FLOW_DEST_QP,
+ MLX5_IB_ATTR_CREATE_FLOW_DEST_DEVX,
+ MLX5_IB_ATTR_CREATE_FLOW_MATCHER,
+};
+
+enum mlx5_ib_destoy_flow_attrs {
+ MLX5_IB_ATTR_DESTROY_FLOW_HANDLE = (1U << UVERBS_ID_NS_SHIFT),
+};
+
+enum mlx5_ib_flow_methods {
+ MLX5_IB_METHOD_CREATE_FLOW = (1U << UVERBS_ID_NS_SHIFT),
+ MLX5_IB_METHOD_DESTROY_FLOW,
+};
+
#endif
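
All of these driver-specific method and attribute IDs start at (1U << UVERBS_ID_NS_SHIFT), which keeps them in a namespace separate from the common uverbs IDs. A trivial sketch that only prints a few of the values, with no hardware interaction:

#include <stdio.h>
#include <rdma/rdma_user_ioctl_cmds.h>
#include <rdma/mlx5_user_ioctl_cmds.h>

int main(void)
{
	printf("driver namespace base:      %u\n",
	       (unsigned int)(1U << UVERBS_ID_NS_SHIFT));
	printf("DEVX obj create method:     %u\n",
	       (unsigned int)MLX5_IB_METHOD_DEVX_OBJ_CREATE);
	printf("DEVX umem reg handle attr:  %u\n",
	       (unsigned int)MLX5_IB_ATTR_DEVX_UMEM_REG_HANDLE);
	return 0;
}
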
diff --git a/include/uapi/rdma/qedr-abi.h b/include/uapi/rdma/qedr-abi.h
index 24c658b3c790..7a10b3a325fa 100644
--- a/include/uapi/rdma/qedr-abi.h
+++ b/include/uapi/rdma/qedr-abi.h
@@ -111,4 +111,21 @@ struct qedr_create_qp_uresp {
__u32 reserved;
};
+struct qedr_create_srq_ureq {
+ /* user space virtual address of producer pair */
+ __aligned_u64 prod_pair_addr;
+
+ /* user space virtual address of SRQ buffer */
+ __aligned_u64 srq_addr;
+
+ /* length of SRQ buffer */
+ __aligned_u64 srq_len;
+};
+
+struct qedr_create_srq_uresp {
+ __u16 srq_id;
+ __u16 reserved0;
+ __u32 reserved1;
+};
+
#endif /* __QEDR_USER_H__ */
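
A minimal sketch of filling the new SRQ creation request; the buffers and the actual create-SRQ command exchange are assumed to exist elsewhere:

#include <stdint.h>
#include <string.h>
#include <rdma/qedr-abi.h>

static void fill_srq_ureq(struct qedr_create_srq_ureq *req,
			  void *prod_pair, void *srq_buf, uint64_t srq_len)
{
	memset(req, 0, sizeof(*req));
	req->prod_pair_addr = (uintptr_t)prod_pair;	/* producer pair VA */
	req->srq_addr = (uintptr_t)srq_buf;		/* SRQ buffer VA */
	req->srq_len = srq_len;				/* SRQ buffer length */
}
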
diff --git a/include/uapi/rdma/rdma_user_ioctl_cmds.h b/include/uapi/rdma/rdma_user_ioctl_cmds.h
index 1da5a1e1f3a8..24800c6c1f32 100644
--- a/include/uapi/rdma/rdma_user_ioctl_cmds.h
+++ b/include/uapi/rdma/rdma_user_ioctl_cmds.h
@@ -62,7 +62,12 @@ struct ib_uverbs_attr {
} enum_data;
__u16 reserved;
} attr_data;
- __aligned_u64 data; /* ptr to command, inline data or idr/fd */
+ union {
+ /* Used by PTR_IN/OUT, ENUM_IN and IDR */
+ __aligned_u64 data;
+ /* Used by FD_IN and FD_OUT */
+ __s64 data_s64;
+ };
};
struct ib_uverbs_ioctl_hdr {
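
With the union in place, FD-carrying attributes can use the signed member so that -1 ("no fd") survives the widening to 64 bits; a hedged sketch of filling one such attribute, where the attribute ID is whatever the chosen method expects:

#include <linux/types.h>
#include <rdma/rdma_user_ioctl_cmds.h>

static void set_fd_attr(struct ib_uverbs_attr *attr, __u16 attr_id, int fd)
{
	attr->attr_id = attr_id;
	attr->len = 0;
	attr->flags = 0;
	attr->attr_data.reserved = 0;
	attr->data_s64 = fd;	/* sign-extended, so -1 stays -1 */
}
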
diff --git a/include/uapi/xen/gntdev.h b/include/uapi/xen/gntdev.h
index 6d1163456c03..fe4423e518c6 100644
--- a/include/uapi/xen/gntdev.h
+++ b/include/uapi/xen/gntdev.h
@@ -5,6 +5,7 @@
* Interface to /dev/xen/gntdev.
*
* Copyright (c) 2007, D G Murray
+ * Copyright (c) 2018, Oleksandr Andrushchenko, EPAM Systems Inc.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version 2
@@ -200,4 +201,109 @@ struct ioctl_gntdev_grant_copy {
/* Send an interrupt on the indicated event channel */
#define UNMAP_NOTIFY_SEND_EVENT 0x2
+/*
+ * Flags to be used while requesting memory mapping's backing storage
+ * to be allocated with DMA API.
+ */
+
+/*
+ * The buffer is backed with memory allocated with dma_alloc_wc.
+ */
+#define GNTDEV_DMA_FLAG_WC (1 << 0)
+
+/*
+ * The buffer is backed with memory allocated with dma_alloc_coherent.
+ */
+#define GNTDEV_DMA_FLAG_COHERENT (1 << 1)
+
+/*
+ * Create a dma-buf [1] from grant references @refs of count @count provided
+ * by the foreign domain @domid with flags @flags.
+ *
+ * By default dma-buf is backed by system memory pages, but by providing
+ * one of the GNTDEV_DMA_FLAG_XXX flags it can also be created as
+ * a DMA write-combine or coherent buffer, e.g. allocated with dma_alloc_wc/
+ * dma_alloc_coherent.
+ *
+ * Returns 0 if dma-buf was successfully created and the corresponding
+ * dma-buf's file descriptor is returned in @fd.
+ *
+ * [1] Documentation/driver-api/dma-buf.rst
+ */
+
+#define IOCTL_GNTDEV_DMABUF_EXP_FROM_REFS \
+ _IOC(_IOC_NONE, 'G', 9, \
+ sizeof(struct ioctl_gntdev_dmabuf_exp_from_refs))
+struct ioctl_gntdev_dmabuf_exp_from_refs {
+ /* IN parameters. */
+ /* Specific options for this dma-buf: see GNTDEV_DMA_FLAG_XXX. */
+ __u32 flags;
+ /* Number of grant references in @refs array. */
+ __u32 count;
+ /* OUT parameters. */
+ /* File descriptor of the dma-buf. */
+ __u32 fd;
+ /* The domain ID of the grant references to be mapped. */
+ __u32 domid;
+ /* Variable IN parameter. */
+ /* Array of grant references of size @count. */
+ __u32 refs[1];
+};
+
+/*
+ * This will block until the dma-buf with the file descriptor @fd is
+ * released. This is only valid for buffers created with
+ * IOCTL_GNTDEV_DMABUF_EXP_FROM_REFS.
+ *
+ * If within @wait_to_ms milliseconds the buffer is not released
+ * then -ETIMEDOUT error is returned.
+ * If the buffer with the file descriptor @fd does not exist or has already
+ * been released, then -ENOENT is returned. For valid file descriptors
+ * this must not be treated as an error.
+ */
+#define IOCTL_GNTDEV_DMABUF_EXP_WAIT_RELEASED \
+ _IOC(_IOC_NONE, 'G', 10, \
+ sizeof(struct ioctl_gntdev_dmabuf_exp_wait_released))
+struct ioctl_gntdev_dmabuf_exp_wait_released {
+ /* IN parameters */
+ __u32 fd;
+ __u32 wait_to_ms;
+};
+
+/*
+ * Import a dma-buf with file descriptor @fd and export granted references
+ * to the pages of that dma-buf into array @refs of size @count.
+ */
+#define IOCTL_GNTDEV_DMABUF_IMP_TO_REFS \
+ _IOC(_IOC_NONE, 'G', 11, \
+ sizeof(struct ioctl_gntdev_dmabuf_imp_to_refs))
+struct ioctl_gntdev_dmabuf_imp_to_refs {
+ /* IN parameters. */
+ /* File descriptor of the dma-buf. */
+ __u32 fd;
+ /* Number of grant references in @refs array. */
+ __u32 count;
+ /* The domain ID for which references are to be granted. */
+ __u32 domid;
+ /* Reserved - must be zero. */
+ __u32 reserved;
+ /* OUT parameters. */
+ /* Array of grant references of size @count. */
+ __u32 refs[1];
+};
+
+/*
+ * This will close all references to the imported buffer with file descriptor
+ * @fd, so it can be released by the owner. This is only valid for buffers
+ * created with IOCTL_GNTDEV_DMABUF_IMP_TO_REFS.
+ */
+#define IOCTL_GNTDEV_DMABUF_IMP_RELEASE \
+ _IOC(_IOC_NONE, 'G', 12, \
+ sizeof(struct ioctl_gntdev_dmabuf_imp_release))
+struct ioctl_gntdev_dmabuf_imp_release {
+ /* IN parameters */
+ __u32 fd;
+ __u32 reserved;
+};
+
#endif /* __LINUX_PUBLIC_GNTDEV_H__ */
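
A hedged userspace sketch of the export path; note that refs[] is a variable-length array, so the request has to be allocated with room for all @count references (error handling trimmed):

#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/types.h>
#include <xen/gntdev.h>

/* Export a dma-buf from @count grant references of domain @domid;
 * returns the dma-buf fd on success, -1 otherwise. */
int export_dmabuf(int gntdev_fd, __u32 domid, const __u32 *refs, __u32 count)
{
	struct ioctl_gntdev_dmabuf_exp_from_refs *op;
	size_t sz = sizeof(*op) + (count - 1) * sizeof(__u32);
	int ret, fd = -1;

	op = calloc(1, sz);
	if (!op)
		return -1;

	op->flags = GNTDEV_DMA_FLAG_WC;		/* ask for a write-combine buffer */
	op->count = count;
	op->domid = domid;
	memcpy(op->refs, refs, count * sizeof(__u32));

	ret = ioctl(gntdev_fd, IOCTL_GNTDEV_DMABUF_EXP_FROM_REFS, op);
	if (!ret)
		fd = op->fd;

	free(op);
	return fd;
}
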
diff --git a/include/video/mipi_display.h b/include/video/mipi_display.h
index 19aa65a35546..49a53ef8da96 100644
--- a/include/video/mipi_display.h
+++ b/include/video/mipi_display.h
@@ -38,6 +38,9 @@ enum {
MIPI_DSI_DCS_READ = 0x06,
+ MIPI_DSI_DCS_COMPRESSION_MODE = 0x07,
+ MIPI_DSI_PPS_LONG_WRITE = 0x0A,
+
MIPI_DSI_SET_MAXIMUM_RETURN_PACKET_SIZE = 0x37,
MIPI_DSI_END_OF_TRANSMISSION = 0x08,
diff --git a/include/video/udlfb.h b/include/video/udlfb.h
index 0cabe6b09095..3abd327bada6 100644
--- a/include/video/udlfb.h
+++ b/include/video/udlfb.h
@@ -20,7 +20,6 @@ struct dloarea {
struct urb_node {
struct list_head entry;
struct dlfb_data *dlfb;
- struct delayed_work release_urb_work;
struct urb *urb;
};
@@ -52,11 +51,14 @@ struct dlfb_data {
int base8;
u32 pseudo_palette[256];
int blank_mode; /*one of FB_BLANK_ */
+ struct fb_ops ops;
/* blit-only rendering path metrics, exposed through sysfs */
atomic_t bytes_rendered; /* raw pixel-bytes driver asked to render */
atomic_t bytes_identical; /* saved effort with backbuffer comparison */
atomic_t bytes_sent; /* to usb, after compression including overhead */
atomic_t cpu_kcycles_used; /* transpired during pixel processing */
+ struct fb_var_screeninfo current_mode;
+ struct list_head deferred_free;
};
#define NR_USB_REQUEST_I2C_SUB_IO 0x02
@@ -87,7 +89,7 @@ struct dlfb_data {
#define MIN_RAW_PIX_BYTES 2
#define MIN_RAW_CMD_BYTES (RAW_HEADER_BYTES + MIN_RAW_PIX_BYTES)
-#define DL_DEFIO_WRITE_DELAY 5 /* fb_deferred_io.delay in jiffies */
+#define DL_DEFIO_WRITE_DELAY msecs_to_jiffies(HZ <= 300 ? 4 : 10) /* optimal value for 720p video */
#define DL_DEFIO_WRITE_DISABLE (HZ*60) /* "disable" with long delay */
/* remove these once align.h patch is taken into kernel */
diff --git a/include/xen/grant_table.h b/include/xen/grant_table.h
index 2e37741f6b8d..9bc5bc07d4d3 100644
--- a/include/xen/grant_table.h
+++ b/include/xen/grant_table.h
@@ -198,6 +198,27 @@ void gnttab_free_auto_xlat_frames(void);
int gnttab_alloc_pages(int nr_pages, struct page **pages);
void gnttab_free_pages(int nr_pages, struct page **pages);
+#ifdef CONFIG_XEN_GRANT_DMA_ALLOC
+struct gnttab_dma_alloc_args {
+ /* Device for which DMA memory will be/was allocated. */
+ struct device *dev;
+ /* If set, the DMA buffer is coherent; otherwise it is write-combine. */
+ bool coherent;
+
+ int nr_pages;
+ struct page **pages;
+ xen_pfn_t *frames;
+ void *vaddr;
+ dma_addr_t dev_bus_addr;
+};
+
+int gnttab_dma_alloc_pages(struct gnttab_dma_alloc_args *args);
+int gnttab_dma_free_pages(struct gnttab_dma_alloc_args *args);
+#endif
+
+int gnttab_pages_set_private(int nr_pages, struct page **pages);
+void gnttab_pages_clear_private(int nr_pages, struct page **pages);
+
int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
struct gnttab_map_grant_ref *kmap_ops,
struct page **pages, unsigned int count);
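
A kernel-side sketch (only meaningful with CONFIG_XEN_GRANT_DMA_ALLOC) of the new allocator; actually granting the resulting frames to the other domain is left out:

#include <linux/device.h>
#include <xen/grant_table.h>

static int example_dma_grant_alloc(struct device *dev, int nr_pages,
				   struct page **pages, xen_pfn_t *frames)
{
	struct gnttab_dma_alloc_args args = {
		.dev = dev,
		.coherent = true,	/* dma_alloc_coherent backing */
		.nr_pages = nr_pages,
		.pages = pages,
		.frames = frames,
	};
	int ret;

	ret = gnttab_dma_alloc_pages(&args);
	if (ret)
		return ret;

	/* ... grant args.frames to the other domain, use args.vaddr ... */

	return gnttab_dma_free_pages(&args);
}
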
diff --git a/include/xen/interface/io/displif.h b/include/xen/interface/io/displif.h
index 596578d9be3e..fdc279dc4a88 100644
--- a/include/xen/interface/io/displif.h
+++ b/include/xen/interface/io/displif.h
@@ -189,6 +189,13 @@
*
*----------------------------- Connector settings ----------------------------
*
+ * unique-id
+ * Values: <string>
+ *
+ * After device instance initialization each connector is assigned a
+ * unique ID, so it can be identified by the backend by this ID.
+ * This can be UUID or such.
+ *
* resolution
* Values: <width, uint32_t>x<height, uint32_t>
*
@@ -368,6 +375,7 @@
#define XENDISPL_FIELD_EVT_CHANNEL "evt-event-channel"
#define XENDISPL_FIELD_RESOLUTION "resolution"
#define XENDISPL_FIELD_BE_ALLOC "be-alloc"
+#define XENDISPL_FIELD_UNIQUE_ID "unique-id"
/*
******************************************************************************
diff --git a/include/xen/interface/io/kbdif.h b/include/xen/interface/io/kbdif.h
index e2340a4130cf..5c7630d7376e 100644
--- a/include/xen/interface/io/kbdif.h
+++ b/include/xen/interface/io/kbdif.h
@@ -51,6 +51,18 @@
* corresponding entries in XenStore and puts 1 as the value of the entry.
* If a feature is not supported then 0 must be set or feature entry omitted.
*
+ * feature-disable-keyboard
+ * Values: <uint>
+ *
+ * If there is no need to expose a virtual keyboard device by the
+ * frontend then this must be set to 1.
+ *
+ * feature-disable-pointer
+ * Values: <uint>
+ *
+ * If there is no need to expose a virtual pointer device by the
+ * frontend then this must be set to 1.
+ *
* feature-abs-pointer
* Values: <uint>
*
@@ -63,6 +75,22 @@
* Backends, which support reporting of multi-touch events
* should set this to 1.
*
+ * feature-raw-pointer
+ * Values: <uint>
+ *
+ * Backends, which support reporting raw (unscaled) absolute coordinates
+ * for pointer devices should set this to 1. Raw (unscaled) values have
+ * a range of [0, 0x7fff].
+ *
+ *----------------------- Device Instance Parameters ------------------------
+ *
+ * unique-id
+ * Values: <string>
+ *
+ * After device instance initialization it is assigned a unique ID,
+ * so every instance of the frontend can be identified by the backend
+ * by this ID. This can be UUID or such.
+ *
*------------------------- Pointer Device Parameters ------------------------
*
* width
@@ -77,6 +105,25 @@
* Maximum Y coordinate (height) to be used by the frontend
* while reporting input events, pixels, [0; UINT32_MAX].
*
+ *----------------------- Multi-touch Device Parameters ----------------------
+ *
+ * multi-touch-num-contacts
+ * Values: <uint>
+ *
+ * Number of simultaneous touches reported.
+ *
+ * multi-touch-width
+ * Values: <uint>
+ *
+ * Width of the touch area to be used by the frontend
+ * while reporting input events, pixels, [0; UINT32_MAX].
+ *
+ * multi-touch-height
+ * Values: <uint>
+ *
+ * Height of the touch area to be used by the frontend
+ * while reporting input events, pixels, [0; UINT32_MAX].
+ *
*****************************************************************************
* Frontend XenBus Nodes
*****************************************************************************
@@ -98,6 +145,13 @@
*
* Request backend to report multi-touch events.
*
+ * request-raw-pointer
+ * Values: <uint>
+ *
+ * Request backend to report raw unscaled absolute pointer coordinates.
+ * This option is only valid if request-abs-pointer is also set.
+ * Raw unscaled coordinates have the range [0, 0x7fff]
+ *
*----------------------- Request Transport Parameters -----------------------
*
* event-channel
@@ -117,25 +171,6 @@
*
* OBSOLETE, not recommended for use.
* PFN of the shared page.
- *
- *----------------------- Multi-touch Device Parameters -----------------------
- *
- * multi-touch-num-contacts
- * Values: <uint>
- *
- * Number of simultaneous touches reported.
- *
- * multi-touch-width
- * Values: <uint>
- *
- * Width of the touch area to be used by the frontend
- * while reporting input events, pixels, [0; UINT32_MAX].
- *
- * multi-touch-height
- * Values: <uint>
- *
- * Height of the touch area to be used by the frontend
- * while reporting input events, pixels, [0; UINT32_MAX].
*/
/*
@@ -163,9 +198,13 @@
#define XENKBD_DRIVER_NAME "vkbd"
+#define XENKBD_FIELD_FEAT_DSBL_KEYBRD "feature-disable-keyboard"
+#define XENKBD_FIELD_FEAT_DSBL_POINTER "feature-disable-pointer"
#define XENKBD_FIELD_FEAT_ABS_POINTER "feature-abs-pointer"
+#define XENKBD_FIELD_FEAT_RAW_POINTER "feature-raw-pointer"
#define XENKBD_FIELD_FEAT_MTOUCH "feature-multi-touch"
#define XENKBD_FIELD_REQ_ABS_POINTER "request-abs-pointer"
+#define XENKBD_FIELD_REQ_RAW_POINTER "request-raw-pointer"
#define XENKBD_FIELD_REQ_MTOUCH "request-multi-touch"
#define XENKBD_FIELD_RING_GREF "page-gref"
#define XENKBD_FIELD_EVT_CHANNEL "event-channel"
@@ -174,6 +213,7 @@
#define XENKBD_FIELD_MT_WIDTH "multi-touch-width"
#define XENKBD_FIELD_MT_HEIGHT "multi-touch-height"
#define XENKBD_FIELD_MT_NUM_CONTACTS "multi-touch-num-contacts"
+#define XENKBD_FIELD_UNIQUE_ID "unique-id"
/* OBSOLETE, not recommended for use */
#define XENKBD_FIELD_RING_REF "page-ref"
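
A hedged frontend-side sketch of the new raw-pointer negotiation, following the usual vkbd XenBus patterns; the xenbus device and the earlier request-abs-pointer write are assumed:

#include <xen/xenbus.h>
#include <xen/interface/io/kbdif.h>

static void request_raw_pointer(struct xenbus_device *dev)
{
	unsigned int feature = 0;

	if (xenbus_scanf(XBT_NIL, dev->otherend,
			 XENKBD_FIELD_FEAT_RAW_POINTER, "%u", &feature) <= 0)
		return;

	/* Only meaningful if request-abs-pointer was also written. */
	if (feature)
		xenbus_printf(XBT_NIL, dev->nodename,
			      XENKBD_FIELD_REQ_RAW_POINTER, "%u", 1);
}
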
diff --git a/include/xen/interface/io/sndif.h b/include/xen/interface/io/sndif.h
index 78bb5d9f8d83..2aac8f73614c 100644
--- a/include/xen/interface/io/sndif.h
+++ b/include/xen/interface/io/sndif.h
@@ -278,13 +278,11 @@
* defined under the same device.
*
* unique-id
- * Values: <uint32_t>
+ * Values: <string>
*
- * After stream initialization it is assigned a unique ID (within the front
- * driver), so every stream of the frontend can be identified by the
- * backend by this ID. This is not equal to stream-idx as the later is
- * zero based within the device, but this index is contigous within the
- * driver.
+ * After stream initialization it is assigned a unique ID, so every
+ * stream of the frontend can be identified by the backend by this ID.
+ * This can be UUID or such.
*
*-------------------- Stream Request Transport Parameters --------------------
*
diff --git a/include/xen/mem-reservation.h b/include/xen/mem-reservation.h
new file mode 100644
index 000000000000..80b52b4945e9
--- /dev/null
+++ b/include/xen/mem-reservation.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/*
+ * Xen memory reservation utilities.
+ *
+ * Copyright (c) 2003, B Dragovic
+ * Copyright (c) 2003-2004, M Williamson, K Fraser
+ * Copyright (c) 2005 Dan M. Smith, IBM Corporation
+ * Copyright (c) 2010 Daniel Kiper
+ * Copyright (c) 2018 Oleksandr Andrushchenko, EPAM Systems Inc.
+ */
+
+#ifndef _XENMEM_RESERVATION_H
+#define _XENMEM_RESERVATION_H
+
+#include <linux/highmem.h>
+
+#include <xen/page.h>
+
+static inline void xenmem_reservation_scrub_page(struct page *page)
+{
+#ifdef CONFIG_XEN_SCRUB_PAGES
+ clear_highpage(page);
+#endif
+}
+
+#ifdef CONFIG_XEN_HAVE_PVMMU
+void __xenmem_reservation_va_mapping_update(unsigned long count,
+ struct page **pages,
+ xen_pfn_t *frames);
+
+void __xenmem_reservation_va_mapping_reset(unsigned long count,
+ struct page **pages);
+#endif
+
+static inline void xenmem_reservation_va_mapping_update(unsigned long count,
+ struct page **pages,
+ xen_pfn_t *frames)
+{
+#ifdef CONFIG_XEN_HAVE_PVMMU
+ if (!xen_feature(XENFEAT_auto_translated_physmap))
+ __xenmem_reservation_va_mapping_update(count, pages, frames);
+#endif
+}
+
+static inline void xenmem_reservation_va_mapping_reset(unsigned long count,
+ struct page **pages)
+{
+#ifdef CONFIG_XEN_HAVE_PVMMU
+ if (!xen_feature(XENFEAT_auto_translated_physmap))
+ __xenmem_reservation_va_mapping_reset(count, pages);
+#endif
+}
+
+int xenmem_reservation_increase(int count, xen_pfn_t *frames);
+
+int xenmem_reservation_decrease(int count, xen_pfn_t *frames);
+
+#endif
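
A hedged sketch of how a balloon-style caller might use these helpers when returning pages to the hypervisor; the page array and frame list are assumed to be prepared by the caller, and decrease is expected to return the full count on success:

#include <xen/mem-reservation.h>

static int example_return_pages(int count, struct page **pages,
				xen_pfn_t *frames)
{
	int i, ret;

	for (i = 0; i < count; i++)
		xenmem_reservation_scrub_page(pages[i]);

	xenmem_reservation_va_mapping_reset(count, pages);

	ret = xenmem_reservation_decrease(count, frames);
	return ret == count ? 0 : -1;
}
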
diff --git a/include/xen/xen.h b/include/xen/xen.h
index 9d4340c907d1..1e1d9bd0bd37 100644
--- a/include/xen/xen.h
+++ b/include/xen/xen.h
@@ -25,12 +25,16 @@ extern bool xen_pvh;
#define xen_hvm_domain() (xen_domain_type == XEN_HVM_DOMAIN)
#define xen_pvh_domain() (xen_pvh)
+#include <linux/types.h>
+
+extern uint32_t xen_start_flags;
+
#ifdef CONFIG_XEN_DOM0
#include <xen/interface/xen.h>
#include <asm/xen/hypervisor.h>
#define xen_initial_domain() (xen_domain() && \
- xen_start_info && xen_start_info->flags & SIF_INITDOMAIN)
+ (xen_start_flags & SIF_INITDOMAIN))
#else /* !CONFIG_XEN_DOM0 */
#define xen_initial_domain() (0)
#endif /* CONFIG_XEN_DOM0 */