Diffstat (limited to 'include')
-rw-r--r--include/Kbuild1185
-rw-r--r--include/acpi/acbuffer.h2
-rw-r--r--include/acpi/acconfig.h2
-rw-r--r--include/acpi/acexcep.h2
-rw-r--r--include/acpi/acnames.h2
-rw-r--r--include/acpi/acoutput.h2
-rw-r--r--include/acpi/acpi.h2
-rw-r--r--include/acpi/acpi_bus.h10
-rw-r--r--include/acpi/acpiosxf.h2
-rw-r--r--include/acpi/acpixf.h12
-rw-r--r--include/acpi/acrestyp.h2
-rw-r--r--include/acpi/actbl.h2
-rw-r--r--include/acpi/actbl1.h4
-rw-r--r--include/acpi/actbl2.h2
-rw-r--r--include/acpi/actbl3.h2
-rw-r--r--include/acpi/actypes.h5
-rw-r--r--include/acpi/acuuid.h2
-rw-r--r--include/acpi/button.h14
-rw-r--r--include/acpi/platform/acenv.h13
-rw-r--r--include/acpi/platform/acenvex.h2
-rw-r--r--include/acpi/platform/acgcc.h2
-rw-r--r--include/acpi/platform/acgccex.h2
-rw-r--r--include/acpi/platform/acintel.h2
-rw-r--r--include/acpi/platform/aclinux.h2
-rw-r--r--include/acpi/platform/aclinuxex.h2
-rw-r--r--include/asm-generic/4level-fixup.h40
-rw-r--r--include/asm-generic/5level-fixup.h1
-rw-r--r--include/asm-generic/Kbuild2
-rw-r--r--include/asm-generic/bitops-instrumented.h263
-rw-r--r--include/asm-generic/bitops.h5
-rw-r--r--include/asm-generic/bitops/find.h17
-rw-r--r--include/asm-generic/bitops/instrumented-atomic.h100
-rw-r--r--include/asm-generic/bitops/instrumented-lock.h81
-rw-r--r--include/asm-generic/bitops/instrumented-non-atomic.h114
-rw-r--r--include/asm-generic/cacheflush.h33
-rw-r--r--include/asm-generic/export.h11
-rw-r--r--include/asm-generic/futex.h2
-rw-r--r--include/asm-generic/io.h87
-rw-r--r--include/asm-generic/iomap.h4
-rw-r--r--include/asm-generic/mm_hooks.h5
-rw-r--r--include/asm-generic/mshyperv.h2
-rw-r--r--include/asm-generic/percpu.h10
-rw-r--r--include/asm-generic/pgtable-nop4d.h2
-rw-r--r--include/asm-generic/pgtable-nopmd.h2
-rw-r--r--include/asm-generic/pgtable-nopud.h2
-rw-r--r--include/asm-generic/pgtable.h71
-rw-r--r--include/asm-generic/tlb.h126
-rw-r--r--include/asm-generic/vdso/vsyscall.h14
-rw-r--r--include/asm-generic/vmlinux.lds.h91
-rw-r--r--include/clocksource/hyperv_timer.h9
-rw-r--r--include/clocksource/timer-ti-dm.h4
-rw-r--r--include/crypto/aead.h12
-rw-r--r--include/crypto/algapi.h213
-rw-r--r--include/crypto/blake2s.h106
-rw-r--r--include/crypto/cast6.h7
-rw-r--r--include/crypto/chacha.h83
-rw-r--r--include/crypto/chacha20poly1305.h48
-rw-r--r--include/crypto/curve25519.h73
-rw-r--r--include/crypto/engine.h4
-rw-r--r--include/crypto/hash.h15
-rw-r--r--include/crypto/internal/acompress.h4
-rw-r--r--include/crypto/internal/aead.h21
-rw-r--r--include/crypto/internal/akcipher.h12
-rw-r--r--include/crypto/internal/blake2s.h24
-rw-r--r--include/crypto/internal/chacha.h43
-rw-r--r--include/crypto/internal/des.h35
-rw-r--r--include/crypto/internal/geniv.h1
-rw-r--r--include/crypto/internal/hash.h90
-rw-r--r--include/crypto/internal/poly1305.h33
-rw-r--r--include/crypto/internal/scompress.h4
-rw-r--r--include/crypto/internal/skcipher.h89
-rw-r--r--include/crypto/nhpoly1305.h4
-rw-r--r--include/crypto/poly1305.h93
-rw-r--r--include/crypto/serpent.h4
-rw-r--r--include/crypto/skcipher.h75
-rw-r--r--include/crypto/twofish.h2
-rw-r--r--include/crypto/xts.h21
-rw-r--r--include/drm/amd_asic_type.h56
-rw-r--r--include/drm/ati_pcigart.h31
-rw-r--r--include/drm/bridge/dw_hdmi.h6
-rw-r--r--include/drm/bridge/dw_mipi_dsi.h9
-rw-r--r--include/drm/drmP.h103
-rw-r--r--include/drm/drm_atomic.h62
-rw-r--r--include/drm/drm_atomic_helper.h8
-rw-r--r--include/drm/drm_atomic_state_helper.h6
-rw-r--r--include/drm/drm_bridge.h169
-rw-r--r--include/drm/drm_cache.h2
-rw-r--r--include/drm/drm_color_mgmt.h25
-rw-r--r--include/drm/drm_connector.h49
-rw-r--r--include/drm/drm_crtc.h1
-rw-r--r--include/drm/drm_dp_helper.h152
-rw-r--r--include/drm/drm_dp_mst_helper.h212
-rw-r--r--include/drm/drm_drv.h2
-rw-r--r--include/drm/drm_edid.h5
-rw-r--r--include/drm/drm_encoder.h13
-rw-r--r--include/drm/drm_fb_cma_helper.h2
-rw-r--r--include/drm/drm_fb_helper.h47
-rw-r--r--include/drm/drm_file.h3
-rw-r--r--include/drm/drm_fourcc.h8
-rw-r--r--include/drm/drm_gem.h13
-rw-r--r--include/drm/drm_gem_shmem_helper.h35
-rw-r--r--include/drm/drm_gem_ttm_helper.h21
-rw-r--r--include/drm/drm_gem_vram_helper.h115
-rw-r--r--include/drm/drm_legacy.h29
-rw-r--r--include/drm/drm_mipi_dsi.h4
-rw-r--r--include/drm/drm_mm.h7
-rw-r--r--include/drm/drm_modeset_helper_vtables.h7
-rw-r--r--include/drm/drm_modeset_lock.h9
-rw-r--r--include/drm/drm_of.h21
-rw-r--r--include/drm/drm_os_linux.h55
-rw-r--r--include/drm/drm_panel.h69
-rw-r--r--include/drm/drm_pci.h19
-rw-r--r--include/drm/drm_plane.h31
-rw-r--r--include/drm/drm_prime.h2
-rw-r--r--include/drm/drm_print.h318
-rw-r--r--include/drm/drm_rect.h33
-rw-r--r--include/drm/drm_scdc_helper.h6
-rw-r--r--include/drm/drm_simple_kms_helper.h2
-rw-r--r--include/drm/drm_util.h2
-rw-r--r--include/drm/drm_vblank.h15
-rw-r--r--include/drm/drm_vram_mm_helper.h104
-rw-r--r--include/drm/gpu_scheduler.h26
-rw-r--r--include/drm/i915_drm.h18
-rw-r--r--include/drm/i915_mei_hdcp_interface.h42
-rw-r--r--include/drm/i915_pciids.h31
-rw-r--r--include/drm/task_barrier.h107
-rw-r--r--include/drm/ttm/ttm_bo_api.h90
-rw-r--r--include/drm/ttm/ttm_bo_driver.h32
-rw-r--r--include/drm/ttm/ttm_execbuf_util.h2
-rw-r--r--include/drm/ttm/ttm_memory.h1
-rw-r--r--include/drm/ttm/ttm_page_alloc.h2
-rw-r--r--include/dt-bindings/clock/aspeed-clock.h2
-rw-r--r--include/dt-bindings/clock/ast2600-clock.h4
-rw-r--r--include/dt-bindings/clock/axg-audio-clkc.h10
-rw-r--r--include/dt-bindings/clock/bm1880-clock.h82
-rw-r--r--include/dt-bindings/clock/dra7.h23
-rw-r--r--include/dt-bindings/clock/imx7ulp-clock.h1
-rw-r--r--include/dt-bindings/clock/imx8mm-clock.h19
-rw-r--r--include/dt-bindings/clock/imx8mn-clock.h23
-rw-r--r--include/dt-bindings/clock/imx8mp-clock.h300
-rw-r--r--include/dt-bindings/clock/imx8mq-clock.h24
-rw-r--r--include/dt-bindings/clock/marvell,mmp2.h2
-rw-r--r--include/dt-bindings/clock/meson8-ddr-clkc.h4
-rw-r--r--include/dt-bindings/clock/omap4.h11
-rw-r--r--include/dt-bindings/clock/omap5.h16
-rw-r--r--include/dt-bindings/clock/px30-cru.h2
-rw-r--r--include/dt-bindings/clock/qcom,dispcc-sc7180.h46
-rw-r--r--include/dt-bindings/clock/qcom,dispcc-sdm845.h13
-rw-r--r--include/dt-bindings/clock/qcom,gcc-ipq6018.h262
-rw-r--r--include/dt-bindings/clock/qcom,gcc-msm8998.h7
-rw-r--r--include/dt-bindings/clock/qcom,gcc-sc7180.h155
-rw-r--r--include/dt-bindings/clock/qcom,gpucc-sc7180.h21
-rw-r--r--include/dt-bindings/clock/qcom,mmcc-msm8998.h210
-rw-r--r--include/dt-bindings/clock/qcom,q6sstopcc-qcs404.h18
-rw-r--r--include/dt-bindings/clock/qcom,videocc-sc7180.h23
-rw-r--r--include/dt-bindings/clock/r8a774b1-cpg-mssr.h57
-rw-r--r--include/dt-bindings/clock/r8a77961-cpg-mssr.h65
-rw-r--r--include/dt-bindings/clock/sun50i-a64-ccu.h1
-rw-r--r--include/dt-bindings/clock/sun6i-a31-ccu.h2
-rw-r--r--include/dt-bindings/clock/sun8i-a23-a33-ccu.h2
-rw-r--r--include/dt-bindings/clock/sun8i-h3-ccu.h2
-rw-r--r--include/dt-bindings/clock/sun8i-r40-ccu.h2
-rw-r--r--include/dt-bindings/clock/tegra124-car-common.h3
-rw-r--r--include/dt-bindings/clock/tegra210-car.h6
-rw-r--r--include/dt-bindings/clock/ti-dra7-atl.h (renamed from include/dt-bindings/clk/ti-dra7-atl.h)0
-rw-r--r--include/dt-bindings/clock/x1000-cgu.h44
-rw-r--r--include/dt-bindings/clock/xlnx-versal-clk.h123
-rw-r--r--include/dt-bindings/display/sdtv-standards.h76
-rw-r--r--include/dt-bindings/dma/x1000-dma.h40
-rw-r--r--include/dt-bindings/dma/x1830-dma.h39
-rw-r--r--include/dt-bindings/gpio/meson-a1-gpio.h73
-rw-r--r--include/dt-bindings/iio/adc/ingenic,adc.h1
-rw-r--r--include/dt-bindings/interconnect/qcom,msm8916.h100
-rw-r--r--include/dt-bindings/interconnect/qcom,msm8974.h146
-rw-r--r--include/dt-bindings/interrupt-controller/aspeed-scu-ic.h23
-rw-r--r--include/dt-bindings/media/tvp5150.h2
-rw-r--r--include/dt-bindings/memory/tegra186-mc.h139
-rw-r--r--include/dt-bindings/memory/tegra194-mc.h410
-rw-r--r--include/dt-bindings/net/qca-ar803x.h13
-rw-r--r--include/dt-bindings/net/ti-dp83869.h42
-rw-r--r--include/dt-bindings/phy/phy.h1
-rw-r--r--include/dt-bindings/pinctrl/at91.h4
-rw-r--r--include/dt-bindings/pinctrl/rockchip.h8
-rw-r--r--include/dt-bindings/pmu/exynos_ppmu.h25
-rw-r--r--include/dt-bindings/power/mt6765-power.h14
-rw-r--r--include/dt-bindings/power/qcom-rpmpd.h33
-rw-r--r--include/dt-bindings/power/r8a774b1-sysc.h26
-rw-r--r--include/dt-bindings/power/r8a77961-sysc.h32
-rw-r--r--include/dt-bindings/regulator/dlg,da9063-regulator.h16
-rw-r--r--include/dt-bindings/reset-controller/mt2712-resets.h22
-rw-r--r--include/dt-bindings/reset-controller/mt8183-resets.h17
-rw-r--r--include/dt-bindings/reset/amlogic,meson-a1-reset.h74
-rw-r--r--include/dt-bindings/reset/amlogic,meson-axg-audio-arb.h2
-rw-r--r--include/dt-bindings/reset/amlogic,meson-g12a-audio-reset.h15
-rw-r--r--include/dt-bindings/reset/amlogic,meson8b-reset.h6
-rw-r--r--include/dt-bindings/reset/nuvoton,npcm7xx-reset.h91
-rw-r--r--include/dt-bindings/reset/qcom,gcc-ipq6018.h157
-rw-r--r--include/dt-bindings/reset/realtek,rtd1295.h111
-rw-r--r--include/dt-bindings/sound/samsung-i2s.h12
-rw-r--r--include/dt-bindings/thermal/thermal_exynos.h2
-rw-r--r--include/keys/system_keyring.h6
-rw-r--r--include/keys/trusted_tpm.h (renamed from include/keys/trusted.h)49
-rw-r--r--include/kunit/assert.h357
-rw-r--r--include/kunit/test.h1507
-rw-r--r--include/kunit/try-catch.h65
-rw-r--r--include/kvm/arm_hypercalls.h43
-rw-r--r--include/kvm/arm_psci.h2
-rw-r--r--include/kvm/arm_vgic.h9
-rw-r--r--include/linux/acpi.h23
-rw-r--r--include/linux/aer.h4
-rw-r--r--include/linux/agpgart.h2
-rw-r--r--include/linux/ahci_platform.h2
-rw-r--r--include/linux/alarmtimer.h4
-rw-r--r--include/linux/alcor_pci.h1
-rw-r--r--include/linux/arch_topology.h20
-rw-r--r--include/linux/arm-smccc.h75
-rw-r--r--include/linux/arm_sdei.h6
-rw-r--r--include/linux/atmel-isc-media.h58
-rw-r--r--include/linux/attribute_container.h7
-rw-r--r--include/linux/audit.h5
-rw-r--r--include/linux/b1pcmcia.h21
-rw-r--r--include/linux/backing-dev.h10
-rw-r--r--include/linux/bio.h16
-rw-r--r--include/linux/bitmap.h92
-rw-r--r--include/linux/bitops.h17
-rw-r--r--include/linux/bits.h2
-rw-r--r--include/linux/blk-cgroup.h201
-rw-r--r--include/linux/blk-mq.h305
-rw-r--r--include/linux/blk_types.h28
-rw-r--r--include/linux/blkdev.h93
-rw-r--r--include/linux/blktrace_api.h18
-rw-r--r--include/linux/bootconfig.h227
-rw-r--r--include/linux/bpf-cgroup.h12
-rw-r--r--include/linux/bpf.h511
-rw-r--r--include/linux/bpf_types.h86
-rw-r--r--include/linux/bpf_verifier.h22
-rw-r--r--include/linux/brcmphy.h10
-rw-r--r--include/linux/bsearch.h2
-rw-r--r--include/linux/btf.h85
-rw-r--r--include/linux/buffer_head.h6
-rw-r--r--include/linux/build_bug.h4
-rw-r--r--include/linux/bvec.h44
-rw-r--r--include/linux/can/dev.h34
-rw-r--r--include/linux/can/platform/mcp251x.h22
-rw-r--r--include/linux/can/rx-offload.h7
-rw-r--r--include/linux/ceph/libceph.h11
-rw-r--r--include/linux/ceph/mdsmap.h11
-rw-r--r--include/linux/ceph/messenger.h7
-rw-r--r--include/linux/ceph/osd_client.h1
-rw-r--r--include/linux/ceph/osdmap.h4
-rw-r--r--include/linux/ceph/rados.h8
-rw-r--r--include/linux/cgroup-defs.h19
-rw-r--r--include/linux/cgroup.h28
-rw-r--r--include/linux/clk-provider.h445
-rw-r--r--include/linux/clk.h3
-rw-r--r--include/linux/clk/tegra.h24
-rw-r--r--include/linux/clk/ti.h3
-rw-r--r--include/linux/clock_cooling.h2
-rw-r--r--include/linux/clocksource.h93
-rw-r--r--include/linux/compat.h77
-rw-r--r--include/linux/completion.h8
-rw-r--r--include/linux/console.h2
-rw-r--r--include/linux/const.h5
-rw-r--r--include/linux/context_tracking.h39
-rw-r--r--include/linux/context_tracking_state.h21
-rw-r--r--include/linux/coresight.h6
-rw-r--r--include/linux/counter.h76
-rw-r--r--include/linux/cpu.h29
-rw-r--r--include/linux/cpu_cooling.h40
-rw-r--r--include/linux/cpufreq.h51
-rw-r--r--include/linux/cpuhotplug.h5
-rw-r--r--include/linux/cpuidle.h35
-rw-r--r--include/linux/cpumask.h11
-rw-r--r--include/linux/crypto.h965
-rw-r--r--include/linux/dax.h14
-rw-r--r--include/linux/dcache.h5
-rw-r--r--include/linux/debugfs.h171
-rw-r--r--include/linux/dev_printk.h235
-rw-r--r--include/linux/devfreq.h149
-rw-r--r--include/linux/device-mapper.h27
-rw-r--r--include/linux/device.h1019
-rw-r--r--include/linux/device/bus.h288
-rw-r--r--include/linux/device/class.h266
-rw-r--r--include/linux/device/driver.h292
-rw-r--r--include/linux/device_cgroup.h19
-rw-r--r--include/linux/dim.h63
-rw-r--r--include/linux/dio.h5
-rw-r--r--include/linux/dma-buf.h90
-rw-r--r--include/linux/dma-direct.h37
-rw-r--r--include/linux/dma-heap.h59
-rw-r--r--include/linux/dma-mapping.h15
-rw-r--r--include/linux/dma-noncoherent.h22
-rw-r--r--include/linux/dma/k3-psil.h71
-rw-r--r--include/linux/dma/k3-udma-glue.h134
-rw-r--r--include/linux/dma/sprd-dma.h4
-rw-r--r--include/linux/dma/ti-cppi5.h1059
-rw-r--r--include/linux/dmaengine.h166
-rw-r--r--include/linux/dmar.h16
-rw-r--r--include/linux/dmi.h4
-rw-r--r--include/linux/dsa/8021q.h7
-rw-r--r--include/linux/dsa/sja1105.h6
-rw-r--r--include/linux/dw_apb_timer.h1
-rw-r--r--include/linux/edac.h155
-rw-r--r--include/linux/efi.h962
-rw-r--r--include/linux/efi_embedded_fw.h43
-rw-r--r--include/linux/elfnote.h2
-rw-r--r--include/linux/energy_model.h3
-rw-r--r--include/linux/errname.h16
-rw-r--r--include/linux/etherdevice.h1
-rw-r--r--include/linux/ethtool_netlink.h17
-rw-r--r--include/linux/eventfd.h14
-rw-r--r--include/linux/eventpoll.h9
-rw-r--r--include/linux/export.h117
-rw-r--r--include/linux/exportfs.h5
-rw-r--r--include/linux/extable.h10
-rw-r--r--include/linux/extcon.h30
-rw-r--r--include/linux/f2fs_fs.h5
-rw-r--r--include/linux/falloc.h24
-rw-r--r--include/linux/fb.h6
-rw-r--r--include/linux/fcntl.h16
-rw-r--r--include/linux/file.h3
-rw-r--r--include/linux/filter.h100
-rw-r--r--include/linux/firmware.h9
-rw-r--r--include/linux/firmware/broadcom/tee_bnxt_fw.h14
-rw-r--r--include/linux/firmware/intel/stratix10-svc-client.h8
-rw-r--r--include/linux/firmware/meson/meson_sm.h15
-rw-r--r--include/linux/firmware/xlnx-zynqmp.h25
-rw-r--r--include/linux/fs.h68
-rw-r--r--include/linux/fs_context.h32
-rw-r--r--include/linux/fs_parser.h101
-rw-r--r--include/linux/fscrypt.h161
-rw-r--r--include/linux/fsl/enetc_mdio.h55
-rw-r--r--include/linux/fsl/mc.h2
-rw-r--r--include/linux/fsl/ptp_qoriq.h1
-rw-r--r--include/linux/fsverity.h7
-rw-r--r--include/linux/ftrace.h157
-rw-r--r--include/linux/futex.h57
-rw-r--r--include/linux/fwnode.h54
-rw-r--r--include/linux/genalloc.h2
-rw-r--r--include/linux/genhd.h529
-rw-r--r--include/linux/gfp.h2
-rw-r--r--include/linux/gpio/consumer.h61
-rw-r--r--include/linux/gpio/driver.h39
-rw-r--r--include/linux/hardirq.h8
-rw-r--r--include/linux/hid.h2
-rw-r--r--include/linux/hmm.h190
-rw-r--r--include/linux/host1x.h54
-rw-r--r--include/linux/hrtimer.h17
-rw-r--r--include/linux/huge_mm.h8
-rw-r--r--include/linux/hugetlb.h143
-rw-r--r--include/linux/hwmon.h26
-rw-r--r--include/linux/hyperv.h35
-rw-r--r--include/linux/i2c-pxa.h18
-rw-r--r--include/linux/i2c.h148
-rw-r--r--include/linux/icmp.h15
-rw-r--r--include/linux/icmpv6.h24
-rw-r--r--include/linux/ide.h6
-rw-r--r--include/linux/ieee80211.h4
-rw-r--r--include/linux/if_ether.h8
-rw-r--r--include/linux/iio/accel/kxcjk_1013.h3
-rw-r--r--include/linux/iio/adc/ad_sigma_delta.h4
-rw-r--r--include/linux/iio/buffer_impl.h6
-rw-r--r--include/linux/iio/common/st_sensors.h12
-rw-r--r--include/linux/iio/common/st_sensors_i2c.h10
-rw-r--r--include/linux/iio/frequency/adf4350.h4
-rw-r--r--include/linux/iio/iio.h4
-rw-r--r--include/linux/iio/imu/adis.h221
-rw-r--r--include/linux/iio/magnetometer/ak8975.h17
-rw-r--r--include/linux/iio/types.h2
-rw-r--r--include/linux/ima.h23
-rw-r--r--include/linux/inet_diag.h18
-rw-r--r--include/linux/initrd.h2
-rw-r--r--include/linux/input.h1
-rw-r--r--include/linux/intel-iommu.h27
-rw-r--r--include/linux/intel-svm.h2
-rw-r--r--include/linux/interconnect-provider.h14
-rw-r--r--include/linux/interrupt.h18
-rw-r--r--include/linux/io-mapping.h7
-rw-r--r--include/linux/io-pgtable.h29
-rw-r--r--include/linux/io.h6
-rw-r--r--include/linux/ioasid.h76
-rw-r--r--include/linux/ioc3.h93
-rw-r--r--include/linux/iocontext.h1
-rw-r--r--include/linux/iomap.h129
-rw-r--r--include/linux/iommu.h82
-rw-r--r--include/linux/ioport.h1
-rw-r--r--include/linux/ipmi-fru.h134
-rw-r--r--include/linux/ipmi_smi.h12
-rw-r--r--include/linux/irq.h37
-rw-r--r--include/linux/irq_work.h12
-rw-r--r--include/linux/irqchip/arm-gic-common.h2
-rw-r--r--include/linux/irqchip/arm-gic-v3.h86
-rw-r--r--include/linux/irqchip/arm-gic-v4.h52
-rw-r--r--include/linux/irqchip/ingenic.h14
-rw-r--r--include/linux/irqdomain.h15
-rw-r--r--include/linux/irqflags.h82
-rw-r--r--include/linux/isdn/capilli.h18
-rw-r--r--include/linux/isdn/capiutil.h456
-rw-r--r--include/linux/jbd2.h121
-rw-r--r--include/linux/jiffies.h24
-rw-r--r--include/linux/journal-head.h21
-rw-r--r--include/linux/kasan.h40
-rw-r--r--include/linux/kcov.h23
-rw-r--r--include/linux/kdb.h2
-rw-r--r--include/linux/kernel.h42
-rw-r--r--include/linux/kernel_stat.h18
-rw-r--r--include/linux/kernelcapi.h75
-rw-r--r--include/linux/kernfs.h57
-rw-r--r--include/linux/ktime.h46
-rw-r--r--include/linux/kvm_host.h85
-rw-r--r--include/linux/kvm_types.h11
-rw-r--r--include/linux/led-class-flash.h41
-rw-r--r--include/linux/leds-bd2802.h1
-rw-r--r--include/linux/leds.h106
-rw-r--r--include/linux/libata.h195
-rw-r--r--include/linux/libfdt_env.h5
-rw-r--r--include/linux/libnvdimm.h7
-rw-r--r--include/linux/license.h1
-rw-r--r--include/linux/limits.h13
-rw-r--r--include/linux/linkage.h249
-rw-r--r--include/linux/linkmode.h6
-rw-r--r--include/linux/list.h146
-rw-r--r--include/linux/list_nulls.h30
-rw-r--r--include/linux/livepatch.h17
-rw-r--r--include/linux/lockd/debug.h4
-rw-r--r--include/linux/lockd/lockd.h4
-rw-r--r--include/linux/lockdep.h115
-rw-r--r--include/linux/logic_pio.h4
-rw-r--r--include/linux/lsm_audit.h2
-rw-r--r--include/linux/lsm_hooks.h15
-rw-r--r--include/linux/mailbox/mtk-cmdq-mailbox.h11
-rw-r--r--include/linux/math64.h20
-rw-r--r--include/linux/memblock.h10
-rw-r--r--include/linux/memcontrol.h61
-rw-r--r--include/linux/memory.h29
-rw-r--r--include/linux/memory_hotplug.h25
-rw-r--r--include/linux/memregion.h23
-rw-r--r--include/linux/mfd/abx500/ab8500-gpadc.h75
-rw-r--r--include/linux/mfd/arizona/registers.h7
-rw-r--r--include/linux/mfd/core.h49
-rw-r--r--include/linux/mfd/cros_ec.h35
-rw-r--r--include/linux/mfd/db8500-prcmu.h22
-rw-r--r--include/linux/mfd/dbx500-prcmu.h37
-rw-r--r--include/linux/mfd/madera/core.h11
-rw-r--r--include/linux/mfd/max77620.h1
-rw-r--r--include/linux/mfd/mt6397/rtc.h79
-rw-r--r--include/linux/mfd/rk808.h2
-rw-r--r--include/linux/mfd/rohm-bd70528.h19
-rw-r--r--include/linux/mfd/rohm-bd71828.h423
-rw-r--r--include/linux/mfd/rohm-bd718x7.h6
-rw-r--r--include/linux/mfd/rohm-generic.h70
-rw-r--r--include/linux/mfd/rohm-shared.h21
-rw-r--r--include/linux/mfd/stm32-timers.h12
-rw-r--r--include/linux/mfd/syscon.h14
-rw-r--r--include/linux/mfd/syscon/atmel-matrix.h1
-rw-r--r--include/linux/mfd/tmio.h3
-rw-r--r--include/linux/mfd/twl.h12
-rw-r--r--include/linux/mfd/wcd934x/registers.h531
-rw-r--r--include/linux/mfd/wcd934x/wcd934x.h31
-rw-r--r--include/linux/mii.h50
-rw-r--r--include/linux/mii_timestamper.h121
-rw-r--r--include/linux/min_heap.h134
-rw-r--r--include/linux/miscdevice.h2
-rw-r--r--include/linux/mlx4/cq.h5
-rw-r--r--include/linux/mlx4/device.h2
-rw-r--r--include/linux/mlx5/device.h23
-rw-r--r--include/linux/mlx5/driver.h31
-rw-r--r--include/linux/mlx5/fs.h23
-rw-r--r--include/linux/mlx5/mlx5_ifc.h276
-rw-r--r--include/linux/mm.h210
-rw-r--r--include/linux/mm_types.h9
-rw-r--r--include/linux/mmc/card.h3
-rw-r--r--include/linux/mmc/host.h1
-rw-r--r--include/linux/mmc/sdio_ids.h4
-rw-r--r--include/linux/mmc/slot-gpio.h5
-rw-r--r--include/linux/mmu_notifier.h197
-rw-r--r--include/linux/mmzone.h96
-rw-r--r--include/linux/mod_devicetable.h8
-rw-r--r--include/linux/module.h25
-rw-r--r--include/linux/moduleloader.h2
-rw-r--r--include/linux/moduleparam.h86
-rw-r--r--include/linux/most.h337
-rw-r--r--include/linux/mroute_base.h28
-rw-r--r--include/linux/msdos_partition.h50
-rw-r--r--include/linux/mtd/flashchip.h2
-rw-r--r--include/linux/mtd/spi-nor.h76
-rw-r--r--include/linux/mtio.h60
-rw-r--r--include/linux/mutex.h7
-rw-r--r--include/linux/namei.h14
-rw-r--r--include/linux/nd.h2
-rw-r--r--include/linux/net.h1
-rw-r--r--include/linux/netdev_features.h9
-rw-r--r--include/linux/netdevice.h189
-rw-r--r--include/linux/netfilter.h41
-rw-r--r--include/linux/netfilter/ipset/ip_set.h214
-rw-r--r--include/linux/netfilter/ipset/ip_set_bitmap.h14
-rw-r--r--include/linux/netfilter/ipset/ip_set_getport.h3
-rw-r--r--include/linux/netfilter/nfnetlink.h2
-rw-r--r--include/linux/netlink.h13
-rw-r--r--include/linux/nfs4.h26
-rw-r--r--include/linux/nfs_fs.h33
-rw-r--r--include/linux/nfs_fs_sb.h7
-rw-r--r--include/linux/nfs_xdr.h43
-rw-r--r--include/linux/notifier.h4
-rw-r--r--include/linux/nsproxy.h2
-rw-r--r--include/linux/nvme-fc-driver.h4
-rw-r--r--include/linux/nvme-fc.h182
-rw-r--r--include/linux/nvme.h60
-rw-r--r--include/linux/nvmem-consumer.h11
-rw-r--r--include/linux/nvmem-provider.h3
-rw-r--r--include/linux/of.h8
-rw-r--r--include/linux/of_address.h21
-rw-r--r--include/linux/of_clk.h11
-rw-r--r--include/linux/of_mdio.h6
-rw-r--r--include/linux/of_net.h7
-rw-r--r--include/linux/of_pci.h5
-rw-r--r--include/linux/omap-dma.h18
-rw-r--r--include/linux/padata.h56
-rw-r--r--include/linux/page-flags.h2
-rw-r--r--include/linux/page-isolation.h8
-rw-r--r--include/linux/pagemap.h28
-rw-r--r--include/linux/pagewalk.h58
-rw-r--r--include/linux/parport.h1
-rw-r--r--include/linux/part_stat.h115
-rw-r--r--include/linux/pci-ats.h78
-rw-r--r--include/linux/pci-epc.h2
-rw-r--r--include/linux/pci.h65
-rw-r--r--include/linux/pci_ids.h4
-rw-r--r--include/linux/pe.h21
-rw-r--r--include/linux/percpu-defs.h3
-rw-r--r--include/linux/percpu-refcount.h42
-rw-r--r--include/linux/percpu-rwsem.h83
-rw-r--r--include/linux/perf/arm_pmu.h1
-rw-r--r--include/linux/perf_event.h103
-rw-r--r--include/linux/phy.h154
-rw-r--r--include/linux/phy/phy-dp.h95
-rw-r--r--include/linux/phy/phy.h17
-rw-r--r--include/linux/phy/tegra/xusb.h6
-rw-r--r--include/linux/phy_led_triggers.h2
-rw-r--r--include/linux/phylink.h27
-rw-r--r--include/linux/pid.h7
-rw-r--r--include/linux/pid_namespace.h2
-rw-r--r--include/linux/pinctrl/consumer.h6
-rw-r--r--include/linux/pinctrl/machine.h5
-rw-r--r--include/linux/pipe_fs_i.h71
-rw-r--r--include/linux/platform_data/ad7266.h3
-rw-r--r--include/linux/platform_data/ads1015.h23
-rw-r--r--include/linux/platform_data/b53.h2
-rw-r--r--include/linux/platform_data/bd6107.h1
-rw-r--r--include/linux/platform_data/cros_ec_commands.h285
-rw-r--r--include/linux/platform_data/cros_ec_proto.h157
-rw-r--r--include/linux/platform_data/cros_ec_sensorhub.h30
-rw-r--r--include/linux/platform_data/crypto-atmel.h23
-rw-r--r--include/linux/platform_data/dmtimer-omap.h6
-rw-r--r--include/linux/platform_data/ehci-sh.h16
-rw-r--r--include/linux/platform_data/eth_ixp4xx.h19
-rw-r--r--include/linux/platform_data/gpio_backlight.h3
-rw-r--r--include/linux/platform_data/hsmmc-omap.h3
-rw-r--r--include/linux/platform_data/i2c-pxa.h4
-rw-r--r--include/linux/platform_data/intel-spi.h1
-rw-r--r--include/linux/platform_data/microchip-ksz.h2
-rw-r--r--include/linux/platform_data/mlxreg.h2
-rw-r--r--include/linux/platform_data/mv_usb.h8
-rw-r--r--include/linux/platform_data/pixcir_i2c_ts.h64
-rw-r--r--include/linux/platform_data/spi-mt65xx.h1
-rw-r--r--include/linux/platform_data/spi-omap2-mcspi.h1
-rw-r--r--include/linux/platform_data/st_sensors_pdata.h2
-rw-r--r--include/linux/platform_data/tc35876x.h11
-rw-r--r--include/linux/platform_data/ti-prm.h21
-rw-r--r--include/linux/platform_data/ti-sysc.h3
-rw-r--r--include/linux/platform_data/usb3503.h3
-rw-r--r--include/linux/platform_data/wan_ixp4xx_hss.h17
-rw-r--r--include/linux/platform_data/wilco-ec.h15
-rw-r--r--include/linux/platform_data/x86/asus-wmi.h1
-rw-r--r--include/linux/platform_device.h77
-rw-r--r--include/linux/pm.h2
-rw-r--r--include/linux/pm_domain.h13
-rw-r--r--include/linux/pm_opp.h13
-rw-r--r--include/linux/pm_qos.h167
-rw-r--r--include/linux/pm_runtime.h12
-rw-r--r--include/linux/pm_wakeup.h9
-rw-r--r--include/linux/pmbus.h11
-rw-r--r--include/linux/pnp.h2
-rw-r--r--include/linux/posix-clock.h19
-rw-r--r--include/linux/posix-timers.h2
-rw-r--r--include/linux/power/max17042_battery.h48
-rw-r--r--include/linux/power/smartreflex.h3
-rw-r--r--include/linux/power_supply.h10
-rw-r--r--include/linux/preempt.h30
-rw-r--r--include/linux/printk.h3
-rw-r--r--include/linux/proc_fs.h27
-rw-r--r--include/linux/proc_ns.h7
-rw-r--r--include/linux/property.h194
-rw-r--r--include/linux/psci.h11
-rw-r--r--include/linux/psi.h2
-rw-r--r--include/linux/psi_types.h10
-rw-r--r--include/linux/psp-tee.h91
-rw-r--r--include/linux/ptdump.h22
-rw-r--r--include/linux/ptp_clock_kernel.h9
-rw-r--r--include/linux/ptr_ring.h1
-rw-r--r--include/linux/pwm.h5
-rw-r--r--include/linux/pxa2xx_ssp.h2
-rw-r--r--include/linux/qcom_scm.h133
-rw-r--r--include/linux/qed/common_hsi.h44
-rw-r--r--include/linux/qed/eth_common.h78
-rw-r--r--include/linux/qed/iscsi_common.h64
-rw-r--r--include/linux/qed/qed_if.h14
-rw-r--r--include/linux/qed/qed_ll2_if.h7
-rw-r--r--include/linux/qed/storage_common.h3
-rw-r--r--include/linux/quota.h2
-rw-r--r--include/linux/quotaops.h14
-rw-r--r--include/linux/raid/detect.h3
-rw-r--r--include/linux/raid/pq.h7
-rw-r--r--include/linux/ramfs.h4
-rw-r--r--include/linux/random.h24
-rw-r--r--include/linux/rbtree_augmented.h6
-rw-r--r--include/linux/rcu_segcblist.h2
-rw-r--r--include/linux/rculist.h42
-rw-r--r--include/linux/rculist_bl.h28
-rw-r--r--include/linux/rculist_nulls.h64
-rw-r--r--include/linux/rcupdate.h34
-rw-r--r--include/linux/rcutiny.h3
-rw-r--r--include/linux/rcutree.h3
-rw-r--r--include/linux/rcuwait.h12
-rw-r--r--include/linux/refcount.h269
-rw-r--r--include/linux/regmap.h49
-rw-r--r--include/linux/regulator/ab8500.h3
-rw-r--r--include/linux/regulator/consumer.h7
-rw-r--r--include/linux/regulator/driver.h4
-rw-r--r--include/linux/regulator/fixed.h1
-rw-r--r--include/linux/remoteproc/mtk_scp.h66
-rw-r--r--include/linux/resctrl.h14
-rw-r--r--include/linux/reset-controller.h3
-rw-r--r--include/linux/reset.h46
-rw-r--r--include/linux/resource_ext.h12
-rw-r--r--include/linux/rhashtable.h2
-rw-r--r--include/linux/ring_buffer.h110
-rw-r--r--include/linux/rpmsg/mtk_rpmsg.h38
-rw-r--r--include/linux/rtc.h6
-rw-r--r--include/linux/rtc/ds1685.h12
-rw-r--r--include/linux/rtsx_pci.h1
-rw-r--r--include/linux/rwlock_api_smp.h16
-rw-r--r--include/linux/rwlock_types.h6
-rw-r--r--include/linux/rwsem.h12
-rw-r--r--include/linux/sbitmap.h9
-rw-r--r--include/linux/sched.h68
-rw-r--r--include/linux/sched/cpufreq.h4
-rw-r--r--include/linux/sched/isolation.h1
-rw-r--r--include/linux/sched/mm.h6
-rw-r--r--include/linux/sched/nohz.h2
-rw-r--r--include/linux/sched/task.h3
-rw-r--r--include/linux/sched/topology.h8
-rw-r--r--include/linux/scmi_protocol.h5
-rw-r--r--include/linux/seccomp.h9
-rw-r--r--include/linux/security.h43
-rw-r--r--include/linux/sed-opal.h1
-rw-r--r--include/linux/seq_buf.h3
-rw-r--r--include/linux/seq_file.h13
-rw-r--r--include/linux/seqlock.h4
-rw-r--r--include/linux/serial_8250.h1
-rw-r--r--include/linux/serial_core.h91
-rw-r--r--include/linux/sfp.h126
-rw-r--r--include/linux/shmem_fs.h3
-rw-r--r--include/linux/signal.h8
-rw-r--r--include/linux/skbuff.h130
-rw-r--r--include/linux/skmsg.h51
-rw-r--r--include/linux/slab.h21
-rw-r--r--include/linux/smp.h11
-rw-r--r--include/linux/soc/mediatek/mtk-cmdq.h53
-rw-r--r--include/linux/soc/mediatek/mtk_sip_svc.h25
-rw-r--r--include/linux/soc/mmp/cputype.h87
-rw-r--r--include/linux/soc/qcom/irq.h34
-rw-r--r--include/linux/soc/qcom/llcc-qcom.h94
-rw-r--r--include/linux/soc/qcom/smd-rpm.h1
-rw-r--r--include/linux/soc/samsung/exynos-pmu.h2
-rw-r--r--include/linux/soc/samsung/exynos-regs-pmu.h16
-rw-r--r--include/linux/soc/ti/k3-ringacc.h244
-rw-r--r--include/linux/socket.h25
-rw-r--r--include/linux/sort.h8
-rw-r--r--include/linux/soundwire/sdw.h30
-rw-r--r--include/linux/soundwire/sdw_intel.h167
-rw-r--r--include/linux/spi/spi.h145
-rw-r--r--include/linux/spi/spi_oc_tiny.h4
-rw-r--r--include/linux/spinlock.h35
-rw-r--r--include/linux/spinlock_api_smp.h8
-rw-r--r--include/linux/spinlock_types.h24
-rw-r--r--include/linux/splice.h3
-rw-r--r--include/linux/stackprotector.h2
-rw-r--r--include/linux/stat.h3
-rw-r--r--include/linux/stmmac.h18
-rw-r--r--include/linux/stop_machine.h16
-rw-r--r--include/linux/string.h3
-rw-r--r--include/linux/sunrpc/auth.h5
-rw-r--r--include/linux/sunrpc/auth_gss.h2
-rw-r--r--include/linux/sunrpc/cache.h42
-rw-r--r--include/linux/sunrpc/clnt.h4
-rw-r--r--include/linux/sunrpc/gss_api.h9
-rw-r--r--include/linux/sunrpc/gss_err.h3
-rw-r--r--include/linux/sunrpc/gss_krb5.h2
-rw-r--r--include/linux/sunrpc/msg_prot.h3
-rw-r--r--include/linux/sunrpc/rpc_pipe_fs.h3
-rw-r--r--include/linux/sunrpc/stats.h4
-rw-r--r--include/linux/sunrpc/svcauth.h4
-rw-r--r--include/linux/sunrpc/svcauth_gss.h2
-rw-r--r--include/linux/sunrpc/xdr.h3
-rw-r--r--include/linux/sunrpc/xprt.h7
-rw-r--r--include/linux/sunrpc/xprtsock.h4
-rw-r--r--include/linux/suspend.h4
-rw-r--r--include/linux/swab.h1
-rw-r--r--include/linux/swap.h2
-rw-r--r--include/linux/swiotlb.h11
-rw-r--r--include/linux/switchtec.h160
-rw-r--r--include/linux/sxgbe_platform.h6
-rw-r--r--include/linux/sys_soc.h1
-rw-r--r--include/linux/syscalls.h31
-rw-r--r--include/linux/sysctl.h6
-rw-r--r--include/linux/tcp.h37
-rw-r--r--include/linux/thermal.h22
-rw-r--r--include/linux/thread_info.h2
-rw-r--r--include/linux/threads.h2
-rw-r--r--include/linux/tick.h12
-rw-r--r--include/linux/time.h25
-rw-r--r--include/linux/time32.h166
-rw-r--r--include/linux/time64.h10
-rw-r--r--include/linux/time_namespace.h133
-rw-r--r--include/linux/timekeeping32.h32
-rw-r--r--include/linux/timer.h2
-rw-r--r--include/linux/tnum.h2
-rw-r--r--include/linux/tpm.h251
-rw-r--r--include/linux/trace.h8
-rw-r--r--include/linux/trace_events.h157
-rw-r--r--include/linux/trace_seq.h4
-rw-r--r--include/linux/tracefs.h1
-rw-r--r--include/linux/transport_class.h6
-rw-r--r--include/linux/tty.h2
-rw-r--r--include/linux/types.h10
-rw-r--r--include/linux/u64_stats_sync.h51
-rw-r--r--include/linux/uaccess.h16
-rw-r--r--include/linux/uio.h4
-rw-r--r--include/linux/units.h84
-rw-r--r--include/linux/usb.h11
-rw-r--r--include/linux/usb/audio-v2.h2
-rw-r--r--include/linux/usb/audio-v3.h2
-rw-r--r--include/linux/usb/ehci_def.h2
-rw-r--r--include/linux/usb/gadget.h2
-rw-r--r--include/linux/usb/gpio_vbus.h33
-rw-r--r--include/linux/usb/hcd.h2
-rw-r--r--include/linux/usb/irda.h13
-rw-r--r--include/linux/usb/pd.h33
-rw-r--r--include/linux/usb/pd_vdo.h32
-rw-r--r--include/linux/usb/quirks.h3
-rw-r--r--include/linux/usb/renesas_usbhs.h2
-rw-r--r--include/linux/usb/role.h26
-rw-r--r--include/linux/usb/tcpm.h41
-rw-r--r--include/linux/usb/tegra_usb_phy.h4
-rw-r--r--include/linux/usb/typec.h53
-rw-r--r--include/linux/usb/typec_altmode.h27
-rw-r--r--include/linux/usb/typec_mux.h35
-rw-r--r--include/linux/usb/typec_tbt.h53
-rw-r--r--include/linux/usb/ulpi.h11
-rw-r--r--include/linux/usb/usb_phy_generic.h12
-rw-r--r--include/linux/usb/usbnet.h2
-rw-r--r--include/linux/usb_usual.h2
-rw-r--r--include/linux/usbdevice_fs.h2
-rw-r--r--include/linux/user_namespace.h1
-rw-r--r--include/linux/uuid.h22
-rw-r--r--include/linux/virtio_vsock.h18
-rw-r--r--include/linux/vm_sockets.h15
-rw-r--r--include/linux/vmalloc.h20
-rw-r--r--include/linux/vmstat.h50
-rw-r--r--include/linux/vmw_vmci_api.h2
-rw-r--r--include/linux/vtime.h59
-rw-r--r--include/linux/w1.h1
-rw-r--r--include/linux/wait.h12
-rw-r--r--include/linux/workqueue.h16
-rw-r--r--include/linux/ww_mutex.h2
-rw-r--r--include/linux/xarray.h45
-rw-r--r--include/linux/zlib.h6
-rw-r--r--include/linux/zorro.h12
-rw-r--r--include/media/cec-notifier.h73
-rw-r--r--include/media/cec-pin.h10
-rw-r--r--include/media/cec.h77
-rw-r--r--include/media/dvb-usb-ids.h3
-rw-r--r--include/media/h264-ctrls.h2
-rw-r--r--include/media/hevc-ctrls.h212
-rw-r--r--include/media/i2c/smiapp.h64
-rw-r--r--include/media/rc-core.h8
-rw-r--r--include/media/rc-map.h29
-rw-r--r--include/media/v4l2-common.h54
-rw-r--r--include/media/v4l2-ctrls.h87
-rw-r--r--include/media/v4l2-dev.h4
-rw-r--r--include/media/v4l2-device.h30
-rw-r--r--include/media/v4l2-fwnode.h143
-rw-r--r--include/media/v4l2-ioctl.h55
-rw-r--r--include/media/v4l2-mc.h40
-rw-r--r--include/media/v4l2-mem2mem.h177
-rw-r--r--include/media/v4l2-rect.h8
-rw-r--r--include/media/v4l2-subdev.h4
-rw-r--r--include/media/videobuf2-core.h10
-rw-r--r--include/media/videobuf2-v4l2.h5
-rw-r--r--include/net/act_api.h47
-rw-r--r--include/net/addrconf.h14
-rw-r--r--include/net/af_rxrpc.h12
-rw-r--r--include/net/af_unix.h5
-rw-r--r--include/net/af_vsock.h47
-rw-r--r--include/net/arp.h4
-rw-r--r--include/net/bluetooth/bluetooth.h8
-rw-r--r--include/net/bluetooth/hci.h165
-rw-r--r--include/net/bluetooth/hci_core.h12
-rw-r--r--include/net/bluetooth/hci_mon.h2
-rw-r--r--include/net/bluetooth/mgmt.h17
-rw-r--r--include/net/cfg80211.h18
-rw-r--r--include/net/compat.h3
-rw-r--r--include/net/devlink.h92
-rw-r--r--include/net/dsa.h125
-rw-r--r--include/net/dsfield.h2
-rw-r--r--include/net/dst.h15
-rw-r--r--include/net/dst_ops.h3
-rw-r--r--include/net/espintcp.h39
-rw-r--r--include/net/fib_notifier.h13
-rw-r--r--include/net/fib_rules.h4
-rw-r--r--include/net/flow_dissector.h44
-rw-r--r--include/net/flow_offload.h15
-rw-r--r--include/net/garp.h2
-rw-r--r--include/net/gen_stats.h6
-rw-r--r--include/net/genetlink.h20
-rw-r--r--include/net/icmp.h6
-rw-r--r--include/net/inet_hashtables.h12
-rw-r--r--include/net/ip.h21
-rw-r--r--include/net/ip6_fib.h62
-rw-r--r--include/net/ip_fib.h34
-rw-r--r--include/net/ip_tunnels.h6
-rw-r--r--include/net/ip_vs.h2
-rw-r--r--include/net/ipv6.h10
-rw-r--r--include/net/ipv6_stubs.h6
-rw-r--r--include/net/ipx.h5
-rw-r--r--include/net/mac80211.h97
-rw-r--r--include/net/macsec.h224
-rw-r--r--include/net/mptcp.h185
-rw-r--r--include/net/mrp.h2
-rw-r--r--include/net/ndisc.h8
-rw-r--r--include/net/neighbour.h3
-rw-r--r--include/net/net_namespace.h16
-rw-r--r--include/net/netfilter/nf_conntrack_extend.h10
-rw-r--r--include/net/netfilter/nf_conntrack_helper.h2
-rw-r--r--include/net/netfilter/nf_flow_table.h89
-rw-r--r--include/net/netfilter/nf_tables.h44
-rw-r--r--include/net/netfilter/nf_tables_core.h3
-rw-r--r--include/net/netfilter/nf_tables_offload.h1
-rw-r--r--include/net/netlink.h8
-rw-r--r--include/net/netns/ipv4.h1
-rw-r--r--include/net/netns/ipv6.h3
-rw-r--r--include/net/netns/mib.h3
-rw-r--r--include/net/netns/nftables.h1
-rw-r--r--include/net/netns/sctp.h14
-rw-r--r--include/net/netprio_cgroup.h2
-rw-r--r--include/net/page_pool.h85
-rw-r--r--include/net/pie.h138
-rw-r--r--include/net/pkt_cls.h91
-rw-r--r--include/net/route.h4
-rw-r--r--include/net/sch_generic.h42
-rw-r--r--include/net/sctp/constants.h12
-rw-r--r--include/net/sctp/structs.h16
-rw-r--r--include/net/sctp/ulpevent.h16
-rw-r--r--include/net/smc.h7
-rw-r--r--include/net/snmp.h6
-rw-r--r--include/net/sock.h105
-rw-r--r--include/net/tcp.h102
-rw-r--r--include/net/tls.h81
-rw-r--r--include/net/tls_toe.h77
-rw-r--r--include/net/udp.h12
-rw-r--r--include/net/vsock_addr.h2
-rw-r--r--include/net/x25.h3
-rw-r--r--include/net/xdp_priv.h4
-rw-r--r--include/net/xdp_sock.h62
-rw-r--r--include/net/xfrm.h11
-rw-r--r--include/rdma/ib_cm.h66
-rw-r--r--include/rdma/ib_mad.h40
-rw-r--r--include/rdma/ib_umem.h8
-rw-r--r--include/rdma/ib_umem_odp.h88
-rw-r--r--include/rdma/ib_verbs.h137
-rw-r--r--include/rdma/iba.h146
-rw-r--r--include/rdma/ibta_vol1_c12.h213
-rw-r--r--include/rdma/rdmavt_qp.h22
-rw-r--r--include/rdma/restrack.h5
-rw-r--r--include/rdma/uverbs_named_ioctl.h6
-rw-r--r--include/rdma/uverbs_std_types.h13
-rw-r--r--include/rdma/uverbs_types.h34
-rw-r--r--include/scsi/scsi_cmnd.h5
-rw-r--r--include/scsi/scsi_device.h6
-rw-r--r--include/scsi/scsi_host.h19
-rw-r--r--include/scsi/scsi_ioctl.h1
-rw-r--r--include/scsi/scsicam.h7
-rw-r--r--include/scsi/sg.h30
-rw-r--r--include/soc/fsl/cpm.h171
-rw-r--r--include/soc/fsl/qe/qe.h59
-rw-r--r--include/soc/fsl/qe/qe_ic.h135
-rw-r--r--include/soc/fsl/qe/ucc_fast.h4
-rw-r--r--include/soc/fsl/qe/ucc_slow.h6
-rw-r--r--include/soc/fsl/qman.h11
-rw-r--r--include/soc/mscc/ocelot.h545
-rw-r--r--include/soc/mscc/ocelot_ana.h625
-rw-r--r--include/soc/mscc/ocelot_dev.h275
-rw-r--r--include/soc/mscc/ocelot_qsys.h270
-rw-r--r--include/soc/mscc/ocelot_sys.h144
-rw-r--r--include/soc/qcom/ocmem.h65
-rw-r--r--include/soc/sifive/sifive_l2_cache.h16
-rw-r--r--include/soc/tegra/mc.h2
-rw-r--r--include/sound/ac97_codec.h5
-rw-r--r--include/sound/aess.h53
-rw-r--r--include/sound/control.h10
-rw-r--r--include/sound/core.h8
-rw-r--r--include/sound/dmaengine_pcm.h5
-rw-r--r--include/sound/hda_codec.h2
-rw-r--r--include/sound/hda_regmap.h3
-rw-r--r--include/sound/hdaudio.h81
-rw-r--r--include/sound/info.h2
-rw-r--r--include/sound/initval.h6
-rw-r--r--include/sound/intel-dsp-config.h34
-rw-r--r--include/sound/memalloc.h2
-rw-r--r--include/sound/pcm.h94
-rw-r--r--include/sound/pxa2xx-lib.h26
-rw-r--r--include/sound/rawmidi.h6
-rw-r--r--include/sound/rt5682.h1
-rw-r--r--include/sound/seq_midi_emul.h3
-rw-r--r--include/sound/simple_card_utils.h1
-rw-r--r--include/sound/soc-acpi-intel-match.h9
-rw-r--r--include/sound/soc-acpi.h25
-rw-r--r--include/sound/soc-component.h52
-rw-r--r--include/sound/soc-dai.h3
-rw-r--r--include/sound/soc-dapm.h1
-rw-r--r--include/sound/soc-dpcm.h18
-rw-r--r--include/sound/soc.h73
-rw-r--r--include/sound/sof.h12
-rw-r--r--include/sound/sof/channel_map.h61
-rw-r--r--include/sound/sof/dai-imx.h54
-rw-r--r--include/sound/sof/dai.h3
-rw-r--r--include/sound/sof/header.h2
-rw-r--r--include/sound/sof/info.h15
-rw-r--r--include/sound/sof/pm.h8
-rw-r--r--include/sound/sof/stream.h4
-rw-r--r--include/sound/sof/topology.h27
-rw-r--r--include/sound/timer.h10
-rw-r--r--include/sound/vx_core.h9
-rw-r--r--include/sound/wm8904.h2
-rw-r--r--include/target/target_core_base.h1
-rw-r--r--include/trace/bpf_probe.h3
-rw-r--r--include/trace/events/afs.h14
-rw-r--r--include/trace/events/bcache.h3
-rw-r--r--include/trace/events/bridge.h12
-rw-r--r--include/trace/events/btrfs.h143
-rw-r--r--include/trace/events/cgroup.h6
-rw-r--r--include/trace/events/ext4.h40
-rw-r--r--include/trace/events/f2fs.h103
-rw-r--r--include/trace/events/filemap.h2
-rw-r--r--include/trace/events/fsi.h6
-rw-r--r--include/trace/events/fsi_master_aspeed.h77
-rw-r--r--include/trace/events/huge_memory.h3
-rw-r--r--include/trace/events/intel_iommu.h48
-rw-r--r--include/trace/events/io_uring.h466
-rw-r--r--include/trace/events/jbd2.h16
-rw-r--r--include/trace/events/kmem.h51
-rw-r--r--include/trace/events/page_pool.h44
-rw-r--r--include/trace/events/power.h59
-rw-r--r--include/trace/events/preemptirq.h8
-rw-r--r--include/trace/events/pwm.h58
-rw-r--r--include/trace/events/rcu.h116
-rw-r--r--include/trace/events/rdma_core.h394
-rw-r--r--include/trace/events/rpcgss.h45
-rw-r--r--include/trace/events/rpcrdma.h240
-rw-r--r--include/trace/events/rpm.h6
-rw-r--r--include/trace/events/sched.h53
-rw-r--r--include/trace/events/scmi.h90
-rw-r--r--include/trace/events/sctp.h9
-rw-r--r--include/trace/events/sock.h5
-rw-r--r--include/trace/events/sunrpc.h150
-rw-r--r--include/trace/events/timer.h19
-rw-r--r--include/trace/events/v4l2.h2
-rw-r--r--include/trace/events/wbt.h12
-rw-r--r--include/trace/events/workqueue.h50
-rw-r--r--include/trace/events/writeback.h177
-rw-r--r--include/trace/events/xdp.h151
-rw-r--r--include/trace/events/xen.h6
-rw-r--r--include/trace/trace_events.h82
-rw-r--r--include/uapi/asm-generic/ipcbuf.h2
-rw-r--r--include/uapi/asm-generic/mman-common.h2
-rw-r--r--include/uapi/asm-generic/msgbuf.h14
-rw-r--r--include/uapi/asm-generic/posix_types.h3
-rw-r--r--include/uapi/asm-generic/sembuf.h8
-rw-r--r--include/uapi/asm-generic/shmbuf.h12
-rw-r--r--include/uapi/asm-generic/unistd.h7
-rw-r--r--include/uapi/drm/amdgpu_drm.h5
-rw-r--r--include/uapi/drm/drm.h3
-rw-r--r--include/uapi/drm/drm_fourcc.h52
-rw-r--r--include/uapi/drm/exynos_drm.h4
-rw-r--r--include/uapi/drm/i915_drm.h160
-rw-r--r--include/uapi/drm/nouveau_drm.h1
-rw-r--r--include/uapi/drm/omap_drm.h18
-rw-r--r--include/uapi/drm/v3d_drm.h8
-rw-r--r--include/uapi/drm/vmwgfx_drm.h21
-rw-r--r--include/uapi/linux/acct.h2
-rw-r--r--include/uapi/linux/audit.h2
-rw-r--r--include/uapi/linux/b1lli.h74
-rw-r--r--include/uapi/linux/batadv_packet.h2
-rw-r--r--include/uapi/linux/batman_adv.h2
-rw-r--r--include/uapi/linux/bcache.h52
-rw-r--r--include/uapi/linux/blkzoned.h17
-rw-r--r--include/uapi/linux/bpf.h284
-rw-r--r--include/uapi/linux/btf.h9
-rw-r--r--include/uapi/linux/btrfs.h42
-rw-r--r--include/uapi/linux/btrfs_tree.h23
-rw-r--r--include/uapi/linux/capability.h1
-rw-r--r--include/uapi/linux/cec-funcs.h34
-rw-r--r--include/uapi/linux/cec.h133
-rw-r--r--include/uapi/linux/chio.h11
-rw-r--r--include/uapi/linux/cyclades.h6
-rw-r--r--include/uapi/linux/dcbnl.h2
-rw-r--r--include/uapi/linux/devlink.h4
-rw-r--r--include/uapi/linux/dm-ioctl.h4
-rw-r--r--include/uapi/linux/dma-heap.h53
-rw-r--r--include/uapi/linux/elfcore.h8
-rw-r--r--include/uapi/linux/errqueue.h7
-rw-r--r--include/uapi/linux/ethtool.h17
-rw-r--r--include/uapi/linux/ethtool_netlink.h237
-rw-r--r--include/uapi/linux/fcntl.h11
-rw-r--r--include/uapi/linux/fdreg.h18
-rw-r--r--include/uapi/linux/fscrypt.h18
-rw-r--r--include/uapi/linux/gen_stats.h5
-rw-r--r--include/uapi/linux/gigaset_dev.h39
-rw-r--r--include/uapi/linux/gpio.h24
-rw-r--r--include/uapi/linux/hdlc/ioctl.h9
-rw-r--r--include/uapi/linux/hidraw.h1
-rw-r--r--include/uapi/linux/hysdn_if.h34
-rw-r--r--include/uapi/linux/idxd.h228
-rw-r--r--include/uapi/linux/if.h2
-rw-r--r--include/uapi/linux/if_bonding.h10
-rw-r--r--include/uapi/linux/if_bridge.h41
-rw-r--r--include/uapi/linux/if_link.h10
-rw-r--r--include/uapi/linux/if_macsec.h11
-rw-r--r--include/uapi/linux/in.h4
-rw-r--r--include/uapi/linux/input-event-codes.h80
-rw-r--r--include/uapi/linux/input.h1
-rw-r--r--include/uapi/linux/io_uring.h166
-rw-r--r--include/uapi/linux/iommu.h169
-rw-r--r--include/uapi/linux/kcov.h28
-rw-r--r--include/uapi/linux/kvm.h17
-rw-r--r--include/uapi/linux/lwtunnel.h41
-rw-r--r--include/uapi/linux/magic.h2
-rw-r--r--include/uapi/linux/media-bus-format.h3
-rw-r--r--include/uapi/linux/mii.h12
-rw-r--r--include/uapi/linux/msg.h6
-rw-r--r--include/uapi/linux/net_tstamp.h8
-rw-r--r--include/uapi/linux/netfilter/ipset/ip_set.h2
-rw-r--r--include/uapi/linux/netfilter/nf_conntrack_common.h12
-rw-r--r--include/uapi/linux/netfilter/nf_tables.h51
-rw-r--r--include/uapi/linux/netfilter/xt_sctp.h6
-rw-r--r--include/uapi/linux/netfilter_arp/arp_tables.h2
-rw-r--r--include/uapi/linux/netfilter_bridge/ebtables.h2
-rw-r--r--include/uapi/linux/netfilter_ipv4/ip_tables.h2
-rw-r--r--include/uapi/linux/netfilter_ipv6/ip6_tables.h2
-rw-r--r--include/uapi/linux/nl80211.h39
-rw-r--r--include/uapi/linux/openat2.h39
-rw-r--r--include/uapi/linux/openvswitch.h35
-rw-r--r--include/uapi/linux/pci_regs.h4
-rw-r--r--include/uapi/linux/perf_event.h18
-rw-r--r--include/uapi/linux/pkt_cls.h34
-rw-r--r--include/uapi/linux/pkt_sched.h70
-rw-r--r--include/uapi/linux/ppp-ioctl.h2
-rw-r--r--include/uapi/linux/ppp_defs.h18
-rw-r--r--include/uapi/linux/prctl.h4
-rw-r--r--include/uapi/linux/psp-sev.h3
-rw-r--r--include/uapi/linux/random.h4
-rw-r--r--include/uapi/linux/resource.h4
-rw-r--r--include/uapi/linux/rtc.h7
-rw-r--r--include/uapi/linux/rtnetlink.h18
-rw-r--r--include/uapi/linux/scc.h1
-rw-r--r--include/uapi/linux/sched.h70
-rw-r--r--include/uapi/linux/sctp.h31
-rw-r--r--include/uapi/linux/seccomp.h30
-rw-r--r--include/uapi/linux/sed-opal.h20
-rw-r--r--include/uapi/linux/sem.h4
-rw-r--r--include/uapi/linux/serial_core.h2
-rw-r--r--include/uapi/linux/serio.h10
-rw-r--r--include/uapi/linux/shm.h6
-rw-r--r--include/uapi/linux/snmp.h19
-rw-r--r--include/uapi/linux/stat.h2
-rw-r--r--include/uapi/linux/swab.h10
-rw-r--r--include/uapi/linux/switchtec_ioctl.h17
-rw-r--r--include/uapi/linux/sysctl.h2
-rw-r--r--include/uapi/linux/taskstats.h6
-rw-r--r--include/uapi/linux/tc_act/tc_tunnel_key.h29
-rw-r--r--include/uapi/linux/tcp.h16
-rw-r--r--include/uapi/linux/tee.h1
-rw-r--r--include/uapi/linux/time.h28
-rw-r--r--include/uapi/linux/time_types.h10
-rw-r--r--include/uapi/linux/timex.h2
-rw-r--r--include/uapi/linux/tipc.h22
-rw-r--r--include/uapi/linux/tipc_config.h4
-rw-r--r--include/uapi/linux/tipc_netlink.h6
-rw-r--r--include/uapi/linux/udp.h1
-rw-r--r--include/uapi/linux/usb/charger.h16
-rw-r--r--include/uapi/linux/usb/raw_gadget.h167
-rw-r--r--include/uapi/linux/utime.h4
-rw-r--r--include/uapi/linux/v4l2-controls.h7
-rw-r--r--include/uapi/linux/videodev2.h60
-rw-r--r--include/uapi/linux/virtio_ring.h2
-rw-r--r--include/uapi/linux/vm_sockets.h8
-rw-r--r--include/uapi/linux/wireguard.h196
-rw-r--r--include/uapi/misc/fastrpc.h15
-rw-r--r--include/uapi/misc/habanalabs.h48
-rw-r--r--include/uapi/misc/pvpanic.h9
-rw-r--r--include/uapi/rdma/cxgb3-abi.h82
-rw-r--r--include/uapi/rdma/efa-abi.h6
-rw-r--r--include/uapi/rdma/ib_user_ioctl_cmds.h15
-rw-r--r--include/uapi/rdma/ib_user_ioctl_verbs.h34
-rw-r--r--include/uapi/rdma/mlx5_user_ioctl_cmds.h18
-rw-r--r--include/uapi/rdma/nes-abi.h115
-rw-r--r--include/uapi/rdma/qedr-abi.h43
-rw-r--r--include/uapi/rdma/rdma_user_ioctl_cmds.h22
-rw-r--r--include/uapi/rdma/vmw_pvrdma-abi.h5
-rw-r--r--include/uapi/scsi/scsi_bsg_ufs.h3
-rw-r--r--include/uapi/sound/asoc.h1
-rw-r--r--include/uapi/sound/asound.h155
-rw-r--r--include/uapi/sound/compress_params.h10
-rw-r--r--include/uapi/sound/emu10k1.h42
-rw-r--r--include/uapi/sound/hdsp.h13
-rw-r--r--include/uapi/sound/hdspm.h10
-rw-r--r--include/uapi/sound/sof/abi.h2
-rw-r--r--include/uapi/sound/sof/tokens.h20
-rw-r--r--include/vdso/bits.h9
-rw-r--r--include/vdso/clocksource.h22
-rw-r--r--include/vdso/const.h10
-rw-r--r--include/vdso/datapage.h50
-rw-r--r--include/vdso/helpers.h2
-rw-r--r--include/vdso/jiffies.h11
-rw-r--r--include/vdso/ktime.h16
-rw-r--r--include/vdso/limits.h19
-rw-r--r--include/vdso/math64.h24
-rw-r--r--include/vdso/processor.h14
-rw-r--r--include/vdso/time.h12
-rw-r--r--include/vdso/time32.h17
-rw-r--r--include/vdso/time64.h14
-rw-r--r--include/video/mipi_display.h24
-rw-r--r--include/xen/interface/io/ring.h29
-rw-r--r--include/xen/interface/io/tpmif.h2
-rw-r--r--include/xen/interface/xen-mca.h10
-rw-r--r--include/xen/swiotlb-xen.h8
-rw-r--r--include/xen/xen-ops.h4
-rw-r--r--include/xen/xenbus.h4
1149 files changed, 37560 insertions, 15619 deletions
diff --git a/include/Kbuild b/include/Kbuild
deleted file mode 100644
index ffba79483cc5..000000000000
--- a/include/Kbuild
+++ /dev/null
@@ -1,1185 +0,0 @@
-# SPDX-License-Identifier: GPL-2.0-only
-
-# Add header-test-$(CONFIG_...) guard to headers that are only compiled
-# for particular architectures.
-#
-# Headers listed in header-test- are excluded from the test coverage.
-# Many headers are excluded for now because they fail to build. Please
-# consider to fix headers first before adding new ones to the blacklist.
-#
-# Sorted alphabetically.
-header-test- += acpi/acbuffer.h
-header-test- += acpi/acpi.h
-header-test- += acpi/acpi_bus.h
-header-test- += acpi/acpi_drivers.h
-header-test- += acpi/acpi_io.h
-header-test- += acpi/acpi_lpat.h
-header-test- += acpi/acpiosxf.h
-header-test- += acpi/acpixf.h
-header-test- += acpi/acrestyp.h
-header-test- += acpi/actbl.h
-header-test- += acpi/actbl1.h
-header-test- += acpi/actbl2.h
-header-test- += acpi/actbl3.h
-header-test- += acpi/actypes.h
-header-test- += acpi/battery.h
-header-test- += acpi/cppc_acpi.h
-header-test- += acpi/nfit.h
-header-test- += acpi/platform/acenv.h
-header-test- += acpi/platform/acenvex.h
-header-test- += acpi/platform/acintel.h
-header-test- += acpi/platform/aclinux.h
-header-test- += acpi/platform/aclinuxex.h
-header-test- += acpi/processor.h
-header-test-$(CONFIG_X86) += clocksource/hyperv_timer.h
-header-test- += clocksource/timer-sp804.h
-header-test- += crypto/cast_common.h
-header-test- += crypto/internal/cryptouser.h
-header-test- += crypto/pkcs7.h
-header-test- += crypto/poly1305.h
-header-test- += crypto/sha3.h
-header-test- += drm/ati_pcigart.h
-header-test- += drm/bridge/dw_hdmi.h
-header-test- += drm/bridge/dw_mipi_dsi.h
-header-test- += drm/drm_audio_component.h
-header-test- += drm/drm_auth.h
-header-test- += drm/drm_debugfs.h
-header-test- += drm/drm_debugfs_crc.h
-header-test- += drm/drm_displayid.h
-header-test- += drm/drm_encoder_slave.h
-header-test- += drm/drm_fb_cma_helper.h
-header-test- += drm/drm_fb_helper.h
-header-test- += drm/drm_fixed.h
-header-test- += drm/drm_format_helper.h
-header-test- += drm/drm_lease.h
-header-test- += drm/drm_legacy.h
-header-test- += drm/drm_panel.h
-header-test- += drm/drm_plane_helper.h
-header-test- += drm/drm_rect.h
-header-test- += drm/i915_component.h
-header-test- += drm/intel-gtt.h
-header-test- += drm/tinydrm/tinydrm-helpers.h
-header-test- += drm/ttm/ttm_debug.h
-header-test- += keys/asymmetric-parser.h
-header-test- += keys/asymmetric-subtype.h
-header-test- += keys/asymmetric-type.h
-header-test- += keys/big_key-type.h
-header-test- += keys/request_key_auth-type.h
-header-test- += keys/trusted.h
-header-test- += kvm/arm_arch_timer.h
-header-test- += kvm/arm_pmu.h
-header-test-$(CONFIG_ARM) += kvm/arm_psci.h
-header-test-$(CONFIG_ARM64) += kvm/arm_psci.h
-header-test- += kvm/arm_vgic.h
-header-test- += linux/8250_pci.h
-header-test- += linux/a.out.h
-header-test- += linux/adxl.h
-header-test- += linux/agpgart.h
-header-test- += linux/alcor_pci.h
-header-test- += linux/amba/clcd.h
-header-test- += linux/amba/pl080.h
-header-test- += linux/amd-iommu.h
-header-test-$(CONFIG_ARM) += linux/arm-cci.h
-header-test-$(CONFIG_ARM64) += linux/arm-cci.h
-header-test- += linux/arm_sdei.h
-header-test- += linux/asn1_decoder.h
-header-test- += linux/ata_platform.h
-header-test- += linux/ath9k_platform.h
-header-test- += linux/atm_tcp.h
-header-test- += linux/atomic-fallback.h
-header-test- += linux/avf/virtchnl.h
-header-test- += linux/bcm47xx_sprom.h
-header-test- += linux/bcma/bcma_driver_gmac_cmn.h
-header-test- += linux/bcma/bcma_driver_mips.h
-header-test- += linux/bcma/bcma_driver_pci.h
-header-test- += linux/bcma/bcma_driver_pcie2.h
-header-test- += linux/bit_spinlock.h
-header-test- += linux/blk-mq-rdma.h
-header-test- += linux/blk-mq.h
-header-test- += linux/blktrace_api.h
-header-test- += linux/blockgroup_lock.h
-header-test- += linux/bma150.h
-header-test- += linux/bpf_lirc.h
-header-test- += linux/bpf_types.h
-header-test- += linux/bsg-lib.h
-header-test- += linux/bsg.h
-header-test- += linux/btf.h
-header-test- += linux/btree-128.h
-header-test- += linux/btree-type.h
-header-test-$(CONFIG_CPU_BIG_ENDIAN) += linux/byteorder/big_endian.h
-header-test- += linux/byteorder/generic.h
-header-test-$(CONFIG_CPU_LITTLE_ENDIAN) += linux/byteorder/little_endian.h
-header-test- += linux/c2port.h
-header-test- += linux/can/dev/peak_canfd.h
-header-test- += linux/can/platform/cc770.h
-header-test- += linux/can/platform/sja1000.h
-header-test- += linux/ceph/ceph_features.h
-header-test- += linux/ceph/ceph_frag.h
-header-test- += linux/ceph/ceph_fs.h
-header-test- += linux/ceph/debugfs.h
-header-test- += linux/ceph/msgr.h
-header-test- += linux/ceph/rados.h
-header-test- += linux/cgroup_subsys.h
-header-test- += linux/clk/sunxi-ng.h
-header-test- += linux/clk/ti.h
-header-test- += linux/cn_proc.h
-header-test- += linux/coda_psdev.h
-header-test- += linux/compaction.h
-header-test- += linux/console_struct.h
-header-test- += linux/count_zeros.h
-header-test- += linux/cs5535.h
-header-test- += linux/cuda.h
-header-test- += linux/cyclades.h
-header-test- += linux/dcookies.h
-header-test- += linux/delayacct.h
-header-test- += linux/delayed_call.h
-header-test- += linux/device-mapper.h
-header-test- += linux/devpts_fs.h
-header-test- += linux/dio.h
-header-test- += linux/dirent.h
-header-test- += linux/dlm_plock.h
-header-test- += linux/dm-dirty-log.h
-header-test- += linux/dm-region-hash.h
-header-test- += linux/dma-debug.h
-header-test- += linux/dma/mmp-pdma.h
-header-test- += linux/dma/sprd-dma.h
-header-test- += linux/dns_resolver.h
-header-test- += linux/drbd_genl.h
-header-test- += linux/drbd_genl_api.h
-header-test- += linux/dw_apb_timer.h
-header-test- += linux/dynamic_debug.h
-header-test- += linux/dynamic_queue_limits.h
-header-test- += linux/ecryptfs.h
-header-test- += linux/edma.h
-header-test- += linux/eeprom_93cx6.h
-header-test- += linux/efs_vh.h
-header-test- += linux/elevator.h
-header-test- += linux/elfcore-compat.h
-header-test- += linux/error-injection.h
-header-test- += linux/errseq.h
-header-test- += linux/eventpoll.h
-header-test- += linux/ext2_fs.h
-header-test- += linux/f75375s.h
-header-test- += linux/falloc.h
-header-test- += linux/fault-inject.h
-header-test- += linux/fbcon.h
-header-test- += linux/firmware/intel/stratix10-svc-client.h
-header-test- += linux/firmware/meson/meson_sm.h
-header-test- += linux/firmware/trusted_foundations.h
-header-test- += linux/firmware/xlnx-zynqmp.h
-header-test- += linux/fixp-arith.h
-header-test- += linux/flat.h
-header-test- += linux/fs_types.h
-header-test- += linux/fs_uart_pd.h
-header-test- += linux/fsi-occ.h
-header-test- += linux/fsi-sbefifo.h
-header-test- += linux/fsl/bestcomm/ata.h
-header-test- += linux/fsl/bestcomm/bestcomm.h
-header-test- += linux/fsl/bestcomm/bestcomm_priv.h
-header-test- += linux/fsl/bestcomm/fec.h
-header-test- += linux/fsl/bestcomm/gen_bd.h
-header-test- += linux/fsl/bestcomm/sram.h
-header-test- += linux/fsl_hypervisor.h
-header-test- += linux/fsldma.h
-header-test- += linux/ftrace_irq.h
-header-test- += linux/gameport.h
-header-test- += linux/genl_magic_func.h
-header-test- += linux/genl_magic_struct.h
-header-test- += linux/gpio/aspeed.h
-header-test- += linux/gpio/gpio-reg.h
-header-test- += linux/hid-debug.h
-header-test- += linux/hiddev.h
-header-test- += linux/hippidevice.h
-header-test- += linux/hmm.h
-header-test- += linux/hp_sdc.h
-header-test- += linux/huge_mm.h
-header-test- += linux/hugetlb_cgroup.h
-header-test- += linux/hugetlb_inline.h
-header-test- += linux/hwmon-vid.h
-header-test- += linux/hyperv.h
-header-test- += linux/i2c-algo-pca.h
-header-test- += linux/i2c-algo-pcf.h
-header-test- += linux/i3c/ccc.h
-header-test- += linux/i3c/device.h
-header-test- += linux/i3c/master.h
-header-test- += linux/i8042.h
-header-test- += linux/ide.h
-header-test- += linux/idle_inject.h
-header-test- += linux/if_frad.h
-header-test- += linux/if_rmnet.h
-header-test- += linux/if_tap.h
-header-test- += linux/iio/accel/kxcjk_1013.h
-header-test- += linux/iio/adc/ad_sigma_delta.h
-header-test- += linux/iio/buffer-dma.h
-header-test- += linux/iio/buffer_impl.h
-header-test- += linux/iio/common/st_sensors.h
-header-test- += linux/iio/common/st_sensors_i2c.h
-header-test- += linux/iio/common/st_sensors_spi.h
-header-test- += linux/iio/dac/ad5421.h
-header-test- += linux/iio/dac/ad5504.h
-header-test- += linux/iio/dac/ad5791.h
-header-test- += linux/iio/dac/max517.h
-header-test- += linux/iio/dac/mcp4725.h
-header-test- += linux/iio/frequency/ad9523.h
-header-test- += linux/iio/frequency/adf4350.h
-header-test- += linux/iio/hw-consumer.h
-header-test- += linux/iio/imu/adis.h
-header-test- += linux/iio/sysfs.h
-header-test- += linux/iio/timer/stm32-timer-trigger.h
-header-test- += linux/iio/trigger.h
-header-test- += linux/iio/triggered_event.h
-header-test- += linux/imx-media.h
-header-test- += linux/inet_diag.h
-header-test- += linux/init_ohci1394_dma.h
-header-test- += linux/initrd.h
-header-test- += linux/input/adp5589.h
-header-test- += linux/input/bu21013.h
-header-test- += linux/input/cma3000.h
-header-test- += linux/input/kxtj9.h
-header-test- += linux/input/lm8333.h
-header-test- += linux/input/sparse-keymap.h
-header-test- += linux/input/touchscreen.h
-header-test- += linux/input/tps6507x-ts.h
-header-test-$(CONFIG_X86) += linux/intel-iommu.h
-header-test- += linux/intel-ish-client-if.h
-header-test- += linux/intel-pti.h
-header-test- += linux/intel-svm.h
-header-test- += linux/interconnect-provider.h
-header-test- += linux/ioc3.h
-header-test-$(CONFIG_BLOCK) += linux/iomap.h
-header-test- += linux/ipack.h
-header-test- += linux/irq_cpustat.h
-header-test- += linux/irq_poll.h
-header-test- += linux/irqchip/arm-gic-v3.h
-header-test- += linux/irqchip/arm-gic-v4.h
-header-test- += linux/irqchip/irq-madera.h
-header-test- += linux/irqchip/irq-sa11x0.h
-header-test- += linux/irqchip/mxs.h
-header-test- += linux/irqchip/versatile-fpga.h
-header-test- += linux/irqdesc.h
-header-test- += linux/irqflags.h
-header-test- += linux/iscsi_boot_sysfs.h
-header-test- += linux/isdn/capiutil.h
-header-test- += linux/isdn/hdlc.h
-header-test- += linux/isdn_ppp.h
-header-test- += linux/jbd2.h
-header-test- += linux/jump_label.h
-header-test- += linux/jump_label_ratelimit.h
-header-test- += linux/jz4740-adc.h
-header-test- += linux/kasan.h
-header-test- += linux/kcore.h
-header-test- += linux/kdev_t.h
-header-test- += linux/kernelcapi.h
-header-test- += linux/khugepaged.h
-header-test- += linux/kobj_map.h
-header-test- += linux/kobject_ns.h
-header-test- += linux/kvm_host.h
-header-test- += linux/kvm_irqfd.h
-header-test- += linux/kvm_para.h
-header-test- += linux/lantiq.h
-header-test- += linux/lapb.h
-header-test- += linux/latencytop.h
-header-test- += linux/led-lm3530.h
-header-test- += linux/leds-bd2802.h
-header-test- += linux/leds-lp3944.h
-header-test- += linux/leds-lp3952.h
-header-test- += linux/leds_pwm.h
-header-test- += linux/libata.h
-header-test- += linux/license.h
-header-test- += linux/lightnvm.h
-header-test- += linux/lis3lv02d.h
-header-test- += linux/list_bl.h
-header-test- += linux/list_lru.h
-header-test- += linux/list_nulls.h
-header-test- += linux/lockd/share.h
-header-test- += linux/lzo.h
-header-test- += linux/mailbox/zynqmp-ipi-message.h
-header-test- += linux/maple.h
-header-test- += linux/mbcache.h
-header-test- += linux/mbus.h
-header-test- += linux/mc146818rtc.h
-header-test- += linux/mc6821.h
-header-test- += linux/mdev.h
-header-test- += linux/mem_encrypt.h
-header-test- += linux/memfd.h
-header-test- += linux/mfd/88pm80x.h
-header-test- += linux/mfd/88pm860x.h
-header-test- += linux/mfd/abx500/ab8500-bm.h
-header-test- += linux/mfd/abx500/ab8500-gpadc.h
-header-test- += linux/mfd/adp5520.h
-header-test- += linux/mfd/arizona/pdata.h
-header-test- += linux/mfd/as3711.h
-header-test- += linux/mfd/as3722.h
-header-test- += linux/mfd/da903x.h
-header-test- += linux/mfd/da9055/pdata.h
-header-test- += linux/mfd/db8500-prcmu.h
-header-test- += linux/mfd/dbx500-prcmu.h
-header-test- += linux/mfd/dln2.h
-header-test- += linux/mfd/dm355evm_msp.h
-header-test- += linux/mfd/ds1wm.h
-header-test- += linux/mfd/ezx-pcap.h
-header-test- += linux/mfd/intel_msic.h
-header-test- += linux/mfd/janz.h
-header-test- += linux/mfd/kempld.h
-header-test- += linux/mfd/lm3533.h
-header-test- += linux/mfd/lp8788-isink.h
-header-test- += linux/mfd/lpc_ich.h
-header-test- += linux/mfd/max77693.h
-header-test- += linux/mfd/max8998-private.h
-header-test- += linux/mfd/menelaus.h
-header-test- += linux/mfd/mt6397/core.h
-header-test- += linux/mfd/palmas.h
-header-test- += linux/mfd/pcf50633/backlight.h
-header-test- += linux/mfd/rc5t583.h
-header-test- += linux/mfd/retu.h
-header-test- += linux/mfd/samsung/core.h
-header-test- += linux/mfd/si476x-platform.h
-header-test- += linux/mfd/si476x-reports.h
-header-test- += linux/mfd/sky81452.h
-header-test- += linux/mfd/smsc.h
-header-test- += linux/mfd/sta2x11-mfd.h
-header-test- += linux/mfd/stmfx.h
-header-test- += linux/mfd/tc3589x.h
-header-test- += linux/mfd/tc6387xb.h
-header-test- += linux/mfd/tc6393xb.h
-header-test- += linux/mfd/tps65090.h
-header-test- += linux/mfd/tps6586x.h
-header-test- += linux/mfd/tps65910.h
-header-test- += linux/mfd/tps80031.h
-header-test- += linux/mfd/ucb1x00.h
-header-test- += linux/mfd/viperboard.h
-header-test- += linux/mfd/wm831x/core.h
-header-test- += linux/mfd/wm831x/otp.h
-header-test- += linux/mfd/wm831x/pdata.h
-header-test- += linux/mfd/wm8994/core.h
-header-test- += linux/mfd/wm8994/pdata.h
-header-test- += linux/mlx4/doorbell.h
-header-test- += linux/mlx4/srq.h
-header-test- += linux/mlx5/doorbell.h
-header-test- += linux/mlx5/eq.h
-header-test- += linux/mlx5/fs_helpers.h
-header-test- += linux/mlx5/mlx5_ifc.h
-header-test- += linux/mlx5/mlx5_ifc_fpga.h
-header-test- += linux/mm-arch-hooks.h
-header-test- += linux/mm_inline.h
-header-test- += linux/mmu_context.h
-header-test- += linux/mpage.h
-header-test- += linux/mtd/bbm.h
-header-test- += linux/mtd/cfi.h
-header-test- += linux/mtd/doc2000.h
-header-test- += linux/mtd/flashchip.h
-header-test- += linux/mtd/ftl.h
-header-test- += linux/mtd/gen_probe.h
-header-test- += linux/mtd/jedec.h
-header-test- += linux/mtd/nand_bch.h
-header-test- += linux/mtd/nand_ecc.h
-header-test- += linux/mtd/ndfc.h
-header-test- += linux/mtd/onenand.h
-header-test- += linux/mtd/pismo.h
-header-test- += linux/mtd/plat-ram.h
-header-test- += linux/mtd/spi-nor.h
-header-test- += linux/mv643xx.h
-header-test- += linux/mv643xx_eth.h
-header-test- += linux/mvebu-pmsu.h
-header-test- += linux/mxm-wmi.h
-header-test- += linux/n_r3964.h
-header-test- += linux/ndctl.h
-header-test- += linux/nfs.h
-header-test- += linux/nfs_fs_i.h
-header-test- += linux/nfs_fs_sb.h
-header-test- += linux/nfs_page.h
-header-test- += linux/nfs_xdr.h
-header-test- += linux/nfsacl.h
-header-test- += linux/nl802154.h
-header-test- += linux/ns_common.h
-header-test- += linux/nsc_gpio.h
-header-test- += linux/ntb_transport.h
-header-test- += linux/nubus.h
-header-test- += linux/nvme-fc-driver.h
-header-test- += linux/nvme-fc.h
-header-test- += linux/nvme-rdma.h
-header-test- += linux/nvram.h
-header-test- += linux/objagg.h
-header-test- += linux/of_clk.h
-header-test- += linux/of_net.h
-header-test- += linux/of_pdt.h
-header-test- += linux/olpc-ec.h
-header-test- += linux/omap-dma.h
-header-test- += linux/omap-dmaengine.h
-header-test- += linux/omap-gpmc.h
-header-test- += linux/omap-iommu.h
-header-test- += linux/omap-mailbox.h
-header-test- += linux/once.h
-header-test- += linux/osq_lock.h
-header-test- += linux/overflow.h
-header-test- += linux/page-flags-layout.h
-header-test- += linux/page-isolation.h
-header-test- += linux/page_ext.h
-header-test- += linux/page_owner.h
-header-test- += linux/parport_pc.h
-header-test- += linux/parser.h
-header-test- += linux/pci-acpi.h
-header-test- += linux/pci-dma-compat.h
-header-test- += linux/pci_hotplug.h
-header-test- += linux/pda_power.h
-header-test- += linux/perf/arm_pmu.h
-header-test- += linux/perf_regs.h
-header-test- += linux/phy/omap_control_phy.h
-header-test- += linux/phy/tegra/xusb.h
-header-test- += linux/phy/ulpi_phy.h
-header-test- += linux/phy_fixed.h
-header-test- += linux/pipe_fs_i.h
-header-test- += linux/pktcdvd.h
-header-test- += linux/pl320-ipc.h
-header-test- += linux/pl353-smc.h
-header-test- += linux/platform_data/ad5449.h
-header-test- += linux/platform_data/ad5755.h
-header-test- += linux/platform_data/ad7266.h
-header-test- += linux/platform_data/ad7291.h
-header-test- += linux/platform_data/ad7298.h
-header-test- += linux/platform_data/ad7303.h
-header-test- += linux/platform_data/ad7791.h
-header-test- += linux/platform_data/ad7793.h
-header-test- += linux/platform_data/ad7887.h
-header-test- += linux/platform_data/adau17x1.h
-header-test- += linux/platform_data/adp8870.h
-header-test- += linux/platform_data/ads1015.h
-header-test- += linux/platform_data/ads7828.h
-header-test- += linux/platform_data/apds990x.h
-header-test- += linux/platform_data/arm-ux500-pm.h
-header-test- += linux/platform_data/asoc-s3c.h
-header-test- += linux/platform_data/at91_adc.h
-header-test- += linux/platform_data/ata-pxa.h
-header-test- += linux/platform_data/atmel.h
-header-test- += linux/platform_data/bh1770glc.h
-header-test- += linux/platform_data/brcmfmac.h
-header-test- += linux/platform_data/cros_ec_commands.h
-header-test- += linux/platform_data/clk-u300.h
-header-test- += linux/platform_data/cyttsp4.h
-header-test- += linux/platform_data/dma-coh901318.h
-header-test- += linux/platform_data/dma-imx-sdma.h
-header-test- += linux/platform_data/dma-mcf-edma.h
-header-test- += linux/platform_data/dma-s3c24xx.h
-header-test- += linux/platform_data/dmtimer-omap.h
-header-test- += linux/platform_data/dsa.h
-header-test- += linux/platform_data/edma.h
-header-test- += linux/platform_data/elm.h
-header-test- += linux/platform_data/emif_plat.h
-header-test- += linux/platform_data/fsa9480.h
-header-test- += linux/platform_data/g762.h
-header-test- += linux/platform_data/gpio-ath79.h
-header-test- += linux/platform_data/gpio-davinci.h
-header-test- += linux/platform_data/gpio-dwapb.h
-header-test- += linux/platform_data/gpio-htc-egpio.h
-header-test- += linux/platform_data/gpmc-omap.h
-header-test- += linux/platform_data/hsmmc-omap.h
-header-test- += linux/platform_data/hwmon-s3c.h
-header-test- += linux/platform_data/i2c-davinci.h
-header-test- += linux/platform_data/i2c-imx.h
-header-test- += linux/platform_data/i2c-mux-reg.h
-header-test- += linux/platform_data/i2c-ocores.h
-header-test- += linux/platform_data/i2c-xiic.h
-header-test- += linux/platform_data/intel-spi.h
-header-test- += linux/platform_data/invensense_mpu6050.h
-header-test- += linux/platform_data/irda-pxaficp.h
-header-test- += linux/platform_data/irda-sa11x0.h
-header-test- += linux/platform_data/itco_wdt.h
-header-test- += linux/platform_data/jz4740/jz4740_nand.h
-header-test- += linux/platform_data/keyboard-pxa930_rotary.h
-header-test- += linux/platform_data/keypad-omap.h
-header-test- += linux/platform_data/leds-lp55xx.h
-header-test- += linux/platform_data/leds-omap.h
-header-test- += linux/platform_data/lp855x.h
-header-test- += linux/platform_data/lp8727.h
-header-test- += linux/platform_data/max197.h
-header-test- += linux/platform_data/max3421-hcd.h
-header-test- += linux/platform_data/max732x.h
-header-test- += linux/platform_data/mcs.h
-header-test- += linux/platform_data/mdio-bcm-unimac.h
-header-test- += linux/platform_data/mdio-gpio.h
-header-test- += linux/platform_data/media/si4713.h
-header-test- += linux/platform_data/mlxreg.h
-header-test- += linux/platform_data/mmc-omap.h
-header-test- += linux/platform_data/mmc-sdhci-s3c.h
-header-test- += linux/platform_data/mmp_audio.h
-header-test- += linux/platform_data/mtd-orion_nand.h
-header-test- += linux/platform_data/mv88e6xxx.h
-header-test- += linux/platform_data/net-cw1200.h
-header-test- += linux/platform_data/omap-twl4030.h
-header-test- += linux/platform_data/omapdss.h
-header-test- += linux/platform_data/pcf857x.h
-header-test- += linux/platform_data/pixcir_i2c_ts.h
-header-test- += linux/platform_data/pwm_omap_dmtimer.h
-header-test- += linux/platform_data/pxa2xx_udc.h
-header-test- += linux/platform_data/pxa_sdhci.h
-header-test- += linux/platform_data/remoteproc-omap.h
-header-test- += linux/platform_data/sa11x0-serial.h
-header-test- += linux/platform_data/sc18is602.h
-header-test- += linux/platform_data/sdhci-pic32.h
-header-test- += linux/platform_data/serial-sccnxp.h
-header-test- += linux/platform_data/sht3x.h
-header-test- += linux/platform_data/shtc1.h
-header-test- += linux/platform_data/si5351.h
-header-test- += linux/platform_data/sky81452-backlight.h
-header-test- += linux/platform_data/spi-davinci.h
-header-test- += linux/platform_data/spi-ep93xx.h
-header-test- += linux/platform_data/spi-mt65xx.h
-header-test- += linux/platform_data/st_sensors_pdata.h
-header-test- += linux/platform_data/ti-sysc.h
-header-test- += linux/platform_data/timer-ixp4xx.h
-header-test- += linux/platform_data/touchscreen-s3c2410.h
-header-test- += linux/platform_data/tsc2007.h
-header-test- += linux/platform_data/tsl2772.h
-header-test- += linux/platform_data/uio_pruss.h
-header-test- += linux/platform_data/usb-davinci.h
-header-test- += linux/platform_data/usb-ehci-mxc.h
-header-test- += linux/platform_data/usb-ehci-orion.h
-header-test- += linux/platform_data/usb-mx2.h
-header-test- += linux/platform_data/usb-ohci-s3c2410.h
-header-test- += linux/platform_data/usb-omap.h
-header-test- += linux/platform_data/usb-s3c2410_udc.h
-header-test- += linux/platform_data/usb3503.h
-header-test- += linux/platform_data/ux500_wdt.h
-header-test- += linux/platform_data/video-clcd-versatile.h
-header-test- += linux/platform_data/video-imxfb.h
-header-test- += linux/platform_data/video-pxafb.h
-header-test- += linux/platform_data/video_s3c.h
-header-test- += linux/platform_data/voltage-omap.h
-header-test- += linux/platform_data/x86/apple.h
-header-test- += linux/platform_data/x86/clk-pmc-atom.h
-header-test- += linux/platform_data/x86/pmc_atom.h
-header-test- += linux/platform_data/xtalk-bridge.h
-header-test- += linux/pm2301_charger.h
-header-test- += linux/pm_wakeirq.h
-header-test- += linux/pm_wakeup.h
-header-test- += linux/pmbus.h
-header-test- += linux/pmu.h
-header-test- += linux/posix_acl.h
-header-test- += linux/posix_acl_xattr.h
-header-test- += linux/power/ab8500.h
-header-test- += linux/power/bq27xxx_battery.h
-header-test- += linux/power/generic-adc-battery.h
-header-test- += linux/power/jz4740-battery.h
-header-test- += linux/power/max17042_battery.h
-header-test- += linux/power/max8903_charger.h
-header-test- += linux/ppp-comp.h
-header-test- += linux/pps-gpio.h
-header-test- += linux/pr.h
-header-test- += linux/proc_ns.h
-header-test- += linux/processor.h
-header-test- += linux/psi.h
-header-test- += linux/psp-sev.h
-header-test- += linux/pstore.h
-header-test- += linux/ptr_ring.h
-header-test- += linux/ptrace.h
-header-test- += linux/qcom-geni-se.h
-header-test- += linux/qed/eth_common.h
-header-test- += linux/qed/fcoe_common.h
-header-test- += linux/qed/iscsi_common.h
-header-test- += linux/qed/iwarp_common.h
-header-test- += linux/qed/qed_eth_if.h
-header-test- += linux/qed/qed_fcoe_if.h
-header-test- += linux/qed/rdma_common.h
-header-test- += linux/qed/storage_common.h
-header-test- += linux/qed/tcp_common.h
-header-test- += linux/qnx6_fs.h
-header-test- += linux/quicklist.h
-header-test- += linux/ramfs.h
-header-test- += linux/range.h
-header-test- += linux/rcu_node_tree.h
-header-test- += linux/rculist_bl.h
-header-test- += linux/rculist_nulls.h
-header-test- += linux/rcutiny.h
-header-test- += linux/rcutree.h
-header-test- += linux/reboot-mode.h
-header-test- += linux/regulator/fixed.h
-header-test- += linux/regulator/gpio-regulator.h
-header-test- += linux/regulator/max8973-regulator.h
-header-test- += linux/regulator/of_regulator.h
-header-test- += linux/regulator/tps51632-regulator.h
-header-test- += linux/regulator/tps62360.h
-header-test- += linux/regulator/tps6507x.h
-header-test- += linux/regulator/userspace-consumer.h
-header-test- += linux/remoteproc/st_slim_rproc.h
-header-test- += linux/reset/socfpga.h
-header-test- += linux/reset/sunxi.h
-header-test- += linux/rtc/m48t59.h
-header-test- += linux/rtc/rtc-omap.h
-header-test- += linux/rtc/sirfsoc_rtciobrg.h
-header-test- += linux/rwlock.h
-header-test- += linux/rwlock_types.h
-header-test- += linux/scc.h
-header-test- += linux/sched/deadline.h
-header-test- += linux/sched/smt.h
-header-test- += linux/sched/sysctl.h
-header-test- += linux/sched_clock.h
-header-test- += linux/scpi_protocol.h
-header-test- += linux/scx200_gpio.h
-header-test- += linux/seccomp.h
-header-test- += linux/sed-opal.h
-header-test- += linux/seg6_iptunnel.h
-header-test- += linux/selection.h
-header-test- += linux/set_memory.h
-header-test- += linux/shrinker.h
-header-test- += linux/sirfsoc_dma.h
-header-test- += linux/skb_array.h
-header-test- += linux/slab_def.h
-header-test- += linux/slub_def.h
-header-test- += linux/sm501.h
-header-test- += linux/smc91x.h
-header-test- += linux/static_key.h
-header-test- += linux/soc/actions/owl-sps.h
-header-test- += linux/soc/amlogic/meson-canvas.h
-header-test- += linux/soc/brcmstb/brcmstb.h
-header-test- += linux/soc/ixp4xx/npe.h
-header-test- += linux/soc/mediatek/infracfg.h
-header-test- += linux/soc/qcom/smd-rpm.h
-header-test- += linux/soc/qcom/smem.h
-header-test- += linux/soc/qcom/smem_state.h
-header-test- += linux/soc/qcom/wcnss_ctrl.h
-header-test- += linux/soc/renesas/rcar-rst.h
-header-test- += linux/soc/samsung/exynos-pmu.h
-header-test- += linux/soc/sunxi/sunxi_sram.h
-header-test- += linux/soc/ti/ti-msgmgr.h
-header-test- += linux/soc/ti/ti_sci_inta_msi.h
-header-test- += linux/soc/ti/ti_sci_protocol.h
-header-test- += linux/soundwire/sdw.h
-header-test- += linux/soundwire/sdw_intel.h
-header-test- += linux/soundwire/sdw_type.h
-header-test- += linux/spi/ad7877.h
-header-test- += linux/spi/ads7846.h
-header-test- += linux/spi/at86rf230.h
-header-test- += linux/spi/ds1305.h
-header-test- += linux/spi/libertas_spi.h
-header-test- += linux/spi/lms283gf05.h
-header-test- += linux/spi/max7301.h
-header-test- += linux/spi/mcp23s08.h
-header-test- += linux/spi/rspi.h
-header-test- += linux/spi/s3c24xx.h
-header-test- += linux/spi/sh_msiof.h
-header-test- += linux/spi/spi-fsl-dspi.h
-header-test- += linux/spi/spi_bitbang.h
-header-test- += linux/spi/spi_gpio.h
-header-test- += linux/spi/xilinx_spi.h
-header-test- += linux/spinlock_api_smp.h
-header-test- += linux/spinlock_api_up.h
-header-test- += linux/spinlock_types.h
-header-test- += linux/splice.h
-header-test- += linux/sram.h
-header-test- += linux/srcutiny.h
-header-test- += linux/srcutree.h
-header-test- += linux/ssb/ssb_driver_chipcommon.h
-header-test- += linux/ssb/ssb_driver_extif.h
-header-test- += linux/ssb/ssb_driver_mips.h
-header-test- += linux/ssb/ssb_driver_pci.h
-header-test- += linux/ssbi.h
-header-test- += linux/stackdepot.h
-header-test- += linux/stmp3xxx_rtc_wdt.h
-header-test- += linux/string_helpers.h
-header-test- += linux/sungem_phy.h
-header-test- += linux/sunrpc/msg_prot.h
-header-test- += linux/sunrpc/rpc_pipe_fs.h
-header-test- += linux/sunrpc/xprtmultipath.h
-header-test- += linux/sunrpc/xprtsock.h
-header-test- += linux/sunxi-rsb.h
-header-test- += linux/svga.h
-header-test- += linux/sw842.h
-header-test- += linux/swapfile.h
-header-test- += linux/swapops.h
-header-test- += linux/swiotlb.h
-header-test- += linux/sysv_fs.h
-header-test- += linux/t10-pi.h
-header-test- += linux/task_io_accounting.h
-header-test- += linux/tick.h
-header-test- += linux/timb_dma.h
-header-test- += linux/timekeeping.h
-header-test- += linux/timekeeping32.h
-header-test- += linux/ts-nbus.h
-header-test- += linux/tsacct_kern.h
-header-test- += linux/tty_flip.h
-header-test- += linux/tty_ldisc.h
-header-test- += linux/ucb1400.h
-header-test- += linux/usb/association.h
-header-test- += linux/usb/cdc-wdm.h
-header-test- += linux/usb/cdc_ncm.h
-header-test- += linux/usb/ezusb.h
-header-test- += linux/usb/gadget_configfs.h
-header-test- += linux/usb/gpio_vbus.h
-header-test- += linux/usb/hcd.h
-header-test- += linux/usb/iowarrior.h
-header-test- += linux/usb/irda.h
-header-test- += linux/usb/isp116x.h
-header-test- += linux/usb/isp1362.h
-header-test- += linux/usb/musb.h
-header-test- += linux/usb/net2280.h
-header-test- += linux/usb/ohci_pdriver.h
-header-test- += linux/usb/otg-fsm.h
-header-test- += linux/usb/pd_ado.h
-header-test- += linux/usb/r8a66597.h
-header-test- += linux/usb/rndis_host.h
-header-test- += linux/usb/serial.h
-header-test- += linux/usb/sl811.h
-header-test- += linux/usb/storage.h
-header-test- += linux/usb/uas.h
-header-test- += linux/usb/usb338x.h
-header-test- += linux/usb/usbnet.h
-header-test- += linux/usb/wusb-wa.h
-header-test- += linux/usb/xhci-dbgp.h
-header-test- += linux/usb_usual.h
-header-test- += linux/user-return-notifier.h
-header-test- += linux/userfaultfd_k.h
-header-test- += linux/verification.h
-header-test- += linux/vgaarb.h
-header-test- += linux/via_core.h
-header-test- += linux/via_i2c.h
-header-test- += linux/virtio_byteorder.h
-header-test- += linux/virtio_ring.h
-header-test- += linux/visorbus.h
-header-test- += linux/vme.h
-header-test- += linux/vmstat.h
-header-test- += linux/vmw_vmci_api.h
-header-test- += linux/vmw_vmci_defs.h
-header-test- += linux/vringh.h
-header-test- += linux/vt_buffer.h
-header-test- += linux/zorro.h
-header-test- += linux/zpool.h
-header-test- += math-emu/double.h
-header-test- += math-emu/op-common.h
-header-test- += math-emu/quad.h
-header-test- += math-emu/single.h
-header-test- += math-emu/soft-fp.h
-header-test- += media/davinci/dm355_ccdc.h
-header-test- += media/davinci/dm644x_ccdc.h
-header-test- += media/davinci/isif.h
-header-test- += media/davinci/vpbe_osd.h
-header-test- += media/davinci/vpbe_types.h
-header-test- += media/davinci/vpif_types.h
-header-test- += media/demux.h
-header-test- += media/drv-intf/soc_mediabus.h
-header-test- += media/dvb_net.h
-header-test- += media/fwht-ctrls.h
-header-test- += media/i2c/ad9389b.h
-header-test- += media/i2c/adv7343.h
-header-test- += media/i2c/adv7511.h
-header-test- += media/i2c/adv7842.h
-header-test- += media/i2c/m5mols.h
-header-test- += media/i2c/mt9m032.h
-header-test- += media/i2c/mt9t112.h
-header-test- += media/i2c/mt9v032.h
-header-test- += media/i2c/ov2659.h
-header-test- += media/i2c/ov7670.h
-header-test- += media/i2c/rj54n1cb0c.h
-header-test- += media/i2c/saa6588.h
-header-test- += media/i2c/saa7115.h
-header-test- += media/i2c/sr030pc30.h
-header-test- += media/i2c/tc358743.h
-header-test- += media/i2c/tda1997x.h
-header-test- += media/i2c/ths7303.h
-header-test- += media/i2c/tvaudio.h
-header-test- += media/i2c/tvp514x.h
-header-test- += media/i2c/tvp7002.h
-header-test- += media/i2c/wm8775.h
-header-test- += media/imx.h
-header-test- += media/media-dev-allocator.h
-header-test- += media/mpeg2-ctrls.h
-header-test- += media/rcar-fcp.h
-header-test- += media/tuner-types.h
-header-test- += media/tveeprom.h
-header-test- += media/v4l2-flash-led-class.h
-header-test- += misc/altera.h
-header-test- += misc/cxl-base.h
-header-test- += misc/cxllib.h
-header-test- += net/9p/9p.h
-header-test- += net/9p/client.h
-header-test- += net/9p/transport.h
-header-test- += net/af_vsock.h
-header-test- += net/ax88796.h
-header-test- += net/bluetooth/hci.h
-header-test- += net/bluetooth/hci_core.h
-header-test- += net/bluetooth/hci_mon.h
-header-test- += net/bluetooth/hci_sock.h
-header-test- += net/bluetooth/l2cap.h
-header-test- += net/bluetooth/mgmt.h
-header-test- += net/bluetooth/rfcomm.h
-header-test- += net/bluetooth/sco.h
-header-test- += net/bond_options.h
-header-test- += net/caif/cfsrvl.h
-header-test- += net/codel_impl.h
-header-test- += net/codel_qdisc.h
-header-test- += net/compat.h
-header-test- += net/datalink.h
-header-test- += net/dcbevent.h
-header-test- += net/dcbnl.h
-header-test- += net/dn_dev.h
-header-test- += net/dn_fib.h
-header-test- += net/dn_neigh.h
-header-test- += net/dn_nsp.h
-header-test- += net/dn_route.h
-header-test- += net/erspan.h
-header-test- += net/esp.h
-header-test- += net/ethoc.h
-header-test- += net/firewire.h
-header-test- += net/flow_offload.h
-header-test- += net/fq.h
-header-test- += net/fq_impl.h
-header-test- += net/garp.h
-header-test- += net/gtp.h
-header-test- += net/gue.h
-header-test- += net/hwbm.h
-header-test- += net/ila.h
-header-test- += net/inet6_connection_sock.h
-header-test- += net/inet_common.h
-header-test- += net/inet_frag.h
-header-test- += net/ip6_route.h
-header-test- += net/ip_vs.h
-header-test- += net/ipcomp.h
-header-test- += net/ipconfig.h
-header-test- += net/iucv/af_iucv.h
-header-test- += net/iucv/iucv.h
-header-test- += net/lapb.h
-header-test- += net/llc_c_ac.h
-header-test- += net/llc_c_st.h
-header-test- += net/llc_s_ac.h
-header-test- += net/llc_s_ev.h
-header-test- += net/llc_s_st.h
-header-test- += net/mpls_iptunnel.h
-header-test- += net/mrp.h
-header-test- += net/ncsi.h
-header-test- += net/netevent.h
-header-test- += net/netns/can.h
-header-test- += net/netns/generic.h
-header-test- += net/netns/ieee802154_6lowpan.h
-header-test- += net/netns/ipv4.h
-header-test- += net/netns/ipv6.h
-header-test- += net/netns/mpls.h
-header-test- += net/netns/nftables.h
-header-test- += net/netns/sctp.h
-header-test- += net/netrom.h
-header-test- += net/p8022.h
-header-test- += net/phonet/pep.h
-header-test- += net/phonet/phonet.h
-header-test- += net/phonet/pn_dev.h
-header-test- += net/pptp.h
-header-test- += net/psample.h
-header-test- += net/psnap.h
-header-test- += net/regulatory.h
-header-test- += net/rose.h
-header-test- += net/sctp/auth.h
-header-test- += net/sctp/stream_interleave.h
-header-test- += net/sctp/stream_sched.h
-header-test- += net/sctp/tsnmap.h
-header-test- += net/sctp/ulpevent.h
-header-test- += net/sctp/ulpqueue.h
-header-test- += net/secure_seq.h
-header-test- += net/smc.h
-header-test- += net/stp.h
-header-test- += net/transp_v6.h
-header-test- += net/tun_proto.h
-header-test- += net/udplite.h
-header-test- += net/xdp.h
-header-test- += net/xdp_priv.h
-header-test- += pcmcia/cistpl.h
-header-test- += pcmcia/ds.h
-header-test- += rdma/tid_rdma_defs.h
-header-test- += scsi/fc/fc_encaps.h
-header-test- += scsi/fc/fc_fc2.h
-header-test- += scsi/fc/fc_fcoe.h
-header-test- += scsi/fc/fc_fip.h
-header-test- += scsi/fc_encode.h
-header-test- += scsi/fc_frame.h
-header-test- += scsi/iser.h
-header-test- += scsi/libfc.h
-header-test- += scsi/libfcoe.h
-header-test- += scsi/libsas.h
-header-test- += scsi/sas_ata.h
-header-test- += scsi/scsi_cmnd.h
-header-test- += scsi/scsi_dbg.h
-header-test- += scsi/scsi_device.h
-header-test- += scsi/scsi_dh.h
-header-test- += scsi/scsi_eh.h
-header-test- += scsi/scsi_host.h
-header-test- += scsi/scsi_ioctl.h
-header-test- += scsi/scsi_request.h
-header-test- += scsi/scsi_tcq.h
-header-test- += scsi/scsi_transport.h
-header-test- += scsi/scsi_transport_fc.h
-header-test- += scsi/scsi_transport_sas.h
-header-test- += scsi/scsi_transport_spi.h
-header-test- += scsi/scsi_transport_srp.h
-header-test- += scsi/scsicam.h
-header-test- += scsi/sg.h
-header-test- += soc/arc/aux.h
-header-test- += soc/arc/mcip.h
-header-test- += soc/arc/timers.h
-header-test- += soc/brcmstb/common.h
-header-test- += soc/fsl/bman.h
-header-test- += soc/fsl/qe/qe.h
-header-test- += soc/fsl/qe/qe_ic.h
-header-test- += soc/fsl/qe/qe_tdm.h
-header-test- += soc/fsl/qe/ucc.h
-header-test- += soc/fsl/qe/ucc_fast.h
-header-test- += soc/fsl/qe/ucc_slow.h
-header-test- += soc/fsl/qman.h
-header-test- += soc/nps/common.h
-header-test-$(CONFIG_ARC) += soc/nps/mtm.h
-header-test- += soc/qcom/cmd-db.h
-header-test- += soc/qcom/rpmh.h
-header-test- += soc/qcom/tcs.h
-header-test- += soc/tegra/ahb.h
-header-test- += soc/tegra/bpmp-abi.h
-header-test- += soc/tegra/common.h
-header-test- += soc/tegra/flowctrl.h
-header-test- += soc/tegra/fuse.h
-header-test- += soc/tegra/mc.h
-header-test- += sound/ac97/compat.h
-header-test- += sound/aci.h
-header-test- += sound/ad1843.h
-header-test- += sound/adau1373.h
-header-test- += sound/ak4113.h
-header-test- += sound/ak4114.h
-header-test- += sound/ak4117.h
-header-test- += sound/cs35l33.h
-header-test- += sound/cs35l34.h
-header-test- += sound/cs35l35.h
-header-test- += sound/cs35l36.h
-header-test- += sound/cs4271.h
-header-test- += sound/cs42l52.h
-header-test- += sound/cs8427.h
-header-test- += sound/da7218.h
-header-test- += sound/da7219-aad.h
-header-test- += sound/da7219.h
-header-test- += sound/da9055.h
-header-test- += sound/emu8000.h
-header-test- += sound/emux_synth.h
-header-test- += sound/hda_component.h
-header-test- += sound/hda_hwdep.h
-header-test- += sound/hda_i915.h
-header-test- += sound/hwdep.h
-header-test- += sound/i2c.h
-header-test- += sound/l3.h
-header-test- += sound/max98088.h
-header-test- += sound/max98095.h
-header-test- += sound/mixer_oss.h
-header-test- += sound/omap-hdmi-audio.h
-header-test- += sound/pcm_drm_eld.h
-header-test- += sound/pcm_iec958.h
-header-test- += sound/pcm_oss.h
-header-test- += sound/pxa2xx-lib.h
-header-test- += sound/rt286.h
-header-test- += sound/rt298.h
-header-test- += sound/rt5645.h
-header-test- += sound/rt5659.h
-header-test- += sound/rt5660.h
-header-test- += sound/rt5665.h
-header-test- += sound/rt5670.h
-header-test- += sound/s3c24xx_uda134x.h
-header-test- += sound/seq_device.h
-header-test- += sound/seq_kernel.h
-header-test- += sound/seq_midi_emul.h
-header-test- += sound/seq_oss.h
-header-test- += sound/soc-acpi-intel-match.h
-header-test- += sound/soc-dai.h
-header-test- += sound/soc-dapm.h
-header-test- += sound/soc-dpcm.h
-header-test- += sound/sof/control.h
-header-test- += sound/sof/dai-intel.h
-header-test- += sound/sof/dai.h
-header-test- += sound/sof/header.h
-header-test- += sound/sof/info.h
-header-test- += sound/sof/pm.h
-header-test- += sound/sof/stream.h
-header-test- += sound/sof/topology.h
-header-test- += sound/sof/trace.h
-header-test- += sound/sof/xtensa.h
-header-test- += sound/spear_spdif.h
-header-test- += sound/sta32x.h
-header-test- += sound/sta350.h
-header-test- += sound/tea6330t.h
-header-test- += sound/tlv320aic32x4.h
-header-test- += sound/tlv320dac33-plat.h
-header-test- += sound/uda134x.h
-header-test- += sound/wavefront.h
-header-test- += sound/wm8903.h
-header-test- += sound/wm8904.h
-header-test- += sound/wm8960.h
-header-test- += sound/wm8962.h
-header-test- += sound/wm8993.h
-header-test- += sound/wm8996.h
-header-test- += sound/wm9081.h
-header-test- += sound/wm9090.h
-header-test- += target/iscsi/iscsi_target_stat.h
-header-test- += trace/bpf_probe.h
-header-test- += trace/events/9p.h
-header-test- += trace/events/afs.h
-header-test- += trace/events/asoc.h
-header-test- += trace/events/bcache.h
-header-test- += trace/events/block.h
-header-test- += trace/events/cachefiles.h
-header-test- += trace/events/cgroup.h
-header-test- += trace/events/clk.h
-header-test- += trace/events/cma.h
-header-test- += trace/events/ext4.h
-header-test- += trace/events/f2fs.h
-header-test- += trace/events/fs_dax.h
-header-test- += trace/events/fscache.h
-header-test- += trace/events/fsi.h
-header-test- += trace/events/fsi_master_ast_cf.h
-header-test- += trace/events/fsi_master_gpio.h
-header-test- += trace/events/huge_memory.h
-header-test- += trace/events/ib_mad.h
-header-test- += trace/events/ib_umad.h
-header-test- += trace/events/iscsi.h
-header-test- += trace/events/jbd2.h
-header-test- += trace/events/kvm.h
-header-test- += trace/events/kyber.h
-header-test- += trace/events/libata.h
-header-test- += trace/events/mce.h
-header-test- += trace/events/mdio.h
-header-test- += trace/events/migrate.h
-header-test- += trace/events/mmflags.h
-header-test- += trace/events/nbd.h
-header-test- += trace/events/nilfs2.h
-header-test- += trace/events/pwc.h
-header-test- += trace/events/rdma.h
-header-test- += trace/events/rpcgss.h
-header-test- += trace/events/rpcrdma.h
-header-test- += trace/events/rxrpc.h
-header-test- += trace/events/scsi.h
-header-test- += trace/events/siox.h
-header-test- += trace/events/spi.h
-header-test- += trace/events/swiotlb.h
-header-test- += trace/events/syscalls.h
-header-test- += trace/events/target.h
-header-test- += trace/events/thermal_power_allocator.h
-header-test- += trace/events/timer.h
-header-test- += trace/events/wbt.h
-header-test- += trace/events/xen.h
-header-test- += trace/perf.h
-header-test- += trace/trace_events.h
-header-test- += uapi/drm/vmwgfx_drm.h
-header-test- += uapi/linux/a.out.h
-header-test- += uapi/linux/coda.h
-header-test- += uapi/linux/coda_psdev.h
-header-test- += uapi/linux/errqueue.h
-header-test- += uapi/linux/eventpoll.h
-header-test- += uapi/linux/hdlc/ioctl.h
-header-test- += uapi/linux/input.h
-header-test- += uapi/linux/kvm.h
-header-test- += uapi/linux/kvm_para.h
-header-test- += uapi/linux/lightnvm.h
-header-test- += uapi/linux/mic_common.h
-header-test- += uapi/linux/mman.h
-header-test- += uapi/linux/nilfs2_ondisk.h
-header-test- += uapi/linux/patchkey.h
-header-test- += uapi/linux/ptrace.h
-header-test- += uapi/linux/scc.h
-header-test- += uapi/linux/seg6_iptunnel.h
-header-test- += uapi/linux/smc_diag.h
-header-test- += uapi/linux/timex.h
-header-test- += uapi/linux/videodev2.h
-header-test- += uapi/scsi/scsi_bsg_fc.h
-header-test- += uapi/sound/asound.h
-header-test- += uapi/sound/sof/eq.h
-header-test- += uapi/sound/sof/fw.h
-header-test- += uapi/sound/sof/header.h
-header-test- += uapi/sound/sof/manifest.h
-header-test- += uapi/sound/sof/trace.h
-header-test- += uapi/xen/evtchn.h
-header-test- += uapi/xen/gntdev.h
-header-test- += uapi/xen/privcmd.h
-header-test- += vdso/vsyscall.h
-header-test- += video/broadsheetfb.h
-header-test- += video/cvisionppc.h
-header-test- += video/gbe.h
-header-test- += video/kyro.h
-header-test- += video/maxinefb.h
-header-test- += video/metronomefb.h
-header-test- += video/neomagic.h
-header-test- += video/of_display_timing.h
-header-test- += video/omapvrfb.h
-header-test- += video/s1d13xxxfb.h
-header-test- += video/sstfb.h
-header-test- += video/tgafb.h
-header-test- += video/udlfb.h
-header-test- += video/uvesafb.h
-header-test- += video/vga.h
-header-test- += video/w100fb.h
-header-test- += xen/acpi.h
-header-test- += xen/arm/hypercall.h
-header-test- += xen/arm/page-coherent.h
-header-test- += xen/arm/page.h
-header-test- += xen/balloon.h
-header-test- += xen/events.h
-header-test- += xen/features.h
-header-test- += xen/grant_table.h
-header-test- += xen/hvm.h
-header-test- += xen/interface/callback.h
-header-test- += xen/interface/event_channel.h
-header-test- += xen/interface/grant_table.h
-header-test- += xen/interface/hvm/dm_op.h
-header-test- += xen/interface/hvm/hvm_op.h
-header-test- += xen/interface/hvm/hvm_vcpu.h
-header-test- += xen/interface/hvm/params.h
-header-test- += xen/interface/hvm/start_info.h
-header-test- += xen/interface/io/9pfs.h
-header-test- += xen/interface/io/blkif.h
-header-test- += xen/interface/io/console.h
-header-test- += xen/interface/io/displif.h
-header-test- += xen/interface/io/fbif.h
-header-test- += xen/interface/io/kbdif.h
-header-test- += xen/interface/io/netif.h
-header-test- += xen/interface/io/pciif.h
-header-test- += xen/interface/io/protocols.h
-header-test- += xen/interface/io/pvcalls.h
-header-test- += xen/interface/io/ring.h
-header-test- += xen/interface/io/sndif.h
-header-test- += xen/interface/io/tpmif.h
-header-test- += xen/interface/io/vscsiif.h
-header-test- += xen/interface/io/xs_wire.h
-header-test- += xen/interface/memory.h
-header-test- += xen/interface/nmi.h
-header-test- += xen/interface/physdev.h
-header-test- += xen/interface/platform.h
-header-test- += xen/interface/sched.h
-header-test- += xen/interface/vcpu.h
-header-test- += xen/interface/version.h
-header-test- += xen/interface/xen-mca.h
-header-test- += xen/interface/xen.h
-header-test- += xen/interface/xenpmu.h
-header-test- += xen/mem-reservation.h
-header-test- += xen/page.h
-header-test- += xen/platform_pci.h
-header-test- += xen/swiotlb-xen.h
-header-test- += xen/xen-front-pgdir-shbuf.h
-header-test- += xen/xen-ops.h
-header-test- += xen/xen.h
-header-test- += xen/xenbus.h
-
-# Do not include directly
-header-test- += linux/compiler-clang.h
-header-test- += linux/compiler-gcc.h
-header-test- += linux/patchkey.h
-header-test- += linux/rwlock_api_smp.h
-header-test- += linux/spinlock_types_up.h
-header-test- += linux/spinlock_up.h
-header-test- += linux/wimax/debug.h
-header-test- += rdma/uverbs_named_ioctl.h
-
-# asm-generic/*.h is used by asm/*.h, and should not be included directly
-header-test- += asm-generic/% uapi/asm-generic/%
-
-# Timestamp files touched by Kconfig
-header-test- += config/%
-
-# Timestamp files touched by scripts/adjust_autoksyms.sh
-header-test- += ksym/%
-
-# You could compile-test these, but maybe not so useful...
-header-test- += dt-bindings/%
-
-# Do not test generated headers. Stale headers are often left over when you
-# traverse the git history without cleaning.
-header-test- += generated/%
-
-# The rest are compile-tested
-header-test-pattern-y += */*.h */*/*.h */*/*/*.h */*/*/*/*.h
diff --git a/include/acpi/acbuffer.h b/include/acpi/acbuffer.h
index c2acd29f973d..531c1e9a7d10 100644
--- a/include/acpi/acbuffer.h
+++ b/include/acpi/acbuffer.h
@@ -3,7 +3,7 @@
*
* Name: acbuffer.h - Support for buffers returned by ACPI predefined names
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/include/acpi/acconfig.h b/include/acpi/acconfig.h
index 42d573172e53..5940a3c68a96 100644
--- a/include/acpi/acconfig.h
+++ b/include/acpi/acconfig.h
@@ -3,7 +3,7 @@
*
* Name: acconfig.h - Global configuration constants
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/include/acpi/acexcep.h b/include/acpi/acexcep.h
index 233a72f169bb..436cd1411c3a 100644
--- a/include/acpi/acexcep.h
+++ b/include/acpi/acexcep.h
@@ -3,7 +3,7 @@
*
* Name: acexcep.h - Exception codes returned by the ACPI subsystem
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/include/acpi/acnames.h b/include/acpi/acnames.h
index 8b3eae96706a..8922edb32730 100644
--- a/include/acpi/acnames.h
+++ b/include/acpi/acnames.h
@@ -3,7 +3,7 @@
*
* Name: acnames.h - Global names and strings
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/include/acpi/acoutput.h b/include/acpi/acoutput.h
index c50542dc71e0..c5d900c0ecda 100644
--- a/include/acpi/acoutput.h
+++ b/include/acpi/acoutput.h
@@ -3,7 +3,7 @@
*
* Name: acoutput.h -- debug output
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/include/acpi/acpi.h b/include/acpi/acpi.h
index bc7d39ecf574..e3e8051d4812 100644
--- a/include/acpi/acpi.h
+++ b/include/acpi/acpi.h
@@ -3,7 +3,7 @@
*
* Name: acpi.h - Master public include file used to interface to ACPICA
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
index 175f7b40c585..a92bea7184a8 100644
--- a/include/acpi/acpi_bus.h
+++ b/include/acpi/acpi_bus.h
@@ -78,12 +78,9 @@ acpi_evaluate_dsm_typed(acpi_handle handle, const guid_t *guid, u64 rev,
bool acpi_dev_found(const char *hid);
bool acpi_dev_present(const char *hid, const char *uid, s64 hrv);
-struct acpi_device *
-acpi_dev_get_first_match_dev(const char *hid, const char *uid, s64 hrv);
-
#ifdef CONFIG_ACPI
-#include <linux/proc_fs.h>
+struct proc_dir_entry;
#define ACPI_BUS_FILE_ROOT "acpi"
extern struct proc_dir_entry *acpi_root_dir;
@@ -683,6 +680,11 @@ static inline bool acpi_device_can_poweroff(struct acpi_device *adev)
adev->power.states[ACPI_STATE_D3_HOT].flags.explicit_set);
}
+bool acpi_dev_hid_uid_match(struct acpi_device *adev, const char *hid2, const char *uid2);
+
+struct acpi_device *
+acpi_dev_get_first_match_dev(const char *hid, const char *uid, s64 hrv);
+
static inline void acpi_dev_put(struct acpi_device *adev)
{
put_device(&adev->dev);
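(Editorial sketch, not part of the patch.) One plausible way a driver could combine the helpers declared above; the "XYZ0001" HID, the "1" UID and the function name are invented for illustration:

#include <linux/errno.h>
#include <linux/printk.h>
#include <acpi/acpi_bus.h>

static int example_probe_by_hid(void)
{
	struct acpi_device *adev;

	/* Take a reference on the first device matching the HID (any UID/HRV). */
	adev = acpi_dev_get_first_match_dev("XYZ0001", NULL, -1);
	if (!adev)
		return -ENODEV;

	/* Optionally confirm both HID and _UID on the device we got back. */
	if (acpi_dev_hid_uid_match(adev, "XYZ0001", "1"))
		pr_info("XYZ0001 instance with _UID 1 present\n");

	acpi_dev_put(adev);	/* release the reference taken by the lookup */
	return 0;
}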
diff --git a/include/acpi/acpiosxf.h b/include/acpi/acpiosxf.h
index 2e63b7b390f5..33bb8c9a089d 100644
--- a/include/acpi/acpiosxf.h
+++ b/include/acpi/acpiosxf.h
@@ -5,7 +5,7 @@
* interfaces must be implemented by OSL to interface the
* ACPI components to the host operating system.
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h
index e5e041413581..87fc14e97d2b 100644
--- a/include/acpi/acpixf.h
+++ b/include/acpi/acpixf.h
@@ -3,7 +3,7 @@
*
* Name: acpixf.h - External interfaces to the ACPI subsystem
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
@@ -12,7 +12,7 @@
/* Current ACPICA subsystem version in YYYYMMDD format */
-#define ACPI_CA_VERSION 0x20190816
+#define ACPI_CA_VERSION 0x20200214
#include <acpi/acconfig.h>
#include <acpi/actypes.h>
@@ -458,7 +458,11 @@ ACPI_EXTERNAL_RETURN_STATUS(acpi_status ACPI_INIT_FUNCTION
u8 physical))
ACPI_EXTERNAL_RETURN_STATUS(acpi_status
- acpi_load_table(struct acpi_table_header *table))
+ acpi_load_table(struct acpi_table_header *table,
+ u32 *table_idx))
+
+ACPI_EXTERNAL_RETURN_STATUS(acpi_status
+ acpi_unload_table(u32 table_index))
ACPI_EXTERNAL_RETURN_STATUS(acpi_status
acpi_unload_parent_table(acpi_handle object))
@@ -748,6 +752,8 @@ ACPI_HW_DEPENDENT_RETURN_UINT32(u32 acpi_dispatch_gpe(acpi_handle gpe_device, u3
ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_disable_all_gpes(void))
ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_enable_all_runtime_gpes(void))
ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status acpi_enable_all_wakeup_gpes(void))
+ACPI_HW_DEPENDENT_RETURN_UINT32(u32 acpi_any_gpe_status_set(u32 gpe_skip_number))
+ACPI_HW_DEPENDENT_RETURN_UINT32(u32 acpi_any_fixed_event_status_set(void))
ACPI_HW_DEPENDENT_RETURN_STATUS(acpi_status
acpi_get_gpe_device(u32 gpe_index,
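(Editorial sketch, not part of the patch.) How a caller might use the reworked dynamic-table interface above: acpi_load_table() now hands back a table index through its second argument, and the new acpi_unload_table() takes that index to remove the table again. The wrapper function and the caller-supplied table buffer are assumptions:

#include <acpi/acpi.h>

static acpi_status example_load_then_unload(struct acpi_table_header *table)
{
	acpi_status status;
	u32 table_idx;

	status = acpi_load_table(table, &table_idx);	/* updated two-argument form */
	if (ACPI_FAILURE(status))
		return status;

	/* ... the table and its namespace objects are available here ... */

	return acpi_unload_table(table_idx);		/* new counterpart call */
}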
diff --git a/include/acpi/acrestyp.h b/include/acpi/acrestyp.h
index 62930583219f..d3521894ce6a 100644
--- a/include/acpi/acrestyp.h
+++ b/include/acpi/acrestyp.h
@@ -3,7 +3,7 @@
*
* Name: acrestyp.h - Defines, types, and structures for resource descriptors
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/include/acpi/actbl.h b/include/acpi/actbl.h
index d568128025df..5007c41f4d54 100644
--- a/include/acpi/actbl.h
+++ b/include/acpi/actbl.h
@@ -3,7 +3,7 @@
*
* Name: actbl.h - Basic ACPI Table Definitions
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/include/acpi/actbl1.h b/include/acpi/actbl1.h
index 22c039ebc6c5..43549547ed3e 100644
--- a/include/acpi/actbl1.h
+++ b/include/acpi/actbl1.h
@@ -3,7 +3,7 @@
*
* Name: actbl1.h - Additional ACPI table definitions
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
@@ -862,7 +862,7 @@ enum acpi_erst_instructions {
/* Command status return values */
enum acpi_erst_command_status {
- ACPI_ERST_SUCESS = 0,
+ ACPI_ERST_SUCCESS = 0,
ACPI_ERST_NO_SPACE = 1,
ACPI_ERST_NOT_AVAILABLE = 2,
ACPI_ERST_FAILURE = 3,
diff --git a/include/acpi/actbl2.h b/include/acpi/actbl2.h
index e45ced27f4c3..b818ba60e19d 100644
--- a/include/acpi/actbl2.h
+++ b/include/acpi/actbl2.h
@@ -3,7 +3,7 @@
*
* Name: actbl2.h - ACPI Table Definitions (tables not in ACPI spec)
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/include/acpi/actbl3.h b/include/acpi/actbl3.h
index 7a58c10ce421..2bf3baf819bb 100644
--- a/include/acpi/actbl3.h
+++ b/include/acpi/actbl3.h
@@ -3,7 +3,7 @@
*
* Name: actbl3.h - ACPI Table Definitions
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h
index 2f3f28c7cea3..4defed58ea33 100644
--- a/include/acpi/actypes.h
+++ b/include/acpi/actypes.h
@@ -3,7 +3,7 @@
*
* Name: actypes.h - Common data types for the entire ACPI subsystem
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
@@ -532,11 +532,12 @@ typedef u64 acpi_integer;
strnlen (a, ACPI_NAMESEG_SIZE) == ACPI_NAMESEG_SIZE)
/*
- * Algorithm to obtain access bit width.
+ * Algorithm to obtain access bit or byte width.
* Can be used with access_width of struct acpi_generic_address and access_size of
* struct acpi_resource_generic_register.
*/
#define ACPI_ACCESS_BIT_WIDTH(size) (1 << ((size) + 2))
+#define ACPI_ACCESS_BYTE_WIDTH(size) (1 << ((size) - 1))
/*******************************************************************************
*
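(Editorial illustration, not part of the patch.) For the usual GAS access-size encoding (1 = byte, 2 = word, 3 = dword, 4 = qword), the two macros above give consistent answers:

/*
 * ACPI_ACCESS_BIT_WIDTH(size)  == 1 << (size + 2)
 * ACPI_ACCESS_BYTE_WIDTH(size) == 1 << (size - 1)
 *
 *   size = 1  ->   8 bits / 1 byte
 *   size = 2  ->  16 bits / 2 bytes
 *   size = 3  ->  32 bits / 4 bytes
 *   size = 4  ->  64 bits / 8 bytes
 */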
diff --git a/include/acpi/acuuid.h b/include/acpi/acuuid.h
index 23262cab047a..9dd4689a39cf 100644
--- a/include/acpi/acuuid.h
+++ b/include/acpi/acuuid.h
@@ -3,7 +3,7 @@
*
* Name: acuuid.h - ACPI-related UUID/GUID definitions
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/include/acpi/button.h b/include/acpi/button.h
index 3a2b8535dec6..af2fce5d2ee3 100644
--- a/include/acpi/button.h
+++ b/include/acpi/button.h
@@ -2,21 +2,13 @@
#ifndef ACPI_BUTTON_H
#define ACPI_BUTTON_H
-#include <linux/notifier.h>
+#define ACPI_BUTTON_HID_POWER "PNP0C0C"
+#define ACPI_BUTTON_HID_LID "PNP0C0D"
+#define ACPI_BUTTON_HID_SLEEP "PNP0C0E"
#if IS_ENABLED(CONFIG_ACPI_BUTTON)
-extern int acpi_lid_notifier_register(struct notifier_block *nb);
-extern int acpi_lid_notifier_unregister(struct notifier_block *nb);
extern int acpi_lid_open(void);
#else
-static inline int acpi_lid_notifier_register(struct notifier_block *nb)
-{
- return 0;
-}
-static inline int acpi_lid_notifier_unregister(struct notifier_block *nb)
-{
- return 0;
-}
static inline int acpi_lid_open(void)
{
return 1;
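(Editorial sketch, not part of the patch.) With the notifier hooks gone, code that cares about the lid queries the state directly; when CONFIG_ACPI_BUTTON is disabled the stub above always reports the lid as open. The helper below is an invented example:

#include <acpi/button.h>

static bool example_lid_is_closed(void)
{
	/* acpi_lid_open(): 1 = open, 0 = closed, negative = no lid device. */
	return acpi_lid_open() == 0;
}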
diff --git a/include/acpi/platform/acenv.h b/include/acpi/platform/acenv.h
index 35ab3f87cc29..8f6b2654c0b3 100644
--- a/include/acpi/platform/acenv.h
+++ b/include/acpi/platform/acenv.h
@@ -3,7 +3,7 @@
*
* Name: acenv.h - Host and compiler configuration
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
@@ -128,6 +128,17 @@
#endif
+/*
+ * acpisrc CR/LF support
+ * Unix file line endings do not include the carriage return.
+ * If the acpisrc utility is being built with a Microsoft compiler, it will be
+ * running on a Windows machine, so the output is expected to have CR/LF
+ * newlines. If the acpisrc utility is built with anything else, it will
+ * likely run on a system with LF newlines. This flag tells the acpisrc
+ * utility that newlines will be in the LF format.
+ */
+#define ACPI_SRC_OS_LF_ONLY 0
+
/*! [Begin] no source code translation */
/******************************************************************************
diff --git a/include/acpi/platform/acenvex.h b/include/acpi/platform/acenvex.h
index 2e36c8344897..c3facf5f8495 100644
--- a/include/acpi/platform/acenvex.h
+++ b/include/acpi/platform/acenvex.h
@@ -3,7 +3,7 @@
*
* Name: acenvex.h - Extra host and compiler configuration
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/include/acpi/platform/acgcc.h b/include/acpi/platform/acgcc.h
index 6a0705b433d2..7d63d03cf507 100644
--- a/include/acpi/platform/acgcc.h
+++ b/include/acpi/platform/acgcc.h
@@ -3,7 +3,7 @@
*
* Name: acgcc.h - GCC specific defines, etc.
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/include/acpi/platform/acgccex.h b/include/acpi/platform/acgccex.h
index 8dda2856aca1..7c88fd1de955 100644
--- a/include/acpi/platform/acgccex.h
+++ b/include/acpi/platform/acgccex.h
@@ -3,7 +3,7 @@
*
* Name: acgccex.h - Extra GCC specific defines, etc.
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/include/acpi/platform/acintel.h b/include/acpi/platform/acintel.h
index d2cc247248cb..e7fd5e71be62 100644
--- a/include/acpi/platform/acintel.h
+++ b/include/acpi/platform/acintel.h
@@ -3,7 +3,7 @@
*
* Name: acintel.h - VC specific defines, etc.
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/include/acpi/platform/aclinux.h b/include/acpi/platform/aclinux.h
index 310501994c02..987e2af7c335 100644
--- a/include/acpi/platform/aclinux.h
+++ b/include/acpi/platform/aclinux.h
@@ -3,7 +3,7 @@
*
* Name: aclinux.h - OS specific defines, etc. for Linux
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/include/acpi/platform/aclinuxex.h b/include/acpi/platform/aclinuxex.h
index cc4f1eb53cba..04f88f2de781 100644
--- a/include/acpi/platform/aclinuxex.h
+++ b/include/acpi/platform/aclinuxex.h
@@ -3,7 +3,7 @@
*
* Name: aclinuxex.h - Extra OS specific defines, etc. for Linux
*
- * Copyright (C) 2000 - 2019, Intel Corp.
+ * Copyright (C) 2000 - 2020, Intel Corp.
*
*****************************************************************************/
diff --git a/include/asm-generic/4level-fixup.h b/include/asm-generic/4level-fixup.h
deleted file mode 100644
index e3667c9a33a5..000000000000
--- a/include/asm-generic/4level-fixup.h
+++ /dev/null
@@ -1,40 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _4LEVEL_FIXUP_H
-#define _4LEVEL_FIXUP_H
-
-#define __ARCH_HAS_4LEVEL_HACK
-#define __PAGETABLE_PUD_FOLDED 1
-
-#define PUD_SHIFT PGDIR_SHIFT
-#define PUD_SIZE PGDIR_SIZE
-#define PUD_MASK PGDIR_MASK
-#define PTRS_PER_PUD 1
-
-#define pud_t pgd_t
-
-#define pmd_alloc(mm, pud, address) \
- ((unlikely(pgd_none(*(pud))) && __pmd_alloc(mm, pud, address))? \
- NULL: pmd_offset(pud, address))
-
-#define pud_offset(pgd, start) (pgd)
-#define pud_none(pud) 0
-#define pud_bad(pud) 0
-#define pud_present(pud) 1
-#define pud_ERROR(pud) do { } while (0)
-#define pud_clear(pud) pgd_clear(pud)
-#define pud_val(pud) pgd_val(pud)
-#define pud_populate(mm, pud, pmd) pgd_populate(mm, pud, pmd)
-#define pud_page(pud) pgd_page(pud)
-#define pud_page_vaddr(pud) pgd_page_vaddr(pud)
-
-#undef pud_free_tlb
-#define pud_free_tlb(tlb, x, addr) do { } while (0)
-#define pud_free(mm, x) do { } while (0)
-#define __pud_free_tlb(tlb, x, addr) do { } while (0)
-
-#undef pud_addr_end
-#define pud_addr_end(addr, end) (end)
-
-#include <asm-generic/5level-fixup.h>
-
-#endif
diff --git a/include/asm-generic/5level-fixup.h b/include/asm-generic/5level-fixup.h
index f6947da70d71..4c74b1c1d13b 100644
--- a/include/asm-generic/5level-fixup.h
+++ b/include/asm-generic/5level-fixup.h
@@ -51,7 +51,6 @@ static inline int p4d_present(p4d_t p4d)
#undef p4d_free_tlb
#define p4d_free_tlb(tlb, x, addr) do { } while (0)
#define p4d_free(mm, x) do { } while (0)
-#define __p4d_free_tlb(tlb, x, addr) do { } while (0)
#undef p4d_addr_end
#define p4d_addr_end(addr, end) (end)
diff --git a/include/asm-generic/Kbuild b/include/asm-generic/Kbuild
index adff14fcb8e4..cd17d50697cc 100644
--- a/include/asm-generic/Kbuild
+++ b/include/asm-generic/Kbuild
@@ -4,4 +4,6 @@
# (This file is not included when SRCARCH=um since UML borrows several
 # asm headers from the host architecture.)
+mandatory-y += dma-contiguous.h
+mandatory-y += msi.h
mandatory-y += simd.h
diff --git a/include/asm-generic/bitops-instrumented.h b/include/asm-generic/bitops-instrumented.h
deleted file mode 100644
index ddd1c6d9d8db..000000000000
--- a/include/asm-generic/bitops-instrumented.h
+++ /dev/null
@@ -1,263 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-
-/*
- * This file provides wrappers with sanitizer instrumentation for bit
- * operations.
- *
- * To use this functionality, an arch's bitops.h file needs to define each of
- * the below bit operations with an arch_ prefix (e.g. arch_set_bit(),
- * arch___set_bit(), etc.).
- */
-#ifndef _ASM_GENERIC_BITOPS_INSTRUMENTED_H
-#define _ASM_GENERIC_BITOPS_INSTRUMENTED_H
-
-#include <linux/kasan-checks.h>
-
-/**
- * set_bit - Atomically set a bit in memory
- * @nr: the bit to set
- * @addr: the address to start counting from
- *
- * This is a relaxed atomic operation (no implied memory barriers).
- *
- * Note that @nr may be almost arbitrarily large; this function is not
- * restricted to acting on a single-word quantity.
- */
-static inline void set_bit(long nr, volatile unsigned long *addr)
-{
- kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
- arch_set_bit(nr, addr);
-}
-
-/**
- * __set_bit - Set a bit in memory
- * @nr: the bit to set
- * @addr: the address to start counting from
- *
- * Unlike set_bit(), this function is non-atomic. If it is called on the same
- * region of memory concurrently, the effect may be that only one operation
- * succeeds.
- */
-static inline void __set_bit(long nr, volatile unsigned long *addr)
-{
- kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
- arch___set_bit(nr, addr);
-}
-
-/**
- * clear_bit - Clears a bit in memory
- * @nr: Bit to clear
- * @addr: Address to start counting from
- *
- * This is a relaxed atomic operation (no implied memory barriers).
- */
-static inline void clear_bit(long nr, volatile unsigned long *addr)
-{
- kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
- arch_clear_bit(nr, addr);
-}
-
-/**
- * __clear_bit - Clears a bit in memory
- * @nr: the bit to clear
- * @addr: the address to start counting from
- *
- * Unlike clear_bit(), this function is non-atomic. If it is called on the same
- * region of memory concurrently, the effect may be that only one operation
- * succeeds.
- */
-static inline void __clear_bit(long nr, volatile unsigned long *addr)
-{
- kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
- arch___clear_bit(nr, addr);
-}
-
-/**
- * clear_bit_unlock - Clear a bit in memory, for unlock
- * @nr: the bit to set
- * @addr: the address to start counting from
- *
- * This operation is atomic and provides release barrier semantics.
- */
-static inline void clear_bit_unlock(long nr, volatile unsigned long *addr)
-{
- kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
- arch_clear_bit_unlock(nr, addr);
-}
-
-/**
- * __clear_bit_unlock - Clears a bit in memory
- * @nr: Bit to clear
- * @addr: Address to start counting from
- *
- * This is a non-atomic operation but implies a release barrier before the
- * memory operation. It can be used for an unlock if no other CPUs can
- * concurrently modify other bits in the word.
- */
-static inline void __clear_bit_unlock(long nr, volatile unsigned long *addr)
-{
- kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
- arch___clear_bit_unlock(nr, addr);
-}
-
-/**
- * change_bit - Toggle a bit in memory
- * @nr: Bit to change
- * @addr: Address to start counting from
- *
- * This is a relaxed atomic operation (no implied memory barriers).
- *
- * Note that @nr may be almost arbitrarily large; this function is not
- * restricted to acting on a single-word quantity.
- */
-static inline void change_bit(long nr, volatile unsigned long *addr)
-{
- kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
- arch_change_bit(nr, addr);
-}
-
-/**
- * __change_bit - Toggle a bit in memory
- * @nr: the bit to change
- * @addr: the address to start counting from
- *
- * Unlike change_bit(), this function is non-atomic. If it is called on the same
- * region of memory concurrently, the effect may be that only one operation
- * succeeds.
- */
-static inline void __change_bit(long nr, volatile unsigned long *addr)
-{
- kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
- arch___change_bit(nr, addr);
-}
-
-/**
- * test_and_set_bit - Set a bit and return its old value
- * @nr: Bit to set
- * @addr: Address to count from
- *
- * This is an atomic fully-ordered operation (implied full memory barrier).
- */
-static inline bool test_and_set_bit(long nr, volatile unsigned long *addr)
-{
- kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
- return arch_test_and_set_bit(nr, addr);
-}
-
-/**
- * __test_and_set_bit - Set a bit and return its old value
- * @nr: Bit to set
- * @addr: Address to count from
- *
- * This operation is non-atomic. If two instances of this operation race, one
- * can appear to succeed but actually fail.
- */
-static inline bool __test_and_set_bit(long nr, volatile unsigned long *addr)
-{
- kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
- return arch___test_and_set_bit(nr, addr);
-}
-
-/**
- * test_and_set_bit_lock - Set a bit and return its old value, for lock
- * @nr: Bit to set
- * @addr: Address to count from
- *
- * This operation is atomic and provides acquire barrier semantics if
- * the returned value is 0.
- * It can be used to implement bit locks.
- */
-static inline bool test_and_set_bit_lock(long nr, volatile unsigned long *addr)
-{
- kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
- return arch_test_and_set_bit_lock(nr, addr);
-}
-
-/**
- * test_and_clear_bit - Clear a bit and return its old value
- * @nr: Bit to clear
- * @addr: Address to count from
- *
- * This is an atomic fully-ordered operation (implied full memory barrier).
- */
-static inline bool test_and_clear_bit(long nr, volatile unsigned long *addr)
-{
- kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
- return arch_test_and_clear_bit(nr, addr);
-}
-
-/**
- * __test_and_clear_bit - Clear a bit and return its old value
- * @nr: Bit to clear
- * @addr: Address to count from
- *
- * This operation is non-atomic. If two instances of this operation race, one
- * can appear to succeed but actually fail.
- */
-static inline bool __test_and_clear_bit(long nr, volatile unsigned long *addr)
-{
- kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
- return arch___test_and_clear_bit(nr, addr);
-}
-
-/**
- * test_and_change_bit - Change a bit and return its old value
- * @nr: Bit to change
- * @addr: Address to count from
- *
- * This is an atomic fully-ordered operation (implied full memory barrier).
- */
-static inline bool test_and_change_bit(long nr, volatile unsigned long *addr)
-{
- kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
- return arch_test_and_change_bit(nr, addr);
-}
-
-/**
- * __test_and_change_bit - Change a bit and return its old value
- * @nr: Bit to change
- * @addr: Address to count from
- *
- * This operation is non-atomic. If two instances of this operation race, one
- * can appear to succeed but actually fail.
- */
-static inline bool __test_and_change_bit(long nr, volatile unsigned long *addr)
-{
- kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
- return arch___test_and_change_bit(nr, addr);
-}
-
-/**
- * test_bit - Determine whether a bit is set
- * @nr: bit number to test
- * @addr: Address to start counting from
- */
-static inline bool test_bit(long nr, const volatile unsigned long *addr)
-{
- kasan_check_read(addr + BIT_WORD(nr), sizeof(long));
- return arch_test_bit(nr, addr);
-}
-
-#if defined(arch_clear_bit_unlock_is_negative_byte)
-/**
- * clear_bit_unlock_is_negative_byte - Clear a bit in memory and test if bottom
- * byte is negative, for unlock.
- * @nr: the bit to clear
- * @addr: the address to start counting from
- *
- * This operation is atomic and provides release barrier semantics.
- *
- * This is a bit of a one-trick-pony for the filemap code, which clears
- * PG_locked and tests PG_waiters,
- */
-static inline bool
-clear_bit_unlock_is_negative_byte(long nr, volatile unsigned long *addr)
-{
- kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
- return arch_clear_bit_unlock_is_negative_byte(nr, addr);
-}
-/* Let everybody know we have it. */
-#define clear_bit_unlock_is_negative_byte clear_bit_unlock_is_negative_byte
-#endif
-
-#endif /* _ASM_GENERIC_BITOPS_INSTRUMENTED_H */
diff --git a/include/asm-generic/bitops.h b/include/asm-generic/bitops.h
index bfc96bf6606e..df9b5bc3d282 100644
--- a/include/asm-generic/bitops.h
+++ b/include/asm-generic/bitops.h
@@ -4,8 +4,9 @@
/*
* For the benefit of those who are trying to port Linux to another
- * architecture, here are some C-language equivalents. You should
- * recode these in the native assembly language, if at all possible.
+ * architecture, here are some C-language equivalents. They should
+ * generate reasonable code, so take a look at what your compiler spits
+ * out before rolling your own buggy implementation in assembly language.
*
* C language equivalents written by Theodore Ts'o, 9/26/92
*/
diff --git a/include/asm-generic/bitops/find.h b/include/asm-generic/bitops/find.h
index 8a1ee10014de..9fdf21302fdf 100644
--- a/include/asm-generic/bitops/find.h
+++ b/include/asm-generic/bitops/find.h
@@ -80,4 +80,21 @@ extern unsigned long find_first_zero_bit(const unsigned long *addr,
#endif /* CONFIG_GENERIC_FIND_FIRST_BIT */
+/**
+ * find_next_clump8 - find next 8-bit clump with set bits in a memory region
+ * @clump: location to store copy of found clump
+ * @addr: address to base the search on
+ * @size: bitmap size in number of bits
+ * @offset: bit offset at which to start searching
+ *
+ * Returns the bit offset for the next set clump; the found clump value is
+ * copied to the location pointed to by @clump. If no bits are set, returns @size.
+ */
+extern unsigned long find_next_clump8(unsigned long *clump,
+ const unsigned long *addr,
+ unsigned long size, unsigned long offset);
+
+#define find_first_clump8(clump, bits, size) \
+ find_next_clump8((clump), (bits), (size), 0)
+
#endif /*_ASM_GENERIC_BITOPS_FIND_H_ */
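A minimal usage sketch for the new clump helpers (illustrative only; names hypothetical), scanning a bitmap in aligned 8-bit clumps:

#include <linux/bitops.h>
#include <linux/printk.h>

/* Report every non-zero 8-bit clump in a bitmap of @nbits bits. */
static void report_clumps(const unsigned long *bits, unsigned long nbits)
{
	unsigned long clump;
	unsigned long offset;

	for (offset = find_first_clump8(&clump, bits, nbits);
	     offset < nbits;
	     offset = find_next_clump8(&clump, bits, nbits, offset + 8))
		pr_info("clump at bit %lu: %#lx\n", offset, clump);
}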
diff --git a/include/asm-generic/bitops/instrumented-atomic.h b/include/asm-generic/bitops/instrumented-atomic.h
new file mode 100644
index 000000000000..18ce3c9e8eec
--- /dev/null
+++ b/include/asm-generic/bitops/instrumented-atomic.h
@@ -0,0 +1,100 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/*
+ * This file provides wrappers with sanitizer instrumentation for atomic bit
+ * operations.
+ *
+ * To use this functionality, an arch's bitops.h file needs to define each of
+ * the below bit operations with an arch_ prefix (e.g. arch_set_bit(),
+ * arch___set_bit(), etc.).
+ */
+#ifndef _ASM_GENERIC_BITOPS_INSTRUMENTED_ATOMIC_H
+#define _ASM_GENERIC_BITOPS_INSTRUMENTED_ATOMIC_H
+
+#include <linux/kasan-checks.h>
+
+/**
+ * set_bit - Atomically set a bit in memory
+ * @nr: the bit to set
+ * @addr: the address to start counting from
+ *
+ * This is a relaxed atomic operation (no implied memory barriers).
+ *
+ * Note that @nr may be almost arbitrarily large; this function is not
+ * restricted to acting on a single-word quantity.
+ */
+static inline void set_bit(long nr, volatile unsigned long *addr)
+{
+ kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+ arch_set_bit(nr, addr);
+}
+
+/**
+ * clear_bit - Clears a bit in memory
+ * @nr: Bit to clear
+ * @addr: Address to start counting from
+ *
+ * This is a relaxed atomic operation (no implied memory barriers).
+ */
+static inline void clear_bit(long nr, volatile unsigned long *addr)
+{
+ kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+ arch_clear_bit(nr, addr);
+}
+
+/**
+ * change_bit - Toggle a bit in memory
+ * @nr: Bit to change
+ * @addr: Address to start counting from
+ *
+ * This is a relaxed atomic operation (no implied memory barriers).
+ *
+ * Note that @nr may be almost arbitrarily large; this function is not
+ * restricted to acting on a single-word quantity.
+ */
+static inline void change_bit(long nr, volatile unsigned long *addr)
+{
+ kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+ arch_change_bit(nr, addr);
+}
+
+/**
+ * test_and_set_bit - Set a bit and return its old value
+ * @nr: Bit to set
+ * @addr: Address to count from
+ *
+ * This is an atomic fully-ordered operation (implied full memory barrier).
+ */
+static inline bool test_and_set_bit(long nr, volatile unsigned long *addr)
+{
+ kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+ return arch_test_and_set_bit(nr, addr);
+}
+
+/**
+ * test_and_clear_bit - Clear a bit and return its old value
+ * @nr: Bit to clear
+ * @addr: Address to count from
+ *
+ * This is an atomic fully-ordered operation (implied full memory barrier).
+ */
+static inline bool test_and_clear_bit(long nr, volatile unsigned long *addr)
+{
+ kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+ return arch_test_and_clear_bit(nr, addr);
+}
+
+/**
+ * test_and_change_bit - Change a bit and return its old value
+ * @nr: Bit to change
+ * @addr: Address to count from
+ *
+ * This is an atomic fully-ordered operation (implied full memory barrier).
+ */
+static inline bool test_and_change_bit(long nr, volatile unsigned long *addr)
+{
+ kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+ return arch_test_and_change_bit(nr, addr);
+}
+
+#endif /* _ASM_GENERIC_BITOPS_INSTRUMENTED_ATOMIC_H */
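For context, an opt-in architecture renames its primitives with an arch_ prefix and then includes the instrumented wrapper; a minimal sketch modelled on the generic atomic fallback (illustrative only, not taken from a real arch):

/* arch/example/include/asm/bitops.h (sketch) */
static __always_inline void arch_set_bit(long nr, volatile unsigned long *p)
{
	p += BIT_WORD(nr);
	atomic_long_or(BIT_MASK(nr), (atomic_long_t *)p);
}
/* ... likewise arch_clear_bit(), arch_change_bit(), arch_test_and_*_bit() ... */

#include <asm-generic/bitops/instrumented-atomic.h>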
diff --git a/include/asm-generic/bitops/instrumented-lock.h b/include/asm-generic/bitops/instrumented-lock.h
new file mode 100644
index 000000000000..ec53fdeea9ec
--- /dev/null
+++ b/include/asm-generic/bitops/instrumented-lock.h
@@ -0,0 +1,81 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/*
+ * This file provides wrappers with sanitizer instrumentation for bit
+ * locking operations.
+ *
+ * To use this functionality, an arch's bitops.h file needs to define each of
+ * the below bit operations with an arch_ prefix (e.g. arch_set_bit(),
+ * arch___set_bit(), etc.).
+ */
+#ifndef _ASM_GENERIC_BITOPS_INSTRUMENTED_LOCK_H
+#define _ASM_GENERIC_BITOPS_INSTRUMENTED_LOCK_H
+
+#include <linux/kasan-checks.h>
+
+/**
+ * clear_bit_unlock - Clear a bit in memory, for unlock
+ * @nr: the bit to clear
+ * @addr: the address to start counting from
+ *
+ * This operation is atomic and provides release barrier semantics.
+ */
+static inline void clear_bit_unlock(long nr, volatile unsigned long *addr)
+{
+ kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+ arch_clear_bit_unlock(nr, addr);
+}
+
+/**
+ * __clear_bit_unlock - Clears a bit in memory
+ * @nr: Bit to clear
+ * @addr: Address to start counting from
+ *
+ * This is a non-atomic operation but implies a release barrier before the
+ * memory operation. It can be used for an unlock if no other CPUs can
+ * concurrently modify other bits in the word.
+ */
+static inline void __clear_bit_unlock(long nr, volatile unsigned long *addr)
+{
+ kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+ arch___clear_bit_unlock(nr, addr);
+}
+
+/**
+ * test_and_set_bit_lock - Set a bit and return its old value, for lock
+ * @nr: Bit to set
+ * @addr: Address to count from
+ *
+ * This operation is atomic and provides acquire barrier semantics if
+ * the returned value is 0.
+ * It can be used to implement bit locks.
+ */
+static inline bool test_and_set_bit_lock(long nr, volatile unsigned long *addr)
+{
+ kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+ return arch_test_and_set_bit_lock(nr, addr);
+}
+
+#if defined(arch_clear_bit_unlock_is_negative_byte)
+/**
+ * clear_bit_unlock_is_negative_byte - Clear a bit in memory and test if bottom
+ * byte is negative, for unlock.
+ * @nr: the bit to clear
+ * @addr: the address to start counting from
+ *
+ * This operation is atomic and provides release barrier semantics.
+ *
+ * This is a bit of a one-trick-pony for the filemap code, which clears
+ * PG_locked and tests PG_waiters.
+ */
+static inline bool
+clear_bit_unlock_is_negative_byte(long nr, volatile unsigned long *addr)
+{
+ kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+ return arch_clear_bit_unlock_is_negative_byte(nr, addr);
+}
+/* Let everybody know we have it. */
+#define clear_bit_unlock_is_negative_byte clear_bit_unlock_is_negative_byte
+#endif
+
+#endif /* _ASM_GENERIC_BITOPS_INSTRUMENTED_LOCK_H */
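A typical bit-lock built on these wrappers looks roughly like this (sketch; the flag bit and helper names are hypothetical):

#include <linux/bitops.h>

#define MY_BUSY_BIT	0	/* hypothetical flag bit in a driver's state word */

/* Returns true if the lock was taken (the bit was previously clear). */
static bool my_try_lock(unsigned long *state)
{
	return !test_and_set_bit_lock(MY_BUSY_BIT, state);
}

static void my_unlock(unsigned long *state)
{
	clear_bit_unlock(MY_BUSY_BIT, state);	/* release semantics */
}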
diff --git a/include/asm-generic/bitops/instrumented-non-atomic.h b/include/asm-generic/bitops/instrumented-non-atomic.h
new file mode 100644
index 000000000000..95ff28d128a1
--- /dev/null
+++ b/include/asm-generic/bitops/instrumented-non-atomic.h
@@ -0,0 +1,114 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/*
+ * This file provides wrappers with sanitizer instrumentation for non-atomic
+ * bit operations.
+ *
+ * To use this functionality, an arch's bitops.h file needs to define each of
+ * the below bit operations with an arch_ prefix (e.g. arch_set_bit(),
+ * arch___set_bit(), etc.).
+ */
+#ifndef _ASM_GENERIC_BITOPS_INSTRUMENTED_NON_ATOMIC_H
+#define _ASM_GENERIC_BITOPS_INSTRUMENTED_NON_ATOMIC_H
+
+#include <linux/kasan-checks.h>
+
+/**
+ * __set_bit - Set a bit in memory
+ * @nr: the bit to set
+ * @addr: the address to start counting from
+ *
+ * Unlike set_bit(), this function is non-atomic. If it is called on the same
+ * region of memory concurrently, the effect may be that only one operation
+ * succeeds.
+ */
+static inline void __set_bit(long nr, volatile unsigned long *addr)
+{
+ kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+ arch___set_bit(nr, addr);
+}
+
+/**
+ * __clear_bit - Clears a bit in memory
+ * @nr: the bit to clear
+ * @addr: the address to start counting from
+ *
+ * Unlike clear_bit(), this function is non-atomic. If it is called on the same
+ * region of memory concurrently, the effect may be that only one operation
+ * succeeds.
+ */
+static inline void __clear_bit(long nr, volatile unsigned long *addr)
+{
+ kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+ arch___clear_bit(nr, addr);
+}
+
+/**
+ * __change_bit - Toggle a bit in memory
+ * @nr: the bit to change
+ * @addr: the address to start counting from
+ *
+ * Unlike change_bit(), this function is non-atomic. If it is called on the same
+ * region of memory concurrently, the effect may be that only one operation
+ * succeeds.
+ */
+static inline void __change_bit(long nr, volatile unsigned long *addr)
+{
+ kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+ arch___change_bit(nr, addr);
+}
+
+/**
+ * __test_and_set_bit - Set a bit and return its old value
+ * @nr: Bit to set
+ * @addr: Address to count from
+ *
+ * This operation is non-atomic. If two instances of this operation race, one
+ * can appear to succeed but actually fail.
+ */
+static inline bool __test_and_set_bit(long nr, volatile unsigned long *addr)
+{
+ kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+ return arch___test_and_set_bit(nr, addr);
+}
+
+/**
+ * __test_and_clear_bit - Clear a bit and return its old value
+ * @nr: Bit to clear
+ * @addr: Address to count from
+ *
+ * This operation is non-atomic. If two instances of this operation race, one
+ * can appear to succeed but actually fail.
+ */
+static inline bool __test_and_clear_bit(long nr, volatile unsigned long *addr)
+{
+ kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+ return arch___test_and_clear_bit(nr, addr);
+}
+
+/**
+ * __test_and_change_bit - Change a bit and return its old value
+ * @nr: Bit to change
+ * @addr: Address to count from
+ *
+ * This operation is non-atomic. If two instances of this operation race, one
+ * can appear to succeed but actually fail.
+ */
+static inline bool __test_and_change_bit(long nr, volatile unsigned long *addr)
+{
+ kasan_check_write(addr + BIT_WORD(nr), sizeof(long));
+ return arch___test_and_change_bit(nr, addr);
+}
+
+/**
+ * test_bit - Determine whether a bit is set
+ * @nr: bit number to test
+ * @addr: Address to start counting from
+ */
+static inline bool test_bit(long nr, const volatile unsigned long *addr)
+{
+ kasan_check_read(addr + BIT_WORD(nr), sizeof(long));
+ return arch_test_bit(nr, addr);
+}
+
+#endif /* _ASM_GENERIC_BITOPS_INSTRUMENTED_NON_ATOMIC_H */
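The non-atomic variants are only safe when the caller already provides exclusion, for example under a lock that covers the whole word (sketch; the structure and field names are hypothetical):

#include <linux/bitops.h>
#include <linux/spinlock.h>

struct my_dev {
	spinlock_t lock;
	unsigned long slot_map[BITS_TO_LONGS(64)];
};

static void mark_slot_used(struct my_dev *dev, unsigned int slot)
{
	spin_lock(&dev->lock);
	__set_bit(slot, dev->slot_map);	/* dev->lock serializes all updates */
	spin_unlock(&dev->lock);
}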
diff --git a/include/asm-generic/cacheflush.h b/include/asm-generic/cacheflush.h
index a950a22c4890..cac7404b2bdd 100644
--- a/include/asm-generic/cacheflush.h
+++ b/include/asm-generic/cacheflush.h
@@ -11,71 +11,102 @@
* The cache doesn't need to be flushed when TLB entries change when
* the cache is mapped to physical memory, not virtual memory
*/
+#ifndef flush_cache_all
static inline void flush_cache_all(void)
{
}
+#endif
+#ifndef flush_cache_mm
static inline void flush_cache_mm(struct mm_struct *mm)
{
}
+#endif
+#ifndef flush_cache_dup_mm
static inline void flush_cache_dup_mm(struct mm_struct *mm)
{
}
+#endif
+#ifndef flush_cache_range
static inline void flush_cache_range(struct vm_area_struct *vma,
unsigned long start,
unsigned long end)
{
}
+#endif
+#ifndef flush_cache_page
static inline void flush_cache_page(struct vm_area_struct *vma,
unsigned long vmaddr,
unsigned long pfn)
{
}
+#endif
+#ifndef flush_dcache_page
static inline void flush_dcache_page(struct page *page)
{
}
+#endif
+#ifndef flush_dcache_mmap_lock
static inline void flush_dcache_mmap_lock(struct address_space *mapping)
{
}
+#endif
+#ifndef flush_dcache_mmap_unlock
static inline void flush_dcache_mmap_unlock(struct address_space *mapping)
{
}
+#endif
+#ifndef flush_icache_range
static inline void flush_icache_range(unsigned long start, unsigned long end)
{
}
+#endif
+#ifndef flush_icache_page
static inline void flush_icache_page(struct vm_area_struct *vma,
struct page *page)
{
}
+#endif
+#ifndef flush_icache_user_range
static inline void flush_icache_user_range(struct vm_area_struct *vma,
struct page *page,
unsigned long addr, int len)
{
}
+#endif
+#ifndef flush_cache_vmap
static inline void flush_cache_vmap(unsigned long start, unsigned long end)
{
}
+#endif
+#ifndef flush_cache_vunmap
static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
{
}
+#endif
-#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
+#ifndef copy_to_user_page
+#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
do { \
memcpy(dst, src, len); \
flush_icache_user_range(vma, page, vaddr, len); \
} while (0)
+#endif
+
+#ifndef copy_from_user_page
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
memcpy(dst, src, len)
+#endif
#endif /* __ASM_CACHEFLUSH_H */
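With the #ifndef guards an architecture can now override just the hooks it implements and inherit the generic no-ops for the rest; roughly (hypothetical arch header, sketch only):

/* arch/example/include/asm/cacheflush.h (sketch) */
#ifndef __EXAMPLE_CACHEFLUSH_H
#define __EXAMPLE_CACHEFLUSH_H

void example_flush_dcache_page(struct page *page);

#define flush_dcache_page flush_dcache_page
static inline void flush_dcache_page(struct page *page)
{
	example_flush_dcache_page(page);
}

/* every hook not defined above falls back to the generic stub */
#include <asm-generic/cacheflush.h>

#endif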
diff --git a/include/asm-generic/export.h b/include/asm-generic/export.h
index fa577978fbbd..365345f9a9e3 100644
--- a/include/asm-generic/export.h
+++ b/include/asm-generic/export.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef __ASM_GENERIC_EXPORT_H
#define __ASM_GENERIC_EXPORT_H
@@ -26,25 +27,25 @@
.endm
/*
- * note on .section use: @progbits vs %progbits nastiness doesn't matter,
- * since we immediately emit into those sections anyway.
+ * note on .section use: we specify progbits since usage of the "M" (SHF_MERGE)
+ * section flag requires it. Use '%progbits' instead of '@progbits' since the
+ * former apparently works on all arches according to the binutils source.
*/
+
.macro ___EXPORT_SYMBOL name,val,sec
#ifdef CONFIG_MODULES
- .globl __ksymtab_\name
.section ___ksymtab\sec+\name,"a"
.balign KSYM_ALIGN
__ksymtab_\name:
__put \val, __kstrtab_\name
.previous
- .section __ksymtab_strings,"a"
+ .section __ksymtab_strings,"aMS",%progbits,1
__kstrtab_\name:
.asciz "\name"
.previous
#ifdef CONFIG_MODVERSIONS
.section ___kcrctab\sec+\name,"a"
.balign KCRC_ALIGN
-__kcrctab_\name:
#if defined(CONFIG_MODULE_REL_CRCS)
.long __crc_\name - .
#else
diff --git a/include/asm-generic/futex.h b/include/asm-generic/futex.h
index 02970b11f71f..f4c3470480c7 100644
--- a/include/asm-generic/futex.h
+++ b/include/asm-generic/futex.h
@@ -34,7 +34,6 @@ arch_futex_atomic_op_inuser(int op, u32 oparg, int *oval, u32 __user *uaddr)
u32 tmp;
preempt_disable();
- pagefault_disable();
ret = -EFAULT;
if (unlikely(get_user(oldval, uaddr) != 0))
@@ -67,7 +66,6 @@ arch_futex_atomic_op_inuser(int op, u32 oparg, int *oval, u32 __user *uaddr)
ret = -EFAULT;
out_pagefault_enable:
- pagefault_enable();
preempt_enable();
if (ret == 0)
diff --git a/include/asm-generic/io.h b/include/asm-generic/io.h
index d02806513670..d39ac997dda8 100644
--- a/include/asm-generic/io.h
+++ b/include/asm-generic/io.h
@@ -922,39 +922,17 @@ static inline void *phys_to_virt(unsigned long address)
/**
* DOC: ioremap() and ioremap_*() variants
*
- * If you have an IOMMU your architecture is expected to have both ioremap()
- * and iounmap() implemented otherwise the asm-generic helpers will provide a
- * direct mapping.
+ * Architectures with an MMU are expected to provide ioremap() and iounmap()
+ * themselves or rely on GENERIC_IOREMAP. For NOMMU architectures we provide
+ * a default no-op implementation that expects the physical addresses used
+ * for MMIO to already be marked as uncached, and to be usable as kernel
+ * virtual addresses.
*
- * There are ioremap_*() call variants, if you have no IOMMU we naturally will
- * default to direct mapping for all of them, you can override these defaults.
- * If you have an IOMMU you are highly encouraged to provide your own
- * ioremap variant implementation as there currently is no safe architecture
- * agnostic default. To avoid possible improper behaviour default asm-generic
- * ioremap_*() variants all return NULL when an IOMMU is available. If you've
- * defined your own ioremap_*() variant you must then declare your own
- * ioremap_*() variant as defined to itself to avoid the default NULL return.
+ * ioremap_wc() and ioremap_wt() can provide more relaxed caching attributes
+ * for specific drivers if the architecture chooses to implement them. If they
+ * are not implemented we fall back to plain ioremap.
*/
-
-#ifdef CONFIG_MMU
-
-#ifndef ioremap_uc
-#define ioremap_uc ioremap_uc
-static inline void __iomem *ioremap_uc(phys_addr_t offset, size_t size)
-{
- return NULL;
-}
-#endif
-
-#else /* !CONFIG_MMU */
-
-/*
- * Change "struct page" to physical address.
- *
- * This implementation is for the no-MMU case only... if you have an MMU
- * you'll need to provide your own definitions.
- */
-
+#ifndef CONFIG_MMU
#ifndef ioremap
#define ioremap ioremap
static inline void __iomem *ioremap(phys_addr_t offset, size_t size)
@@ -965,42 +943,43 @@ static inline void __iomem *ioremap(phys_addr_t offset, size_t size)
#ifndef iounmap
#define iounmap iounmap
-
static inline void iounmap(void __iomem *addr)
{
}
#endif
-#endif /* CONFIG_MMU */
-#ifndef ioremap_nocache
-void __iomem *ioremap(phys_addr_t phys_addr, size_t size);
-#define ioremap_nocache ioremap_nocache
-static inline void __iomem *ioremap_nocache(phys_addr_t offset, size_t size)
-{
- return ioremap(offset, size);
-}
-#endif
+#elif defined(CONFIG_GENERIC_IOREMAP)
+#include <asm/pgtable.h>
-#ifndef ioremap_uc
-#define ioremap_uc ioremap_uc
-static inline void __iomem *ioremap_uc(phys_addr_t offset, size_t size)
+void __iomem *ioremap_prot(phys_addr_t addr, size_t size, unsigned long prot);
+void iounmap(volatile void __iomem *addr);
+
+static inline void __iomem *ioremap(phys_addr_t addr, size_t size)
{
- return ioremap_nocache(offset, size);
+ /* _PAGE_IOREMAP needs to be supplied by the architecture */
+ return ioremap_prot(addr, size, _PAGE_IOREMAP);
}
-#endif
+#endif /* !CONFIG_MMU || CONFIG_GENERIC_IOREMAP */
#ifndef ioremap_wc
-#define ioremap_wc ioremap_wc
-static inline void __iomem *ioremap_wc(phys_addr_t offset, size_t size)
-{
- return ioremap_nocache(offset, size);
-}
+#define ioremap_wc ioremap
#endif
#ifndef ioremap_wt
-#define ioremap_wt ioremap_wt
-static inline void __iomem *ioremap_wt(phys_addr_t offset, size_t size)
+#define ioremap_wt ioremap
+#endif
+
+/*
+ * ioremap_uc is special in that we do require an explicit architecture
+ * implementation. In general a driver should not use this function; plain
+ * ioremap(), which is uncached by default, is the right choice. Similarly
+ * architectures should not implement it unless they have a very good
+ * reason.
+ */
+#ifndef ioremap_uc
+#define ioremap_uc ioremap_uc
+static inline void __iomem *ioremap_uc(phys_addr_t offset, size_t size)
{
- return ioremap_nocache(offset, size);
+ return NULL;
}
#endif
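On the driver side the fallbacks mean ioremap_wc()/ioremap_wt() always resolve to something sensible; a small sketch (resource and helper names hypothetical):

#include <linux/io.h>
#include <linux/ioport.h>

/* Map a framebuffer write-combined where the arch supports it,
 * otherwise as ordinary uncached MMIO via the plain ioremap() fallback. */
static void __iomem *example_map_framebuffer(struct resource *res)
{
	return ioremap_wc(res->start, resource_size(res));
}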
diff --git a/include/asm-generic/iomap.h b/include/asm-generic/iomap.h
index a008f504a2d0..9d28a5e82f73 100644
--- a/include/asm-generic/iomap.h
+++ b/include/asm-generic/iomap.h
@@ -94,11 +94,11 @@ extern void ioport_unmap(void __iomem *);
#endif
#ifndef ARCH_HAS_IOREMAP_WC
-#define ioremap_wc ioremap_nocache
+#define ioremap_wc ioremap
#endif
#ifndef ARCH_HAS_IOREMAP_WT
-#define ioremap_wt ioremap_nocache
+#define ioremap_wt ioremap
#endif
#ifdef CONFIG_PCI
diff --git a/include/asm-generic/mm_hooks.h b/include/asm-generic/mm_hooks.h
index 6736ed2f632b..4dbb177d1150 100644
--- a/include/asm-generic/mm_hooks.h
+++ b/include/asm-generic/mm_hooks.h
@@ -22,11 +22,6 @@ static inline void arch_unmap(struct mm_struct *mm,
{
}
-static inline void arch_bprm_mm_init(struct mm_struct *mm,
- struct vm_area_struct *vma)
-{
-}
-
static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
bool write, bool execute, bool foreign)
{
diff --git a/include/asm-generic/mshyperv.h b/include/asm-generic/mshyperv.h
index 18d8e2d8210f..b3f1082cc435 100644
--- a/include/asm-generic/mshyperv.h
+++ b/include/asm-generic/mshyperv.h
@@ -166,10 +166,12 @@ static inline int cpumask_to_vpset(struct hv_vpset *vpset,
void hyperv_report_panic(struct pt_regs *regs, long err);
void hyperv_report_panic_msg(phys_addr_t pa, size_t size);
bool hv_is_hyperv_initialized(void);
+bool hv_is_hibernation_supported(void);
void hyperv_cleanup(void);
void hv_setup_sched_clock(void *sched_clock);
#else /* CONFIG_HYPERV */
static inline bool hv_is_hyperv_initialized(void) { return false; }
+static inline bool hv_is_hibernation_supported(void) { return false; }
static inline void hyperv_cleanup(void) {}
#endif /* CONFIG_HYPERV */
diff --git a/include/asm-generic/percpu.h b/include/asm-generic/percpu.h
index c2de013b2cf4..35e4a53b83e6 100644
--- a/include/asm-generic/percpu.h
+++ b/include/asm-generic/percpu.h
@@ -74,7 +74,7 @@ do { \
#define raw_cpu_generic_add_return(pcp, val) \
({ \
- typeof(&(pcp)) __p = raw_cpu_ptr(&(pcp)); \
+ typeof(pcp) *__p = raw_cpu_ptr(&(pcp)); \
\
*__p += val; \
*__p; \
@@ -82,7 +82,7 @@ do { \
#define raw_cpu_generic_xchg(pcp, nval) \
({ \
- typeof(&(pcp)) __p = raw_cpu_ptr(&(pcp)); \
+ typeof(pcp) *__p = raw_cpu_ptr(&(pcp)); \
typeof(pcp) __ret; \
__ret = *__p; \
*__p = nval; \
@@ -91,7 +91,7 @@ do { \
#define raw_cpu_generic_cmpxchg(pcp, oval, nval) \
({ \
- typeof(&(pcp)) __p = raw_cpu_ptr(&(pcp)); \
+ typeof(pcp) *__p = raw_cpu_ptr(&(pcp)); \
typeof(pcp) __ret; \
__ret = *__p; \
if (__ret == (oval)) \
@@ -101,8 +101,8 @@ do { \
#define raw_cpu_generic_cmpxchg_double(pcp1, pcp2, oval1, oval2, nval1, nval2) \
({ \
- typeof(&(pcp1)) __p1 = raw_cpu_ptr(&(pcp1)); \
- typeof(&(pcp2)) __p2 = raw_cpu_ptr(&(pcp2)); \
+ typeof(pcp1) *__p1 = raw_cpu_ptr(&(pcp1)); \
+ typeof(pcp2) *__p2 = raw_cpu_ptr(&(pcp2)); \
int __ret = 0; \
if (*__p1 == (oval1) && *__p2 == (oval2)) { \
*__p1 = nval1; \
diff --git a/include/asm-generic/pgtable-nop4d.h b/include/asm-generic/pgtable-nop4d.h
index aebab905e6cd..ce2cbb3c380f 100644
--- a/include/asm-generic/pgtable-nop4d.h
+++ b/include/asm-generic/pgtable-nop4d.h
@@ -50,7 +50,7 @@ static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
*/
#define p4d_alloc_one(mm, address) NULL
#define p4d_free(mm, x) do { } while (0)
-#define __p4d_free_tlb(tlb, x, a) do { } while (0)
+#define p4d_free_tlb(tlb, x, a) do { } while (0)
#undef p4d_addr_end
#define p4d_addr_end(addr, end) (end)
diff --git a/include/asm-generic/pgtable-nopmd.h b/include/asm-generic/pgtable-nopmd.h
index b85b8271a73d..0d9b28cba16d 100644
--- a/include/asm-generic/pgtable-nopmd.h
+++ b/include/asm-generic/pgtable-nopmd.h
@@ -60,7 +60,7 @@ static inline pmd_t * pmd_offset(pud_t * pud, unsigned long address)
static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
}
-#define __pmd_free_tlb(tlb, x, a) do { } while (0)
+#define pmd_free_tlb(tlb, x, a) do { } while (0)
#undef pmd_addr_end
#define pmd_addr_end(addr, end) (end)
diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h
index c77a1d301155..d3776cb494c0 100644
--- a/include/asm-generic/pgtable-nopud.h
+++ b/include/asm-generic/pgtable-nopud.h
@@ -59,7 +59,7 @@ static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address)
*/
#define pud_alloc_one(mm, address) NULL
#define pud_free(mm, x) do { } while (0)
-#define __pud_free_tlb(tlb, x, a) do { } while (0)
+#define pud_free_tlb(tlb, x, a) do { } while (0)
#undef pud_addr_end
#define pud_addr_end(addr, end) (end)
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index 818691846c90..e2e2bef07dd2 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -558,8 +558,19 @@ static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
* Do the tests inline, but report and clear the bad entry in mm/memory.c.
*/
void pgd_clear_bad(pgd_t *);
+
+#ifndef __PAGETABLE_P4D_FOLDED
void p4d_clear_bad(p4d_t *);
+#else
+#define p4d_clear_bad(p4d) do { } while (0)
+#endif
+
+#ifndef __PAGETABLE_PUD_FOLDED
void pud_clear_bad(pud_t *);
+#else
+#define pud_clear_bad(pud) do { } while (0)
+#endif
+
void pmd_clear_bad(pmd_t *);
static inline int pgd_none_or_clear_bad(pgd_t *pgd)
@@ -903,6 +914,21 @@ static inline int pud_write(pud_t pud)
}
#endif /* pud_write */
+#if !defined(CONFIG_ARCH_HAS_PTE_DEVMAP) || !defined(CONFIG_TRANSPARENT_HUGEPAGE)
+static inline int pmd_devmap(pmd_t pmd)
+{
+ return 0;
+}
+static inline int pud_devmap(pud_t pud)
+{
+ return 0;
+}
+static inline int pgd_devmap(pgd_t pgd)
+{
+ return 0;
+}
+#endif
+
#if !defined(CONFIG_TRANSPARENT_HUGEPAGE) || \
(defined(CONFIG_TRANSPARENT_HUGEPAGE) && \
!defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD))
@@ -912,6 +938,31 @@ static inline int pud_trans_huge(pud_t pud)
}
#endif
+/* See pmd_none_or_trans_huge_or_clear_bad for discussion. */
+static inline int pud_none_or_trans_huge_or_dev_or_clear_bad(pud_t *pud)
+{
+ pud_t pudval = READ_ONCE(*pud);
+
+ if (pud_none(pudval) || pud_trans_huge(pudval) || pud_devmap(pudval))
+ return 1;
+ if (unlikely(pud_bad(pudval))) {
+ pud_clear_bad(pud);
+ return 1;
+ }
+ return 0;
+}
+
+/* See pmd_trans_unstable for discussion. */
+static inline int pud_trans_unstable(pud_t *pud)
+{
+#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && \
+ defined(CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD)
+ return pud_none_or_trans_huge_or_dev_or_clear_bad(pud);
+#else
+ return 0;
+#endif
+}
+
#ifndef pmd_read_atomic
static inline pmd_t pmd_read_atomic(pmd_t *pmdp)
{
@@ -1187,4 +1238,24 @@ static inline bool arch_has_pfn_modify_check(void)
#define mm_pmd_folded(mm) __is_defined(__PAGETABLE_PMD_FOLDED)
#endif
+/*
+ * p?d_leaf() - true if this entry is a final mapping to a physical address.
+ * This differs from p?d_huge() by the fact that they are always available (if
+ * the architecture supports large pages at the appropriate level) even
+ * if CONFIG_HUGETLB_PAGE is not defined.
+ * Only meaningful when called on a valid entry.
+ */
+#ifndef pgd_leaf
+#define pgd_leaf(x) 0
+#endif
+#ifndef p4d_leaf
+#define p4d_leaf(x) 0
+#endif
+#ifndef pud_leaf
+#define pud_leaf(x) 0
+#endif
+#ifndef pmd_leaf
+#define pmd_leaf(x) 0
+#endif
+
#endif /* _ASM_GENERIC_PGTABLE_H */
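A sketch of how a software walker might use the new helper: pud_trans_unstable() guards descent past an entry that may be huge, devmap or racing to none (function name hypothetical):

static int example_walk_pmd_range(pud_t *pud, unsigned long addr,
				  unsigned long end)
{
	if (pud_trans_unstable(pud))
		return 0;	/* huge/devmap entry or racing clear: nothing to descend into */

	/* ... otherwise iterate the PMD entries under *pud ... */
	return 0;
}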
diff --git a/include/asm-generic/tlb.h b/include/asm-generic/tlb.h
index 04c0644006fd..f391f6b500b4 100644
--- a/include/asm-generic/tlb.h
+++ b/include/asm-generic/tlb.h
@@ -56,6 +56,15 @@
* Defaults to flushing at tlb_end_vma() to reset the range; helps when
* there's large holes between the VMAs.
*
+ * - tlb_remove_table()
+ *
+ * tlb_remove_table() is the basic primitive to free page-table directories
+ * (__p*_free_tlb()). In its most primitive form it is an alias for
+ * tlb_remove_page() below, for when page directories are pages and have no
+ * additional constraints.
+ *
+ * See also MMU_GATHER_TABLE_FREE and MMU_GATHER_RCU_TABLE_FREE.
+ *
* - tlb_remove_page() / __tlb_remove_page()
* - tlb_remove_page_size() / __tlb_remove_page_size()
*
@@ -121,65 +130,53 @@
*
* Additionally there are a few opt-in features:
*
- * HAVE_MMU_GATHER_PAGE_SIZE
+ * MMU_GATHER_PAGE_SIZE
*
* This ensures we call tlb_flush() every time tlb_change_page_size() actually
* changes the size and provides mmu_gather::page_size to tlb_flush().
*
- * HAVE_RCU_TABLE_FREE
+ * This might be useful if your architecture has size-specific TLB
+ * invalidation instructions.
+ *
+ * MMU_GATHER_TABLE_FREE
*
* This provides tlb_remove_table(), to be used instead of tlb_remove_page()
- * for page directores (__p*_free_tlb()). This provides separate freeing of
- * the page-table pages themselves in a semi-RCU fashion (see comment below).
- * Useful if your architecture doesn't use IPIs for remote TLB invalidates
- * and therefore doesn't naturally serialize with software page-table walkers.
+ * for page directories (__p*_free_tlb()).
+ *
+ * Useful if your architecture has non-page page directories.
*
* When used, an architecture is expected to provide __tlb_remove_table()
* which does the actual freeing of these pages.
*
- * HAVE_RCU_TABLE_NO_INVALIDATE
+ * MMU_GATHER_RCU_TABLE_FREE
+ *
+ * Like MMU_GATHER_TABLE_FREE, and adds semi-RCU semantics to the free (see
+ * comment below).
*
- * This makes HAVE_RCU_TABLE_FREE avoid calling tlb_flush_mmu_tlbonly() before
- * freeing the page-table pages. This can be avoided if you use
- * HAVE_RCU_TABLE_FREE and your architecture does _NOT_ use the Linux
- * page-tables natively.
+ * Useful if your architecture doesn't use IPIs for remote TLB invalidates
+ * and therefore doesn't naturally serialize with software page-table walkers.
*
* MMU_GATHER_NO_RANGE
*
* Use this if your architecture lacks an efficient flush_tlb_range().
- */
-
-#ifdef CONFIG_HAVE_RCU_TABLE_FREE
-/*
- * Semi RCU freeing of the page directories.
- *
- * This is needed by some architectures to implement software pagetable walkers.
- *
- * gup_fast() and other software pagetable walkers do a lockless page-table
- * walk and therefore needs some synchronization with the freeing of the page
- * directories. The chosen means to accomplish that is by disabling IRQs over
- * the walk.
*
- * Architectures that use IPIs to flush TLBs will then automagically DTRT,
- * since we unlink the page, flush TLBs, free the page. Since the disabling of
- * IRQs delays the completion of the TLB flush we can never observe an already
- * freed page.
+ * MMU_GATHER_NO_GATHER
*
- * Architectures that do not have this (PPC) need to delay the freeing by some
- * other means, this is that means.
- *
- * What we do is batch the freed directory pages (tables) and RCU free them.
- * We use the sched RCU variant, as that guarantees that IRQ/preempt disabling
- * holds off grace periods.
- *
- * However, in order to batch these pages we need to allocate storage, this
- * allocation is deep inside the MM code and can thus easily fail on memory
- * pressure. To guarantee progress we fall back to single table freeing, see
- * the implementation of tlb_remove_table_one().
+ * If the option is set, the mmu_gather will not track individual pages for
+ * delayed freeing anymore. A platform that enables the option needs to
+ * provide its own implementation of the __tlb_remove_page_size() function to
+ * free pages.
*
+ * This is useful if your architecture already flushes TLB entries in the
+ * various ptep_get_and_clear() functions.
*/
+
+#ifdef CONFIG_MMU_GATHER_TABLE_FREE
+
struct mmu_table_batch {
+#ifdef CONFIG_MMU_GATHER_RCU_TABLE_FREE
struct rcu_head rcu;
+#endif
unsigned int nr;
void *tables[0];
};
@@ -189,9 +186,35 @@ struct mmu_table_batch {
extern void tlb_remove_table(struct mmu_gather *tlb, void *table);
+#else /* !CONFIG_MMU_GATHER_TABLE_FREE */
+
+/*
+ * Without MMU_GATHER_TABLE_FREE the architecture is assumed to have page based
+ * page directories and we can use the normal page batching to free them.
+ */
+#define tlb_remove_table(tlb, page) tlb_remove_page((tlb), (page))
+
+#endif /* CONFIG_MMU_GATHER_TABLE_FREE */
+
+#ifdef CONFIG_MMU_GATHER_RCU_TABLE_FREE
+/*
+ * This allows an architecture whose hardware does not walk the Linux page
+ * tables to skip the TLB invalidation when freeing page tables.
+ */
+#ifndef tlb_needs_table_invalidate
+#define tlb_needs_table_invalidate() (true)
+#endif
+
+#else
+
+#ifdef tlb_needs_table_invalidate
+#error tlb_needs_table_invalidate() requires MMU_GATHER_RCU_TABLE_FREE
#endif
-#ifndef CONFIG_HAVE_MMU_GATHER_NO_GATHER
+#endif /* CONFIG_MMU_GATHER_RCU_TABLE_FREE */
+
+
+#ifndef CONFIG_MMU_GATHER_NO_GATHER
/*
* If we can't allocate a page to make a big batch of page pointers
* to work on, then just handle a few from the on-stack structure.
@@ -227,7 +250,7 @@ extern bool __tlb_remove_page_size(struct mmu_gather *tlb, struct page *page,
struct mmu_gather {
struct mm_struct *mm;
-#ifdef CONFIG_HAVE_RCU_TABLE_FREE
+#ifdef CONFIG_MMU_GATHER_TABLE_FREE
struct mmu_table_batch *batch;
#endif
@@ -266,22 +289,18 @@ struct mmu_gather {
unsigned int batch_count;
-#ifndef CONFIG_HAVE_MMU_GATHER_NO_GATHER
+#ifndef CONFIG_MMU_GATHER_NO_GATHER
struct mmu_gather_batch *active;
struct mmu_gather_batch local;
struct page *__pages[MMU_GATHER_BUNDLE];
-#ifdef CONFIG_HAVE_MMU_GATHER_PAGE_SIZE
+#ifdef CONFIG_MMU_GATHER_PAGE_SIZE
unsigned int page_size;
#endif
#endif
};
-void arch_tlb_gather_mmu(struct mmu_gather *tlb,
- struct mm_struct *mm, unsigned long start, unsigned long end);
void tlb_flush_mmu(struct mmu_gather *tlb);
-void arch_tlb_finish_mmu(struct mmu_gather *tlb,
- unsigned long start, unsigned long end, bool force);
static inline void __tlb_adjust_range(struct mmu_gather *tlb,
unsigned long address,
@@ -394,7 +413,12 @@ tlb_update_vma_flags(struct mmu_gather *tlb, struct vm_area_struct *vma) { }
static inline void tlb_flush_mmu_tlbonly(struct mmu_gather *tlb)
{
- if (!tlb->end)
+ /*
+ * Anything calling __tlb_adjust_range() also sets at least one of
+ * these bits.
+ */
+ if (!(tlb->freed_tables || tlb->cleared_ptes || tlb->cleared_pmds ||
+ tlb->cleared_puds || tlb->cleared_p4ds))
return;
tlb_flush(tlb);
@@ -426,9 +450,9 @@ static inline void tlb_remove_page(struct mmu_gather *tlb, struct page *page)
static inline void tlb_change_page_size(struct mmu_gather *tlb,
unsigned int page_size)
{
-#ifdef CONFIG_HAVE_MMU_GATHER_PAGE_SIZE
+#ifdef CONFIG_MMU_GATHER_PAGE_SIZE
if (tlb->page_size && tlb->page_size != page_size) {
- if (!tlb->fullmm)
+ if (!tlb->fullmm && !tlb->need_flush_all)
tlb_flush_mmu(tlb);
}
@@ -584,7 +608,6 @@ static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vm
} while (0)
#endif
-#ifndef __ARCH_HAS_4LEVEL_HACK
#ifndef pud_free_tlb
#define pud_free_tlb(tlb, pudp, address) \
do { \
@@ -594,9 +617,7 @@ static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vm
__pud_free_tlb(tlb, pudp, address); \
} while (0)
#endif
-#endif
-#ifndef __ARCH_HAS_5LEVEL_HACK
#ifndef p4d_free_tlb
#define p4d_free_tlb(tlb, pudp, address) \
do { \
@@ -605,7 +626,6 @@ static inline void tlb_end_vma(struct mmu_gather *tlb, struct vm_area_struct *vm
__p4d_free_tlb(tlb, pudp, address); \
} while (0)
#endif
-#endif
#endif /* CONFIG_MMU */
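The arch-side hooks for the renamed options amount to a Kconfig select plus one callback; a rough sketch (everything below is illustrative, not from a real arch):

/* arch/example/Kconfig:  select MMU_GATHER_RCU_TABLE_FREE */

/* arch/example/include/asm/tlb.h (sketch) */
static inline void __tlb_remove_table(void *table)
{
	/* free_page_and_swap_cache() comes from <linux/swap.h> */
	free_page_and_swap_cache((struct page *)table);
}

/* only if the hardware never walks the Linux page tables: */
#define tlb_needs_table_invalidate()	(false)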
diff --git a/include/asm-generic/vdso/vsyscall.h b/include/asm-generic/vdso/vsyscall.h
index ce4103208619..c835607f78ae 100644
--- a/include/asm-generic/vdso/vsyscall.h
+++ b/include/asm-generic/vdso/vsyscall.h
@@ -11,20 +11,6 @@ static __always_inline struct vdso_data *__arch_get_k_vdso_data(void)
}
#endif /* __arch_get_k_vdso_data */
-#ifndef __arch_update_vdso_data
-static __always_inline int __arch_update_vdso_data(void)
-{
- return 0;
-}
-#endif /* __arch_update_vdso_data */
-
-#ifndef __arch_get_clock_mode
-static __always_inline int __arch_get_clock_mode(struct timekeeper *tk)
-{
- return 0;
-}
-#endif /* __arch_get_clock_mode */
-
#ifndef __arch_update_vsyscall
static __always_inline void __arch_update_vsyscall(struct vdso_data *vdata,
struct timekeeper *tk)
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index dae64600ccbf..2444336ef04c 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -23,12 +23,11 @@
* _etext = .;
*
* _sdata = .;
- * RO_DATA_SECTION(PAGE_SIZE)
- * RW_DATA_SECTION(...)
+ * RO_DATA(PAGE_SIZE)
+ * RW_DATA(...)
* _edata = .;
*
* EXCEPTION_TABLE(...)
- * NOTES
*
* BSS_SECTION(0, 0, 0)
* _end = .;
@@ -54,6 +53,33 @@
#define LOAD_OFFSET 0
#endif
+/*
+ * Only some architectures want to have the .notes segment visible in
+ * a separate PT_NOTE ELF Program Header. When this happens, it needs
+ * to be visible in both the kernel text's PT_LOAD and the PT_NOTE
+ * Program Headers. In this case, though, the PT_LOAD needs to be made
+ * the default again so that all the following sections don't also end
+ * up in the PT_NOTE Program Header.
+ */
+#ifdef EMITS_PT_NOTE
+#define NOTES_HEADERS :text :note
+#define NOTES_HEADERS_RESTORE __restore_ph : { *(.__restore_ph) } :text
+#else
+#define NOTES_HEADERS
+#define NOTES_HEADERS_RESTORE
+#endif
+
+/*
+ * Some architectures have non-executable read-only exception tables.
+ * They can be added to the RO_DATA segment by specifying their desired
+ * alignment.
+ */
+#ifdef RO_EXCEPTION_TABLE_ALIGN
+#define RO_EXCEPTION_TABLE EXCEPTION_TABLE(RO_EXCEPTION_TABLE_ALIGN)
+#else
+#define RO_EXCEPTION_TABLE
+#endif
+
/* Align . to a 8 byte boundary equals to maximum function alignment. */
#define ALIGN_FUNCTION() . = ALIGN(8)
@@ -110,19 +136,28 @@
#endif
#ifdef CONFIG_FTRACE_MCOUNT_RECORD
-#ifdef CC_USING_PATCHABLE_FUNCTION_ENTRY
-#define MCOUNT_REC() . = ALIGN(8); \
- __start_mcount_loc = .; \
- KEEP(*(__patchable_function_entries)) \
- __stop_mcount_loc = .;
-#else
+/*
+ * The ftrace call sites are logged to a section whose name depends on the
+ * compiler option used. A given kernel image will only use one, AKA
+ * FTRACE_CALLSITE_SECTION. We capture all of them here to avoid header
+ * dependencies for FTRACE_CALLSITE_SECTION's definition.
+ *
+ * Need to also make ftrace_stub_graph point to ftrace_stub
+ * so that the same stub location may have different protocols
+ * and not mess up with C verifiers.
+ */
#define MCOUNT_REC() . = ALIGN(8); \
__start_mcount_loc = .; \
KEEP(*(__mcount_loc)) \
- __stop_mcount_loc = .;
-#endif
+ KEEP(*(__patchable_function_entries)) \
+ __stop_mcount_loc = .; \
+ ftrace_stub_graph = ftrace_stub;
#else
-#define MCOUNT_REC()
+# ifdef CONFIG_FUNCTION_TRACER
+# define MCOUNT_REC() ftrace_stub_graph = ftrace_stub;
+# else
+# define MCOUNT_REC()
+# endif
#endif
#ifdef CONFIG_TRACE_BRANCH_PROFILING
@@ -348,7 +383,7 @@
/*
* Read only Data
*/
-#define RO_DATA_SECTION(align) \
+#define RO_DATA(align) \
. = ALIGN((align)); \
.rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
__start_rodata = .; \
@@ -496,15 +531,13 @@
__start___modver = .; \
KEEP(*(__modver)) \
__stop___modver = .; \
- . = ALIGN((align)); \
- __end_rodata = .; \
} \
- . = ALIGN((align));
-
-/* RODATA & RO_DATA provided for backward compatibility.
- * All archs are supposed to use RO_DATA() */
-#define RODATA RO_DATA_SECTION(4096)
-#define RO_DATA(align) RO_DATA_SECTION(align)
+ \
+ RO_EXCEPTION_TABLE \
+ NOTES \
+ \
+ . = ALIGN((align)); \
+ __end_rodata = .;
/*
* .text section. Map to function alignment to avoid address changes
@@ -790,7 +823,8 @@
__start_notes = .; \
KEEP(*(.note.*)) \
__stop_notes = .; \
- }
+ } NOTES_HEADERS \
+ NOTES_HEADERS_RESTORE
#define INIT_SETUP(initsetup_align) \
. = ALIGN(initsetup_align); \
@@ -860,10 +894,17 @@
* section definitions so that such archs put those in earlier section
* definitions.
*/
+#ifdef RUNTIME_DISCARD_EXIT
+#define EXIT_DISCARDS
+#else
+#define EXIT_DISCARDS \
+ EXIT_TEXT \
+ EXIT_DATA
+#endif
+
#define DISCARDS \
/DISCARD/ : { \
- EXIT_TEXT \
- EXIT_DATA \
+ EXIT_DISCARDS \
EXIT_CALL \
*(.discard) \
*(.discard.*) \
@@ -962,7 +1003,7 @@
* matches the requirement of PAGE_ALIGNED_DATA.
*
* use 0 as page_align if page_aligned data is not used */
-#define RW_DATA_SECTION(cacheline, pagealigned, inittask) \
+#define RW_DATA(cacheline, pagealigned, inittask) \
. = ALIGN(PAGE_SIZE); \
.data : AT(ADDR(.data) - LOAD_OFFSET) { \
INIT_TASK_DATA(inittask) \
diff --git a/include/clocksource/hyperv_timer.h b/include/clocksource/hyperv_timer.h
index 422f5e5237be..34eef083c988 100644
--- a/include/clocksource/hyperv_timer.h
+++ b/include/clocksource/hyperv_timer.h
@@ -21,15 +21,16 @@
#define HV_MIN_DELTA_TICKS 1
/* Routines called by the VMbus driver */
-extern int hv_stimer_alloc(int sint);
+extern int hv_stimer_alloc(void);
extern void hv_stimer_free(void);
-extern void hv_stimer_init(unsigned int cpu);
-extern void hv_stimer_cleanup(unsigned int cpu);
+extern int hv_stimer_cleanup(unsigned int cpu);
+extern void hv_stimer_legacy_init(unsigned int cpu, int sint);
+extern void hv_stimer_legacy_cleanup(unsigned int cpu);
extern void hv_stimer_global_cleanup(void);
extern void hv_stimer0_isr(void);
#ifdef CONFIG_HYPERV_TIMER
-extern struct clocksource *hyperv_cs;
+extern u64 (*hv_read_reference_counter)(void);
extern void hv_init_clocksource(void);
extern struct ms_hyperv_tsc_page *hv_get_tsc_page(void);
diff --git a/include/clocksource/timer-ti-dm.h b/include/clocksource/timer-ti-dm.h
index 7d9598dc578d..25f05235866e 100644
--- a/include/clocksource/timer-ti-dm.h
+++ b/include/clocksource/timer-ti-dm.h
@@ -105,17 +105,17 @@ struct omap_dm_timer {
void __iomem *pend; /* write pending */
void __iomem *func_base; /* function register base */
+ atomic_t enabled;
unsigned long rate;
unsigned reserved:1;
unsigned posted:1;
struct timer_regs context;
- int (*get_context_loss_count)(struct device *);
- int ctx_loss_count;
int revision;
u32 capability;
u32 errata;
struct platform_device *pdev;
struct list_head node;
+ struct notifier_block nb;
};
int omap_dm_timer_reserve_systimer(int id);
diff --git a/include/crypto/aead.h b/include/crypto/aead.h
index 3c245b1859e7..1b3ebe8593c0 100644
--- a/include/crypto/aead.h
+++ b/include/crypto/aead.h
@@ -227,6 +227,16 @@ static inline unsigned int crypto_aead_authsize(struct crypto_aead *tfm)
return tfm->authsize;
}
+static inline unsigned int crypto_aead_alg_maxauthsize(struct aead_alg *alg)
+{
+ return alg->maxauthsize;
+}
+
+static inline unsigned int crypto_aead_maxauthsize(struct crypto_aead *aead)
+{
+ return crypto_aead_alg_maxauthsize(crypto_aead_alg(aead));
+}
+
/**
* crypto_aead_blocksize() - obtain block size of cipher
* @tfm: cipher handle
@@ -321,7 +331,7 @@ int crypto_aead_encrypt(struct aead_request *req);
/**
* crypto_aead_decrypt() - decrypt ciphertext
- * @req: reference to the ablkcipher_request handle that holds all information
+ * @req: reference to the aead_request handle that holds all information
* needed to perform the cipher operation
*
* Decrypt ciphertext data using the aead_request handle. That data structure
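A small sketch of the new maxauthsize helper in use, clamping a requested tag length before configuring the transform (hypothetical driver-style code):

static int example_set_tag_len(struct crypto_aead *tfm, unsigned int len)
{
	if (len > crypto_aead_maxauthsize(tfm))
		return -EINVAL;

	return crypto_aead_setauthsize(tfm, len);
}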
diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h
index e5bd302f2c49..e115f9215ed5 100644
--- a/include/crypto/algapi.h
+++ b/include/crypto/algapi.h
@@ -47,7 +47,13 @@ struct crypto_instance {
struct crypto_alg alg;
struct crypto_template *tmpl;
- struct hlist_node list;
+
+ union {
+ /* Node in list of instances after registration. */
+ struct hlist_node list;
+ /* List of attached spawns before registration. */
+ struct crypto_spawn *spawns;
+ };
void *__ctx[] CRYPTO_MINALIGN_ATTR;
};
@@ -57,8 +63,6 @@ struct crypto_template {
struct hlist_head instances;
struct module *module;
- struct crypto_instance *(*alloc)(struct rtattr **tb);
- void (*free)(struct crypto_instance *inst);
int (*create)(struct crypto_template *tmpl, struct rtattr **tb);
char name[CRYPTO_MAX_ALG_NAME];
@@ -67,9 +71,16 @@ struct crypto_template {
struct crypto_spawn {
struct list_head list;
struct crypto_alg *alg;
- struct crypto_instance *inst;
+ union {
+ /* Back pointer to instance after registration.*/
+ struct crypto_instance *inst;
+ /* Spawn list pointer prior to registration. */
+ struct crypto_spawn *next;
+ };
const struct crypto_type *frontend;
u32 mask;
+ bool dead;
+ bool registered;
};
struct crypto_queue {
@@ -85,56 +96,6 @@ struct scatter_walk {
unsigned int offset;
};
-struct blkcipher_walk {
- union {
- struct {
- struct page *page;
- unsigned long offset;
- } phys;
-
- struct {
- u8 *page;
- u8 *addr;
- } virt;
- } src, dst;
-
- struct scatter_walk in;
- unsigned int nbytes;
-
- struct scatter_walk out;
- unsigned int total;
-
- void *page;
- u8 *buffer;
- u8 *iv;
- unsigned int ivsize;
-
- int flags;
- unsigned int walk_blocksize;
- unsigned int cipher_blocksize;
- unsigned int alignmask;
-};
-
-struct ablkcipher_walk {
- struct {
- struct page *page;
- unsigned int offset;
- } src, dst;
-
- struct scatter_walk in;
- unsigned int nbytes;
- struct scatter_walk out;
- unsigned int total;
- struct list_head buffers;
- u8 *iv_buffer;
- u8 *iv;
- int flags;
- unsigned int blocksize;
-};
-
-extern const struct crypto_type crypto_ablkcipher_type;
-extern const struct crypto_type crypto_blkcipher_type;
-
void crypto_mod_put(struct crypto_alg *alg);
int crypto_register_template(struct crypto_template *tmpl);
@@ -145,45 +106,21 @@ struct crypto_template *crypto_lookup_template(const char *name);
int crypto_register_instance(struct crypto_template *tmpl,
struct crypto_instance *inst);
-int crypto_unregister_instance(struct crypto_instance *inst);
-
-int crypto_init_spawn(struct crypto_spawn *spawn, struct crypto_alg *alg,
- struct crypto_instance *inst, u32 mask);
-int crypto_init_spawn2(struct crypto_spawn *spawn, struct crypto_alg *alg,
- struct crypto_instance *inst,
- const struct crypto_type *frontend);
-int crypto_grab_spawn(struct crypto_spawn *spawn, const char *name,
- u32 type, u32 mask);
+void crypto_unregister_instance(struct crypto_instance *inst);
+int crypto_grab_spawn(struct crypto_spawn *spawn, struct crypto_instance *inst,
+ const char *name, u32 type, u32 mask);
void crypto_drop_spawn(struct crypto_spawn *spawn);
struct crypto_tfm *crypto_spawn_tfm(struct crypto_spawn *spawn, u32 type,
u32 mask);
void *crypto_spawn_tfm2(struct crypto_spawn *spawn);
-static inline void crypto_set_spawn(struct crypto_spawn *spawn,
- struct crypto_instance *inst)
-{
- spawn->inst = inst;
-}
-
struct crypto_attr_type *crypto_get_attr_type(struct rtattr **tb);
int crypto_check_attr_type(struct rtattr **tb, u32 type);
const char *crypto_attr_alg_name(struct rtattr *rta);
-struct crypto_alg *crypto_attr_alg2(struct rtattr *rta,
- const struct crypto_type *frontend,
- u32 type, u32 mask);
-
-static inline struct crypto_alg *crypto_attr_alg(struct rtattr *rta,
- u32 type, u32 mask)
-{
- return crypto_attr_alg2(rta, NULL, type, mask);
-}
-
int crypto_attr_u32(struct rtattr *rta, u32 *num);
int crypto_inst_setname(struct crypto_instance *inst, const char *name,
struct crypto_alg *alg);
-void *crypto_alloc_instance(const char *name, struct crypto_alg *alg,
- unsigned int head);
void crypto_init_queue(struct crypto_queue *queue, unsigned int max_qlen);
int crypto_enqueue_request(struct crypto_queue *queue,
@@ -233,26 +170,6 @@ static inline void crypto_xor_cpy(u8 *dst, const u8 *src1, const u8 *src2,
}
}
-int blkcipher_walk_done(struct blkcipher_desc *desc,
- struct blkcipher_walk *walk, int err);
-int blkcipher_walk_virt(struct blkcipher_desc *desc,
- struct blkcipher_walk *walk);
-int blkcipher_walk_phys(struct blkcipher_desc *desc,
- struct blkcipher_walk *walk);
-int blkcipher_walk_virt_block(struct blkcipher_desc *desc,
- struct blkcipher_walk *walk,
- unsigned int blocksize);
-int blkcipher_aead_walk_virt_block(struct blkcipher_desc *desc,
- struct blkcipher_walk *walk,
- struct crypto_aead *tfm,
- unsigned int blocksize);
-
-int ablkcipher_walk_done(struct ablkcipher_request *req,
- struct ablkcipher_walk *walk, int err);
-int ablkcipher_walk_phys(struct ablkcipher_request *req,
- struct ablkcipher_walk *walk);
-void __ablkcipher_walk_complete(struct ablkcipher_walk *walk);
-
static inline void *crypto_tfm_ctx_aligned(struct crypto_tfm *tfm)
{
return PTR_ALIGN(crypto_tfm_ctx(tfm),
@@ -270,48 +187,38 @@ static inline void *crypto_instance_ctx(struct crypto_instance *inst)
return inst->__ctx;
}
-static inline struct ablkcipher_alg *crypto_ablkcipher_alg(
- struct crypto_ablkcipher *tfm)
-{
- return &crypto_ablkcipher_tfm(tfm)->__crt_alg->cra_ablkcipher;
-}
-
-static inline void *crypto_ablkcipher_ctx(struct crypto_ablkcipher *tfm)
-{
- return crypto_tfm_ctx(&tfm->base);
-}
-
-static inline void *crypto_ablkcipher_ctx_aligned(struct crypto_ablkcipher *tfm)
-{
- return crypto_tfm_ctx_aligned(&tfm->base);
-}
+struct crypto_cipher_spawn {
+ struct crypto_spawn base;
+};
-static inline struct crypto_blkcipher *crypto_spawn_blkcipher(
- struct crypto_spawn *spawn)
+static inline int crypto_grab_cipher(struct crypto_cipher_spawn *spawn,
+ struct crypto_instance *inst,
+ const char *name, u32 type, u32 mask)
{
- u32 type = CRYPTO_ALG_TYPE_BLKCIPHER;
- u32 mask = CRYPTO_ALG_TYPE_MASK;
-
- return __crypto_blkcipher_cast(crypto_spawn_tfm(spawn, type, mask));
+ type &= ~CRYPTO_ALG_TYPE_MASK;
+ type |= CRYPTO_ALG_TYPE_CIPHER;
+ mask |= CRYPTO_ALG_TYPE_MASK;
+ return crypto_grab_spawn(&spawn->base, inst, name, type, mask);
}
-static inline void *crypto_blkcipher_ctx(struct crypto_blkcipher *tfm)
+static inline void crypto_drop_cipher(struct crypto_cipher_spawn *spawn)
{
- return crypto_tfm_ctx(&tfm->base);
+ crypto_drop_spawn(&spawn->base);
}
-static inline void *crypto_blkcipher_ctx_aligned(struct crypto_blkcipher *tfm)
+static inline struct crypto_alg *crypto_spawn_cipher_alg(
+ struct crypto_cipher_spawn *spawn)
{
- return crypto_tfm_ctx_aligned(&tfm->base);
+ return spawn->base.alg;
}
static inline struct crypto_cipher *crypto_spawn_cipher(
- struct crypto_spawn *spawn)
+ struct crypto_cipher_spawn *spawn)
{
u32 type = CRYPTO_ALG_TYPE_CIPHER;
u32 mask = CRYPTO_ALG_TYPE_MASK;
- return __crypto_cipher_cast(crypto_spawn_tfm(spawn, type, mask));
+ return __crypto_cipher_cast(crypto_spawn_tfm(&spawn->base, type, mask));
}
static inline struct cipher_alg *crypto_cipher_alg(struct crypto_cipher *tfm)
@@ -319,33 +226,6 @@ static inline struct cipher_alg *crypto_cipher_alg(struct crypto_cipher *tfm)
return &crypto_cipher_tfm(tfm)->__crt_alg->cra_cipher;
}
-static inline void blkcipher_walk_init(struct blkcipher_walk *walk,
- struct scatterlist *dst,
- struct scatterlist *src,
- unsigned int nbytes)
-{
- walk->in.sg = src;
- walk->out.sg = dst;
- walk->total = nbytes;
-}
-
-static inline void ablkcipher_walk_init(struct ablkcipher_walk *walk,
- struct scatterlist *dst,
- struct scatterlist *src,
- unsigned int nbytes)
-{
- walk->in.sg = src;
- walk->out.sg = dst;
- walk->total = nbytes;
- INIT_LIST_HEAD(&walk->buffers);
-}
-
-static inline void ablkcipher_walk_complete(struct ablkcipher_walk *walk)
-{
- if (unlikely(!list_empty(&walk->buffers)))
- __ablkcipher_walk_complete(walk);
-}
-
static inline struct crypto_async_request *crypto_get_backlog(
struct crypto_queue *queue)
{
@@ -353,29 +233,6 @@ static inline struct crypto_async_request *crypto_get_backlog(
container_of(queue->backlog, struct crypto_async_request, list);
}
-static inline int ablkcipher_enqueue_request(struct crypto_queue *queue,
- struct ablkcipher_request *request)
-{
- return crypto_enqueue_request(queue, &request->base);
-}
-
-static inline struct ablkcipher_request *ablkcipher_dequeue_request(
- struct crypto_queue *queue)
-{
- return ablkcipher_request_cast(crypto_dequeue_request(queue));
-}
-
-static inline void *ablkcipher_request_ctx(struct ablkcipher_request *req)
-{
- return req->__ctx;
-}
-
-static inline struct crypto_alg *crypto_get_attr_alg(struct rtattr **tb,
- u32 type, u32 mask)
-{
- return crypto_attr_alg(tb[1], type, mask);
-}
-
static inline int crypto_requires_off(u32 type, u32 mask, u32 off)
{
return (type ^ off) & mask & off;
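A sketch of the reworked spawn API in a template's ->create() callback: the owning instance is passed to crypto_grab_cipher() up front, so crypto_set_spawn() is no longer needed (error handling trimmed; names hypothetical):

#include <crypto/algapi.h>
#include <linux/slab.h>

static int example_create(struct crypto_template *tmpl, struct rtattr **tb)
{
	struct crypto_instance *inst;
	struct crypto_cipher_spawn *spawn;
	int err;

	inst = kzalloc(sizeof(*inst) + sizeof(*spawn), GFP_KERNEL);
	if (!inst)
		return -ENOMEM;
	spawn = crypto_instance_ctx(inst);

	err = crypto_grab_cipher(spawn, inst, crypto_attr_alg_name(tb[1]),
				 0, 0);
	if (err) {
		kfree(inst);
		return err;
	}

	/* ... fill in inst->alg and call crypto_register_instance() ... */
	return 0;
}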
diff --git a/include/crypto/blake2s.h b/include/crypto/blake2s.h
new file mode 100644
index 000000000000..b471deac28ff
--- /dev/null
+++ b/include/crypto/blake2s.h
@@ -0,0 +1,106 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/*
+ * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ */
+
+#ifndef BLAKE2S_H
+#define BLAKE2S_H
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+
+#include <asm/bug.h>
+
+enum blake2s_lengths {
+ BLAKE2S_BLOCK_SIZE = 64,
+ BLAKE2S_HASH_SIZE = 32,
+ BLAKE2S_KEY_SIZE = 32,
+
+ BLAKE2S_128_HASH_SIZE = 16,
+ BLAKE2S_160_HASH_SIZE = 20,
+ BLAKE2S_224_HASH_SIZE = 28,
+ BLAKE2S_256_HASH_SIZE = 32,
+};
+
+struct blake2s_state {
+ u32 h[8];
+ u32 t[2];
+ u32 f[2];
+ u8 buf[BLAKE2S_BLOCK_SIZE];
+ unsigned int buflen;
+ unsigned int outlen;
+};
+
+enum blake2s_iv {
+ BLAKE2S_IV0 = 0x6A09E667UL,
+ BLAKE2S_IV1 = 0xBB67AE85UL,
+ BLAKE2S_IV2 = 0x3C6EF372UL,
+ BLAKE2S_IV3 = 0xA54FF53AUL,
+ BLAKE2S_IV4 = 0x510E527FUL,
+ BLAKE2S_IV5 = 0x9B05688CUL,
+ BLAKE2S_IV6 = 0x1F83D9ABUL,
+ BLAKE2S_IV7 = 0x5BE0CD19UL,
+};
+
+void blake2s_update(struct blake2s_state *state, const u8 *in, size_t inlen);
+void blake2s_final(struct blake2s_state *state, u8 *out);
+
+static inline void blake2s_init_param(struct blake2s_state *state,
+ const u32 param)
+{
+ *state = (struct blake2s_state){{
+ BLAKE2S_IV0 ^ param,
+ BLAKE2S_IV1,
+ BLAKE2S_IV2,
+ BLAKE2S_IV3,
+ BLAKE2S_IV4,
+ BLAKE2S_IV5,
+ BLAKE2S_IV6,
+ BLAKE2S_IV7,
+ }};
+}
+
+static inline void blake2s_init(struct blake2s_state *state,
+ const size_t outlen)
+{
+ blake2s_init_param(state, 0x01010000 | outlen);
+ state->outlen = outlen;
+}
+
+static inline void blake2s_init_key(struct blake2s_state *state,
+ const size_t outlen, const void *key,
+ const size_t keylen)
+{
+ WARN_ON(IS_ENABLED(DEBUG) && (!outlen || outlen > BLAKE2S_HASH_SIZE ||
+ !key || !keylen || keylen > BLAKE2S_KEY_SIZE));
+
+ blake2s_init_param(state, 0x01010000 | keylen << 8 | outlen);
+ memcpy(state->buf, key, keylen);
+ state->buflen = BLAKE2S_BLOCK_SIZE;
+ state->outlen = outlen;
+}
+
+static inline void blake2s(u8 *out, const u8 *in, const u8 *key,
+ const size_t outlen, const size_t inlen,
+ const size_t keylen)
+{
+ struct blake2s_state state;
+
+ WARN_ON(IS_ENABLED(DEBUG) && ((!in && inlen > 0) || !out || !outlen ||
+ outlen > BLAKE2S_HASH_SIZE || keylen > BLAKE2S_KEY_SIZE ||
+ (!key && keylen)));
+
+ if (keylen)
+ blake2s_init_key(&state, outlen, key, keylen);
+ else
+ blake2s_init(&state, outlen);
+
+ blake2s_update(&state, in, inlen);
+ blake2s_final(&state, out);
+}
+
+void blake2s256_hmac(u8 *out, const u8 *in, const u8 *key, const size_t inlen,
+ const size_t keylen);
+
+#endif /* BLAKE2S_H */
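One-shot usage of the new library interface (sketch; buffers hypothetical):

#include <crypto/blake2s.h>

static void example_keyed_hash(const u8 *msg, size_t msg_len,
			       const u8 key[BLAKE2S_KEY_SIZE])
{
	u8 digest[BLAKE2S_HASH_SIZE];

	blake2s(digest, msg, key, BLAKE2S_HASH_SIZE, msg_len,
		BLAKE2S_KEY_SIZE);
	/* digest now holds the 32-byte keyed BLAKE2s hash of msg */
}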
diff --git a/include/crypto/cast6.h b/include/crypto/cast6.h
index c71f6ef47f0f..38f490cd50a8 100644
--- a/include/crypto/cast6.h
+++ b/include/crypto/cast6.h
@@ -15,11 +15,10 @@ struct cast6_ctx {
u8 Kr[12][4];
};
-int __cast6_setkey(struct cast6_ctx *ctx, const u8 *key,
- unsigned int keylen, u32 *flags);
+int __cast6_setkey(struct cast6_ctx *ctx, const u8 *key, unsigned int keylen);
int cast6_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen);
-void __cast6_encrypt(struct cast6_ctx *ctx, u8 *dst, const u8 *src);
-void __cast6_decrypt(struct cast6_ctx *ctx, u8 *dst, const u8 *src);
+void __cast6_encrypt(const void *ctx, u8 *dst, const u8 *src);
+void __cast6_decrypt(const void *ctx, u8 *dst, const u8 *src);
#endif
diff --git a/include/crypto/chacha.h b/include/crypto/chacha.h
index d1e723c6a37d..2676f4fbd4c1 100644
--- a/include/crypto/chacha.h
+++ b/include/crypto/chacha.h
@@ -15,9 +15,8 @@
#ifndef _CRYPTO_CHACHA_H
#define _CRYPTO_CHACHA_H
-#include <crypto/skcipher.h>
+#include <asm/unaligned.h>
#include <linux/types.h>
-#include <linux/crypto.h>
/* 32-bit stream position, then 96-bit nonce (RFC7539 convention) */
#define CHACHA_IV_SIZE 16
@@ -26,29 +25,79 @@
#define CHACHA_BLOCK_SIZE 64
#define CHACHAPOLY_IV_SIZE 12
+#ifdef CONFIG_X86_64
+#define CHACHA_STATE_WORDS ((CHACHA_BLOCK_SIZE + 12) / sizeof(u32))
+#else
+#define CHACHA_STATE_WORDS (CHACHA_BLOCK_SIZE / sizeof(u32))
+#endif
+
/* 192-bit nonce, then 64-bit stream position */
#define XCHACHA_IV_SIZE 32
-struct chacha_ctx {
- u32 key[8];
- int nrounds;
-};
-
-void chacha_block(u32 *state, u8 *stream, int nrounds);
+void chacha_block_generic(u32 *state, u8 *stream, int nrounds);
static inline void chacha20_block(u32 *state, u8 *stream)
{
- chacha_block(state, stream, 20);
+ chacha_block_generic(state, stream, 20);
+}
+
+void hchacha_block_arch(const u32 *state, u32 *out, int nrounds);
+void hchacha_block_generic(const u32 *state, u32 *out, int nrounds);
+
+static inline void hchacha_block(const u32 *state, u32 *out, int nrounds)
+{
+ if (IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_CHACHA))
+ hchacha_block_arch(state, out, nrounds);
+ else
+ hchacha_block_generic(state, out, nrounds);
+}
+
+void chacha_init_arch(u32 *state, const u32 *key, const u8 *iv);
+static inline void chacha_init_generic(u32 *state, const u32 *key, const u8 *iv)
+{
+ state[0] = 0x61707865; /* "expa" */
+ state[1] = 0x3320646e; /* "nd 3" */
+ state[2] = 0x79622d32; /* "2-by" */
+ state[3] = 0x6b206574; /* "te k" */
+ state[4] = key[0];
+ state[5] = key[1];
+ state[6] = key[2];
+ state[7] = key[3];
+ state[8] = key[4];
+ state[9] = key[5];
+ state[10] = key[6];
+ state[11] = key[7];
+ state[12] = get_unaligned_le32(iv + 0);
+ state[13] = get_unaligned_le32(iv + 4);
+ state[14] = get_unaligned_le32(iv + 8);
+ state[15] = get_unaligned_le32(iv + 12);
+}
+
+static inline void chacha_init(u32 *state, const u32 *key, const u8 *iv)
+{
+ if (IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_CHACHA))
+ chacha_init_arch(state, key, iv);
+ else
+ chacha_init_generic(state, key, iv);
}
-void hchacha_block(const u32 *in, u32 *out, int nrounds);
-void crypto_chacha_init(u32 *state, const struct chacha_ctx *ctx, const u8 *iv);
+void chacha_crypt_arch(u32 *state, u8 *dst, const u8 *src,
+ unsigned int bytes, int nrounds);
+void chacha_crypt_generic(u32 *state, u8 *dst, const u8 *src,
+ unsigned int bytes, int nrounds);
-int crypto_chacha20_setkey(struct crypto_skcipher *tfm, const u8 *key,
- unsigned int keysize);
-int crypto_chacha12_setkey(struct crypto_skcipher *tfm, const u8 *key,
- unsigned int keysize);
+static inline void chacha_crypt(u32 *state, u8 *dst, const u8 *src,
+ unsigned int bytes, int nrounds)
+{
+ if (IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_CHACHA))
+ chacha_crypt_arch(state, dst, src, bytes, nrounds);
+ else
+ chacha_crypt_generic(state, dst, src, bytes, nrounds);
+}
-int crypto_chacha_crypt(struct skcipher_request *req);
-int crypto_xchacha_crypt(struct skcipher_request *req);
+static inline void chacha20_crypt(u32 *state, u8 *dst, const u8 *src,
+ unsigned int bytes)
+{
+ chacha_crypt(state, dst, src, bytes, 20);
+}
#endif /* _CRYPTO_CHACHA_H */
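The reworked header turns ChaCha into a library interface that picks the architecture-specific or generic implementation via IS_ENABLED(). A minimal sketch of a caller follows; the key is assumed to already be in 32-bit words, as chacha_init() expects, and CHACHA_KEY_SIZE comes from the unchanged part of this header:

#include <crypto/chacha.h>

/* Encrypt or decrypt len bytes with ChaCha20 under the given key/IV. */
static void example_chacha20(const u32 key[8],
			     const u8 iv[CHACHA_IV_SIZE],
			     u8 *dst, const u8 *src, unsigned int len)
{
	u32 state[CHACHA_STATE_WORDS];

	chacha_init(state, key, iv);		/* arch or generic init */
	chacha20_crypt(state, dst, src, len);	/* 20-round variant */
}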
diff --git a/include/crypto/chacha20poly1305.h b/include/crypto/chacha20poly1305.h
new file mode 100644
index 000000000000..234ee28078ef
--- /dev/null
+++ b/include/crypto/chacha20poly1305.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/*
+ * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ */
+
+#ifndef __CHACHA20POLY1305_H
+#define __CHACHA20POLY1305_H
+
+#include <linux/types.h>
+#include <linux/scatterlist.h>
+
+enum chacha20poly1305_lengths {
+ XCHACHA20POLY1305_NONCE_SIZE = 24,
+ CHACHA20POLY1305_KEY_SIZE = 32,
+ CHACHA20POLY1305_AUTHTAG_SIZE = 16
+};
+
+void chacha20poly1305_encrypt(u8 *dst, const u8 *src, const size_t src_len,
+ const u8 *ad, const size_t ad_len,
+ const u64 nonce,
+ const u8 key[CHACHA20POLY1305_KEY_SIZE]);
+
+bool __must_check
+chacha20poly1305_decrypt(u8 *dst, const u8 *src, const size_t src_len,
+ const u8 *ad, const size_t ad_len, const u64 nonce,
+ const u8 key[CHACHA20POLY1305_KEY_SIZE]);
+
+void xchacha20poly1305_encrypt(u8 *dst, const u8 *src, const size_t src_len,
+ const u8 *ad, const size_t ad_len,
+ const u8 nonce[XCHACHA20POLY1305_NONCE_SIZE],
+ const u8 key[CHACHA20POLY1305_KEY_SIZE]);
+
+bool __must_check xchacha20poly1305_decrypt(
+ u8 *dst, const u8 *src, const size_t src_len, const u8 *ad,
+ const size_t ad_len, const u8 nonce[XCHACHA20POLY1305_NONCE_SIZE],
+ const u8 key[CHACHA20POLY1305_KEY_SIZE]);
+
+bool chacha20poly1305_encrypt_sg_inplace(struct scatterlist *src, size_t src_len,
+ const u8 *ad, const size_t ad_len,
+ const u64 nonce,
+ const u8 key[CHACHA20POLY1305_KEY_SIZE]);
+
+bool chacha20poly1305_decrypt_sg_inplace(struct scatterlist *src, size_t src_len,
+ const u8 *ad, const size_t ad_len,
+ const u64 nonce,
+ const u8 key[CHACHA20POLY1305_KEY_SIZE]);
+
+#endif /* __CHACHA20POLY1305_H */
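A sketch of a seal-then-open round trip with the new AEAD library calls; it assumes the ciphertext buffer holds src_len + CHACHA20POLY1305_AUTHTAG_SIZE bytes, since the tag is appended to the ciphertext (all names are illustrative):

#include <crypto/chacha20poly1305.h>

static bool example_seal_open(u8 *ct, u8 *pt, const u8 *msg, size_t len,
			      const u8 *ad, size_t ad_len, u64 nonce,
			      const u8 key[CHACHA20POLY1305_KEY_SIZE])
{
	/* ct receives len bytes of ciphertext plus the 16-byte tag. */
	chacha20poly1305_encrypt(ct, msg, len, ad, ad_len, nonce, key);

	/* Returns false if the authentication tag does not verify. */
	return chacha20poly1305_decrypt(pt, ct,
					len + CHACHA20POLY1305_AUTHTAG_SIZE,
					ad, ad_len, nonce, key);
}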
diff --git a/include/crypto/curve25519.h b/include/crypto/curve25519.h
new file mode 100644
index 000000000000..9ecb3c1f0f15
--- /dev/null
+++ b/include/crypto/curve25519.h
@@ -0,0 +1,73 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+/*
+ * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ */
+
+#ifndef CURVE25519_H
+#define CURVE25519_H
+
+#include <crypto/algapi.h> // For crypto_memneq.
+#include <linux/types.h>
+#include <linux/random.h>
+
+enum curve25519_lengths {
+ CURVE25519_KEY_SIZE = 32
+};
+
+extern const u8 curve25519_null_point[];
+extern const u8 curve25519_base_point[];
+
+void curve25519_generic(u8 out[CURVE25519_KEY_SIZE],
+ const u8 scalar[CURVE25519_KEY_SIZE],
+ const u8 point[CURVE25519_KEY_SIZE]);
+
+void curve25519_arch(u8 out[CURVE25519_KEY_SIZE],
+ const u8 scalar[CURVE25519_KEY_SIZE],
+ const u8 point[CURVE25519_KEY_SIZE]);
+
+void curve25519_base_arch(u8 pub[CURVE25519_KEY_SIZE],
+ const u8 secret[CURVE25519_KEY_SIZE]);
+
+static inline
+bool __must_check curve25519(u8 mypublic[CURVE25519_KEY_SIZE],
+ const u8 secret[CURVE25519_KEY_SIZE],
+ const u8 basepoint[CURVE25519_KEY_SIZE])
+{
+ if (IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_CURVE25519) &&
+ (!IS_ENABLED(CONFIG_CRYPTO_CURVE25519_X86) || IS_ENABLED(CONFIG_AS_ADX)))
+ curve25519_arch(mypublic, secret, basepoint);
+ else
+ curve25519_generic(mypublic, secret, basepoint);
+ return crypto_memneq(mypublic, curve25519_null_point,
+ CURVE25519_KEY_SIZE);
+}
+
+static inline bool
+__must_check curve25519_generate_public(u8 pub[CURVE25519_KEY_SIZE],
+ const u8 secret[CURVE25519_KEY_SIZE])
+{
+ if (unlikely(!crypto_memneq(secret, curve25519_null_point,
+ CURVE25519_KEY_SIZE)))
+ return false;
+
+ if (IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_CURVE25519) &&
+ (!IS_ENABLED(CONFIG_CRYPTO_CURVE25519_X86) || IS_ENABLED(CONFIG_AS_ADX)))
+ curve25519_base_arch(pub, secret);
+ else
+ curve25519_generic(pub, secret, curve25519_base_point);
+ return crypto_memneq(pub, curve25519_null_point, CURVE25519_KEY_SIZE);
+}
+
+static inline void curve25519_clamp_secret(u8 secret[CURVE25519_KEY_SIZE])
+{
+ secret[0] &= 248;
+ secret[31] = (secret[31] & 127) | 64;
+}
+
+static inline void curve25519_generate_secret(u8 secret[CURVE25519_KEY_SIZE])
+{
+ get_random_bytes_wait(secret, CURVE25519_KEY_SIZE);
+ curve25519_clamp_secret(secret);
+}
+
+#endif /* CURVE25519_H */
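A sketch of a Diffie-Hellman exchange using only the helpers declared above; the boolean returns guard against the all-zero point, matching the header's crypto_memneq() checks (the function names here are illustrative):

#include <crypto/curve25519.h>

static bool example_curve25519_dh(u8 shared[CURVE25519_KEY_SIZE],
				  const u8 peer_pub[CURVE25519_KEY_SIZE])
{
	u8 secret[CURVE25519_KEY_SIZE];
	u8 pub[CURVE25519_KEY_SIZE];

	curve25519_generate_secret(secret);	/* random bytes, then clamp */
	if (!curve25519_generate_public(pub, secret))
		return false;			/* degenerate secret */

	/* false here means the peer supplied a zero/low-order point. */
	return curve25519(shared, secret, peer_pub);
}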
diff --git a/include/crypto/engine.h b/include/crypto/engine.h
index 84c708bba00b..e29cd67f93c7 100644
--- a/include/crypto/engine.h
+++ b/include/crypto/engine.h
@@ -83,8 +83,6 @@ struct crypto_engine_ctx {
struct crypto_engine_op op;
};
-int crypto_transfer_ablkcipher_request_to_engine(struct crypto_engine *engine,
- struct ablkcipher_request *req);
int crypto_transfer_aead_request_to_engine(struct crypto_engine *engine,
struct aead_request *req);
int crypto_transfer_akcipher_request_to_engine(struct crypto_engine *engine,
@@ -93,8 +91,6 @@ int crypto_transfer_hash_request_to_engine(struct crypto_engine *engine,
struct ahash_request *req);
int crypto_transfer_skcipher_request_to_engine(struct crypto_engine *engine,
struct skcipher_request *req);
-void crypto_finalize_ablkcipher_request(struct crypto_engine *engine,
- struct ablkcipher_request *req, int err);
void crypto_finalize_aead_request(struct crypto_engine *engine,
struct aead_request *req, int err);
void crypto_finalize_akcipher_request(struct crypto_engine *engine,
diff --git a/include/crypto/hash.h b/include/crypto/hash.h
index d52b95b75ae4..cee446c59497 100644
--- a/include/crypto/hash.h
+++ b/include/crypto/hash.h
@@ -169,6 +169,17 @@ struct shash_desc {
* @export: see struct ahash_alg
* @import: see struct ahash_alg
* @setkey: see struct ahash_alg
+ * @init_tfm: Initialize the cryptographic transformation object.
+ * This function is called only once at the instantiation
+ * time, right after the transformation context was
+ * allocated. In case the cryptographic hardware has
+ * some special requirements which need to be handled
+ * by software, this function shall check for the precise
+ * requirement of the transformation and put any software
+ * fallbacks in place.
+ * @exit_tfm: Deinitialize the cryptographic transformation object.
+ * This is a counterpart to @init_tfm, used to remove
+ * various changes set in @init_tfm.
* @digestsize: see struct ahash_alg
* @statesize: see struct ahash_alg
* @descsize: Size of the operational state for the message digest. This state
@@ -189,6 +200,8 @@ struct shash_alg {
int (*import)(struct shash_desc *desc, const void *in);
int (*setkey)(struct crypto_shash *tfm, const u8 *key,
unsigned int keylen);
+ int (*init_tfm)(struct crypto_shash *tfm);
+ void (*exit_tfm)(struct crypto_shash *tfm);
unsigned int descsize;
@@ -227,7 +240,7 @@ struct crypto_shash {
* CRYPTO_ALG_TYPE_AHASH (listed as type "ahash" in /proc/crypto)
*
* The asynchronous cipher operation discussion provided for the
- * CRYPTO_ALG_TYPE_ABLKCIPHER API applies here as well.
+ * CRYPTO_ALG_TYPE_SKCIPHER API applies here as well.
*/
static inline struct crypto_ahash *__crypto_ahash_cast(struct crypto_tfm *tfm)
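To illustrate the new @init_tfm/@exit_tfm hooks documented above, here is a rough sketch of an shash driver that sets up a software fallback once per tfm; the context layout, algorithm name and remaining shash_alg fields are hypothetical, and crypto_alloc_shash()/crypto_free_shash() come from the unchanged part of this header:

#include <crypto/internal/hash.h>
#include <linux/err.h>

struct example_tfm_ctx {
	struct crypto_shash *fallback;
};

static int example_init_tfm(struct crypto_shash *tfm)
{
	struct example_tfm_ctx *ctx = crypto_shash_ctx(tfm);

	/* Runs once, right after the tfm context has been allocated. */
	ctx->fallback = crypto_alloc_shash("sha256-generic", 0, 0);
	return PTR_ERR_OR_ZERO(ctx->fallback);
}

static void example_exit_tfm(struct crypto_shash *tfm)
{
	struct example_tfm_ctx *ctx = crypto_shash_ctx(tfm);

	/* Undo whatever @init_tfm set up. */
	crypto_free_shash(ctx->fallback);
}

static struct shash_alg example_alg = {
	/* digestsize, init/update/final ops etc. elided for brevity */
	.base.cra_ctxsize	= sizeof(struct example_tfm_ctx),
	.init_tfm		= example_init_tfm,
	.exit_tfm		= example_exit_tfm,
};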
diff --git a/include/crypto/internal/acompress.h b/include/crypto/internal/acompress.h
index 9de57367afbb..cf478681b53e 100644
--- a/include/crypto/internal/acompress.h
+++ b/include/crypto/internal/acompress.h
@@ -68,10 +68,8 @@ int crypto_register_acomp(struct acomp_alg *alg);
* compression algorithm
*
* @alg: algorithm definition
- *
- * Return: zero on success; error code in case of error
*/
-int crypto_unregister_acomp(struct acomp_alg *alg);
+void crypto_unregister_acomp(struct acomp_alg *alg);
int crypto_register_acomps(struct acomp_alg *algs, int count);
void crypto_unregister_acomps(struct acomp_alg *algs, int count);
diff --git a/include/crypto/internal/aead.h b/include/crypto/internal/aead.h
index c509ec30fc65..27b7b0224ea6 100644
--- a/include/crypto/internal/aead.h
+++ b/include/crypto/internal/aead.h
@@ -81,14 +81,9 @@ static inline struct aead_request *aead_request_cast(
return container_of(req, struct aead_request, base);
}
-static inline void crypto_set_aead_spawn(
- struct crypto_aead_spawn *spawn, struct crypto_instance *inst)
-{
- crypto_set_spawn(&spawn->base, inst);
-}
-
-int crypto_grab_aead(struct crypto_aead_spawn *spawn, const char *name,
- u32 type, u32 mask);
+int crypto_grab_aead(struct crypto_aead_spawn *spawn,
+ struct crypto_instance *inst,
+ const char *name, u32 type, u32 mask);
static inline void crypto_drop_aead(struct crypto_aead_spawn *spawn)
{
@@ -113,16 +108,6 @@ static inline void crypto_aead_set_reqsize(struct crypto_aead *aead,
aead->reqsize = reqsize;
}
-static inline unsigned int crypto_aead_alg_maxauthsize(struct aead_alg *alg)
-{
- return alg->maxauthsize;
-}
-
-static inline unsigned int crypto_aead_maxauthsize(struct crypto_aead *aead)
-{
- return crypto_aead_alg_maxauthsize(crypto_aead_alg(aead));
-}
-
static inline void aead_init_queue(struct aead_queue *queue,
unsigned int max_qlen)
{
diff --git a/include/crypto/internal/akcipher.h b/include/crypto/internal/akcipher.h
index d6c8a42789ad..8d3220c9ab77 100644
--- a/include/crypto/internal/akcipher.h
+++ b/include/crypto/internal/akcipher.h
@@ -78,15 +78,9 @@ static inline void *akcipher_instance_ctx(struct akcipher_instance *inst)
return crypto_instance_ctx(akcipher_crypto_instance(inst));
}
-static inline void crypto_set_akcipher_spawn(
- struct crypto_akcipher_spawn *spawn,
- struct crypto_instance *inst)
-{
- crypto_set_spawn(&spawn->base, inst);
-}
-
-int crypto_grab_akcipher(struct crypto_akcipher_spawn *spawn, const char *name,
- u32 type, u32 mask);
+int crypto_grab_akcipher(struct crypto_akcipher_spawn *spawn,
+ struct crypto_instance *inst,
+ const char *name, u32 type, u32 mask);
static inline struct crypto_akcipher *crypto_spawn_akcipher(
struct crypto_akcipher_spawn *spawn)
diff --git a/include/crypto/internal/blake2s.h b/include/crypto/internal/blake2s.h
new file mode 100644
index 000000000000..74ff77032e52
--- /dev/null
+++ b/include/crypto/internal/blake2s.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 OR MIT */
+
+#ifndef BLAKE2S_INTERNAL_H
+#define BLAKE2S_INTERNAL_H
+
+#include <crypto/blake2s.h>
+
+struct blake2s_tfm_ctx {
+ u8 key[BLAKE2S_KEY_SIZE];
+ unsigned int keylen;
+};
+
+void blake2s_compress_generic(struct blake2s_state *state, const u8 *block,
+ size_t nblocks, const u32 inc);
+
+void blake2s_compress_arch(struct blake2s_state *state, const u8 *block,
+ size_t nblocks, const u32 inc);
+
+static inline void blake2s_set_lastblock(struct blake2s_state *state)
+{
+ state->f[0] = -1;
+}
+
+#endif /* BLAKE2S_INTERNAL_H */
diff --git a/include/crypto/internal/chacha.h b/include/crypto/internal/chacha.h
new file mode 100644
index 000000000000..b085dc1ac151
--- /dev/null
+++ b/include/crypto/internal/chacha.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _CRYPTO_INTERNAL_CHACHA_H
+#define _CRYPTO_INTERNAL_CHACHA_H
+
+#include <crypto/chacha.h>
+#include <crypto/internal/skcipher.h>
+#include <linux/crypto.h>
+
+struct chacha_ctx {
+ u32 key[8];
+ int nrounds;
+};
+
+static inline int chacha_setkey(struct crypto_skcipher *tfm, const u8 *key,
+ unsigned int keysize, int nrounds)
+{
+ struct chacha_ctx *ctx = crypto_skcipher_ctx(tfm);
+ int i;
+
+ if (keysize != CHACHA_KEY_SIZE)
+ return -EINVAL;
+
+ for (i = 0; i < ARRAY_SIZE(ctx->key); i++)
+ ctx->key[i] = get_unaligned_le32(key + i * sizeof(u32));
+
+ ctx->nrounds = nrounds;
+ return 0;
+}
+
+static inline int chacha20_setkey(struct crypto_skcipher *tfm, const u8 *key,
+ unsigned int keysize)
+{
+ return chacha_setkey(tfm, key, keysize, 20);
+}
+
+static inline int chacha12_setkey(struct crypto_skcipher *tfm, const u8 *key,
+ unsigned int keysize)
+{
+ return chacha_setkey(tfm, key, keysize, 12);
+}
+
+#endif /* _CRYPTO_INTERNAL_CHACHA_H */
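These shared setkey helpers are meant to be plugged straight into an skcipher_alg; a rough sketch follows, where the crypt handler is only a placeholder stub and every example_* name is hypothetical:

#include <crypto/internal/chacha.h>
#include <linux/errno.h>

static int example_chacha_crypt(struct skcipher_request *req)
{
	/* A real driver would walk the request and run ChaCha20 here. */
	return -EOPNOTSUPP;
}

static struct skcipher_alg example_chacha_alg = {
	.base.cra_name		= "chacha20",
	.base.cra_ctxsize	= sizeof(struct chacha_ctx),
	.min_keysize		= CHACHA_KEY_SIZE,
	.max_keysize		= CHACHA_KEY_SIZE,
	.ivsize			= CHACHA_IV_SIZE,
	.setkey			= chacha20_setkey,	/* shared helper */
	.encrypt		= example_chacha_crypt,
	.decrypt		= example_chacha_crypt,
};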
diff --git a/include/crypto/internal/des.h b/include/crypto/internal/des.h
index 81ea1a425e9c..723fe5bf16da 100644
--- a/include/crypto/internal/des.h
+++ b/include/crypto/internal/des.h
@@ -35,10 +35,6 @@ static inline int crypto_des_verify_key(struct crypto_tfm *tfm, const u8 *key)
else
err = 0;
}
-
- if (err)
- crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_WEAK_KEY);
-
memzero_explicit(&tmp, sizeof(tmp));
return err;
}
@@ -95,14 +91,9 @@ bad:
static inline int crypto_des3_ede_verify_key(struct crypto_tfm *tfm,
const u8 *key)
{
- int err;
-
- err = des3_ede_verify_key(key, DES3_EDE_KEY_SIZE,
- crypto_tfm_get_flags(tfm) &
- CRYPTO_TFM_REQ_FORBID_WEAK_KEYS);
- if (err)
- crypto_tfm_set_flags(tfm, CRYPTO_TFM_RES_WEAK_KEY);
- return err;
+ return des3_ede_verify_key(key, DES3_EDE_KEY_SIZE,
+ crypto_tfm_get_flags(tfm) &
+ CRYPTO_TFM_REQ_FORBID_WEAK_KEYS);
}
static inline int verify_skcipher_des_key(struct crypto_skcipher *tfm,
@@ -117,35 +108,19 @@ static inline int verify_skcipher_des3_key(struct crypto_skcipher *tfm,
return crypto_des3_ede_verify_key(crypto_skcipher_tfm(tfm), key);
}
-static inline int verify_ablkcipher_des_key(struct crypto_ablkcipher *tfm,
- const u8 *key)
-{
- return crypto_des_verify_key(crypto_ablkcipher_tfm(tfm), key);
-}
-
-static inline int verify_ablkcipher_des3_key(struct crypto_ablkcipher *tfm,
- const u8 *key)
-{
- return crypto_des3_ede_verify_key(crypto_ablkcipher_tfm(tfm), key);
-}
-
static inline int verify_aead_des_key(struct crypto_aead *tfm, const u8 *key,
int keylen)
{
- if (keylen != DES_KEY_SIZE) {
- crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (keylen != DES_KEY_SIZE)
return -EINVAL;
- }
return crypto_des_verify_key(crypto_aead_tfm(tfm), key);
}
static inline int verify_aead_des3_key(struct crypto_aead *tfm, const u8 *key,
int keylen)
{
- if (keylen != DES3_EDE_KEY_SIZE) {
- crypto_aead_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (keylen != DES3_EDE_KEY_SIZE)
return -EINVAL;
- }
return crypto_des3_ede_verify_key(crypto_aead_tfm(tfm), key);
}
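With the CRYPTO_TFM_RES_* flag handling gone, the verify helpers simply return an error code; a sketch of a 3DES setkey built on them, where the context struct and function names are hypothetical and keylen is assumed to have been range-checked by the skcipher core against min/max keysize:

#include <crypto/internal/des.h>
#include <crypto/internal/skcipher.h>
#include <linux/string.h>

struct example_des3_ctx {
	u8 key[DES3_EDE_KEY_SIZE];
};

static int example_des3_setkey(struct crypto_skcipher *tfm, const u8 *key,
			       unsigned int keylen)
{
	struct example_des3_ctx *ctx = crypto_skcipher_ctx(tfm);
	int err;

	/* Rejects weak keys when CRYPTO_TFM_REQ_FORBID_WEAK_KEYS is set. */
	err = verify_skcipher_des3_key(tfm, key);
	if (err)
		return err;

	memcpy(ctx->key, key, DES3_EDE_KEY_SIZE);
	return 0;
}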
diff --git a/include/crypto/internal/geniv.h b/include/crypto/internal/geniv.h
index 0108c0c7b2ed..229d37681a9d 100644
--- a/include/crypto/internal/geniv.h
+++ b/include/crypto/internal/geniv.h
@@ -21,7 +21,6 @@ struct aead_geniv_ctx {
struct aead_instance *aead_geniv_alloc(struct crypto_template *tmpl,
struct rtattr **tb, u32 type, u32 mask);
-void aead_geniv_free(struct aead_instance *inst);
int aead_init_geniv(struct crypto_aead *tfm);
void aead_exit_geniv(struct crypto_aead *tfm);
diff --git a/include/crypto/internal/hash.h b/include/crypto/internal/hash.h
index bfc9db7b100d..89f6f46ab2b8 100644
--- a/include/crypto/internal/hash.h
+++ b/include/crypto/internal/hash.h
@@ -30,11 +30,25 @@ struct crypto_hash_walk {
};
struct ahash_instance {
- struct ahash_alg alg;
+ void (*free)(struct ahash_instance *inst);
+ union {
+ struct {
+ char head[offsetof(struct ahash_alg, halg.base)];
+ struct crypto_instance base;
+ } s;
+ struct ahash_alg alg;
+ };
};
struct shash_instance {
- struct shash_alg alg;
+ void (*free)(struct shash_instance *inst);
+ union {
+ struct {
+ char head[offsetof(struct shash_alg, base)];
+ struct crypto_instance base;
+ } s;
+ struct shash_alg alg;
+ };
};
struct crypto_ahash_spawn {
@@ -45,8 +59,6 @@ struct crypto_shash_spawn {
struct crypto_spawn base;
};
-extern const struct crypto_type crypto_ahash_type;
-
int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err);
int crypto_hash_walk_first(struct ahash_request *req,
struct crypto_hash_walk *walk);
@@ -70,12 +82,11 @@ static inline int crypto_ahash_walk_last(struct crypto_hash_walk *walk)
}
int crypto_register_ahash(struct ahash_alg *alg);
-int crypto_unregister_ahash(struct ahash_alg *alg);
+void crypto_unregister_ahash(struct ahash_alg *alg);
int crypto_register_ahashes(struct ahash_alg *algs, int count);
void crypto_unregister_ahashes(struct ahash_alg *algs, int count);
int ahash_register_instance(struct crypto_template *tmpl,
struct ahash_instance *inst);
-void ahash_free_instance(struct crypto_instance *inst);
int shash_no_setkey(struct crypto_shash *tfm, const u8 *key,
unsigned int keylen);
@@ -85,37 +96,51 @@ static inline bool crypto_shash_alg_has_setkey(struct shash_alg *alg)
return alg->setkey != shash_no_setkey;
}
+static inline bool crypto_shash_alg_needs_key(struct shash_alg *alg)
+{
+ return crypto_shash_alg_has_setkey(alg) &&
+ !(alg->base.cra_flags & CRYPTO_ALG_OPTIONAL_KEY);
+}
+
bool crypto_hash_alg_has_setkey(struct hash_alg_common *halg);
-int crypto_init_ahash_spawn(struct crypto_ahash_spawn *spawn,
- struct hash_alg_common *alg,
- struct crypto_instance *inst);
+int crypto_grab_ahash(struct crypto_ahash_spawn *spawn,
+ struct crypto_instance *inst,
+ const char *name, u32 type, u32 mask);
static inline void crypto_drop_ahash(struct crypto_ahash_spawn *spawn)
{
crypto_drop_spawn(&spawn->base);
}
-struct hash_alg_common *ahash_attr_alg(struct rtattr *rta, u32 type, u32 mask);
+static inline struct hash_alg_common *crypto_spawn_ahash_alg(
+ struct crypto_ahash_spawn *spawn)
+{
+ return __crypto_hash_alg_common(spawn->base.alg);
+}
int crypto_register_shash(struct shash_alg *alg);
-int crypto_unregister_shash(struct shash_alg *alg);
+void crypto_unregister_shash(struct shash_alg *alg);
int crypto_register_shashes(struct shash_alg *algs, int count);
-int crypto_unregister_shashes(struct shash_alg *algs, int count);
+void crypto_unregister_shashes(struct shash_alg *algs, int count);
int shash_register_instance(struct crypto_template *tmpl,
struct shash_instance *inst);
-void shash_free_instance(struct crypto_instance *inst);
+void shash_free_singlespawn_instance(struct shash_instance *inst);
-int crypto_init_shash_spawn(struct crypto_shash_spawn *spawn,
- struct shash_alg *alg,
- struct crypto_instance *inst);
+int crypto_grab_shash(struct crypto_shash_spawn *spawn,
+ struct crypto_instance *inst,
+ const char *name, u32 type, u32 mask);
static inline void crypto_drop_shash(struct crypto_shash_spawn *spawn)
{
crypto_drop_spawn(&spawn->base);
}
-struct shash_alg *shash_attr_alg(struct rtattr *rta, u32 type, u32 mask);
+static inline struct shash_alg *crypto_spawn_shash_alg(
+ struct crypto_shash_spawn *spawn)
+{
+ return __crypto_shash_alg(spawn->base.alg);
+}
int shash_ahash_update(struct ahash_request *req, struct shash_desc *desc);
int shash_ahash_finup(struct ahash_request *req, struct shash_desc *desc);
@@ -143,13 +168,13 @@ static inline void crypto_ahash_set_reqsize(struct crypto_ahash *tfm,
static inline struct crypto_instance *ahash_crypto_instance(
struct ahash_instance *inst)
{
- return container_of(&inst->alg.halg.base, struct crypto_instance, alg);
+ return &inst->s.base;
}
static inline struct ahash_instance *ahash_instance(
struct crypto_instance *inst)
{
- return container_of(&inst->alg, struct ahash_instance, alg.halg.base);
+ return container_of(inst, struct ahash_instance, s.base);
}
static inline void *ahash_instance_ctx(struct ahash_instance *inst)
@@ -157,17 +182,6 @@ static inline void *ahash_instance_ctx(struct ahash_instance *inst)
return crypto_instance_ctx(ahash_crypto_instance(inst));
}
-static inline unsigned int ahash_instance_headroom(void)
-{
- return sizeof(struct ahash_alg) - sizeof(struct crypto_alg);
-}
-
-static inline struct ahash_instance *ahash_alloc_instance(
- const char *name, struct crypto_alg *alg)
-{
- return crypto_alloc_instance(name, alg, ahash_instance_headroom());
-}
-
static inline void ahash_request_complete(struct ahash_request *req, int err)
{
req->base.complete(&req->base, err);
@@ -204,26 +218,24 @@ static inline void *crypto_shash_ctx(struct crypto_shash *tfm)
static inline struct crypto_instance *shash_crypto_instance(
struct shash_instance *inst)
{
- return container_of(&inst->alg.base, struct crypto_instance, alg);
+ return &inst->s.base;
}
static inline struct shash_instance *shash_instance(
struct crypto_instance *inst)
{
- return container_of(__crypto_shash_alg(&inst->alg),
- struct shash_instance, alg);
+ return container_of(inst, struct shash_instance, s.base);
}
-static inline void *shash_instance_ctx(struct shash_instance *inst)
+static inline struct shash_instance *shash_alg_instance(
+ struct crypto_shash *shash)
{
- return crypto_instance_ctx(shash_crypto_instance(inst));
+ return shash_instance(crypto_tfm_alg_instance(&shash->base));
}
-static inline struct shash_instance *shash_alloc_instance(
- const char *name, struct crypto_alg *alg)
+static inline void *shash_instance_ctx(struct shash_instance *inst)
{
- return crypto_alloc_instance(name, alg,
- sizeof(struct shash_alg) - sizeof(*alg));
+ return crypto_instance_ctx(shash_crypto_instance(inst));
}
static inline struct crypto_shash *crypto_spawn_shash(
diff --git a/include/crypto/internal/poly1305.h b/include/crypto/internal/poly1305.h
new file mode 100644
index 000000000000..064e52ca5248
--- /dev/null
+++ b/include/crypto/internal/poly1305.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Common values for the Poly1305 algorithm
+ */
+
+#ifndef _CRYPTO_INTERNAL_POLY1305_H
+#define _CRYPTO_INTERNAL_POLY1305_H
+
+#include <asm/unaligned.h>
+#include <linux/types.h>
+#include <crypto/poly1305.h>
+
+/*
+ * Poly1305 core functions. These only accept whole blocks; the caller must
+ * handle any needed block buffering and padding. 'hibit' must be 1 for any
+ * full blocks, or 0 for the final block if it had to be padded. If 'nonce' is
+ * non-NULL, then it's added at the end to compute the Poly1305 MAC. Otherwise,
+ * only the ε-almost-∆-universal hash function (not the full MAC) is computed.
+ */
+
+void poly1305_core_setkey(struct poly1305_core_key *key, const u8 *raw_key);
+static inline void poly1305_core_init(struct poly1305_state *state)
+{
+ *state = (struct poly1305_state){};
+}
+
+void poly1305_core_blocks(struct poly1305_state *state,
+ const struct poly1305_core_key *key, const void *src,
+ unsigned int nblocks, u32 hibit);
+void poly1305_core_emit(const struct poly1305_state *state, const u32 nonce[4],
+ void *dst);
+
+#endif
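A sketch of driving the core interface directly for a message that is an exact multiple of POLY1305_BLOCK_SIZE, so 'hibit' stays 1 and no padding is needed; the nonce words are the second half of the 32-byte one-time key (the function name is illustrative):

#include <crypto/internal/poly1305.h>

static void example_poly1305_core(u8 mac[POLY1305_DIGEST_SIZE],
				  const u8 *msg, unsigned int nblocks,
				  const u8 raw_key[POLY1305_KEY_SIZE])
{
	struct poly1305_core_key key;
	struct poly1305_state state;
	u32 nonce[4];

	poly1305_core_setkey(&key, raw_key);	/* consumes the first 16 bytes */
	nonce[0] = get_unaligned_le32(raw_key + 16);
	nonce[1] = get_unaligned_le32(raw_key + 20);
	nonce[2] = get_unaligned_le32(raw_key + 24);
	nonce[3] = get_unaligned_le32(raw_key + 28);

	poly1305_core_init(&state);
	poly1305_core_blocks(&state, &key, msg, nblocks, 1);
	poly1305_core_emit(&state, nonce, mac);	/* adds the encrypted nonce */
}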
diff --git a/include/crypto/internal/scompress.h b/include/crypto/internal/scompress.h
index 6727ef0fc4d1..f834274c2493 100644
--- a/include/crypto/internal/scompress.h
+++ b/include/crypto/internal/scompress.h
@@ -112,10 +112,8 @@ int crypto_register_scomp(struct scomp_alg *alg);
* compression algorithm
*
* @alg: algorithm definition
- *
- * Return: zero on success; error code in case of error
*/
-int crypto_unregister_scomp(struct scomp_alg *alg);
+void crypto_unregister_scomp(struct scomp_alg *alg);
int crypto_register_scomps(struct scomp_alg *algs, int count);
void crypto_unregister_scomps(struct scomp_alg *algs, int count);
diff --git a/include/crypto/internal/skcipher.h b/include/crypto/internal/skcipher.h
index 734b6f7081b8..10226c12c5df 100644
--- a/include/crypto/internal/skcipher.h
+++ b/include/crypto/internal/skcipher.h
@@ -88,14 +88,9 @@ static inline void skcipher_request_complete(struct skcipher_request *req, int e
req->base.complete(&req->base, err);
}
-static inline void crypto_set_skcipher_spawn(
- struct crypto_skcipher_spawn *spawn, struct crypto_instance *inst)
-{
- crypto_set_spawn(&spawn->base, inst);
-}
-
-int crypto_grab_skcipher(struct crypto_skcipher_spawn *spawn, const char *name,
- u32 type, u32 mask);
+int crypto_grab_skcipher(struct crypto_skcipher_spawn *spawn,
+ struct crypto_instance *inst,
+ const char *name, u32 type, u32 mask);
static inline void crypto_drop_skcipher(struct crypto_skcipher_spawn *spawn)
{
@@ -140,8 +135,6 @@ int skcipher_walk_virt(struct skcipher_walk *walk,
void skcipher_walk_atomise(struct skcipher_walk *walk);
int skcipher_walk_async(struct skcipher_walk *walk,
struct skcipher_request *req);
-int skcipher_walk_aead(struct skcipher_walk *walk, struct aead_request *req,
- bool atomic);
int skcipher_walk_aead_encrypt(struct skcipher_walk *walk,
struct aead_request *req, bool atomic);
int skcipher_walk_aead_decrypt(struct skcipher_walk *walk,
@@ -153,17 +146,6 @@ static inline void skcipher_walk_abort(struct skcipher_walk *walk)
skcipher_walk_done(walk, -ECANCELED);
}
-static inline void ablkcipher_request_complete(struct ablkcipher_request *req,
- int err)
-{
- req->base.complete(&req->base, err);
-}
-
-static inline u32 ablkcipher_request_flags(struct ablkcipher_request *req)
-{
- return req->base.flags;
-}
-
static inline void *crypto_skcipher_ctx(struct crypto_skcipher *tfm)
{
return crypto_tfm_ctx(&tfm->base);
@@ -182,73 +164,22 @@ static inline u32 skcipher_request_flags(struct skcipher_request *req)
static inline unsigned int crypto_skcipher_alg_min_keysize(
struct skcipher_alg *alg)
{
- if ((alg->base.cra_flags & CRYPTO_ALG_TYPE_MASK) ==
- CRYPTO_ALG_TYPE_BLKCIPHER)
- return alg->base.cra_blkcipher.min_keysize;
-
- if (alg->base.cra_ablkcipher.encrypt)
- return alg->base.cra_ablkcipher.min_keysize;
-
return alg->min_keysize;
}
static inline unsigned int crypto_skcipher_alg_max_keysize(
struct skcipher_alg *alg)
{
- if ((alg->base.cra_flags & CRYPTO_ALG_TYPE_MASK) ==
- CRYPTO_ALG_TYPE_BLKCIPHER)
- return alg->base.cra_blkcipher.max_keysize;
-
- if (alg->base.cra_ablkcipher.encrypt)
- return alg->base.cra_ablkcipher.max_keysize;
-
return alg->max_keysize;
}
-static inline unsigned int crypto_skcipher_alg_chunksize(
- struct skcipher_alg *alg)
-{
- if ((alg->base.cra_flags & CRYPTO_ALG_TYPE_MASK) ==
- CRYPTO_ALG_TYPE_BLKCIPHER)
- return alg->base.cra_blocksize;
-
- if (alg->base.cra_ablkcipher.encrypt)
- return alg->base.cra_blocksize;
-
- return alg->chunksize;
-}
-
static inline unsigned int crypto_skcipher_alg_walksize(
struct skcipher_alg *alg)
{
- if ((alg->base.cra_flags & CRYPTO_ALG_TYPE_MASK) ==
- CRYPTO_ALG_TYPE_BLKCIPHER)
- return alg->base.cra_blocksize;
-
- if (alg->base.cra_ablkcipher.encrypt)
- return alg->base.cra_blocksize;
-
return alg->walksize;
}
/**
- * crypto_skcipher_chunksize() - obtain chunk size
- * @tfm: cipher handle
- *
- * The block size is set to one for ciphers such as CTR. However,
- * you still need to provide incremental updates in multiples of
- * the underlying block size as the IV does not have sub-block
- * granularity. This is known in this API as the chunk size.
- *
- * Return: chunk size in bytes
- */
-static inline unsigned int crypto_skcipher_chunksize(
- struct crypto_skcipher *tfm)
-{
- return crypto_skcipher_alg_chunksize(crypto_skcipher_alg(tfm));
-}
-
-/**
* crypto_skcipher_walksize() - obtain walk size
* @tfm: cipher handle
*
@@ -276,9 +207,17 @@ skcipher_cipher_simple(struct crypto_skcipher *tfm)
return ctx->cipher;
}
-struct skcipher_instance *
-skcipher_alloc_instance_simple(struct crypto_template *tmpl, struct rtattr **tb,
- struct crypto_alg **cipher_alg_ret);
+
+struct skcipher_instance *skcipher_alloc_instance_simple(
+ struct crypto_template *tmpl, struct rtattr **tb);
+
+static inline struct crypto_alg *skcipher_ialg_simple(
+ struct skcipher_instance *inst)
+{
+ struct crypto_cipher_spawn *spawn = skcipher_instance_ctx(inst);
+
+ return crypto_spawn_cipher_alg(spawn);
+}
#endif /* _CRYPTO_INTERNAL_SKCIPHER_H */
diff --git a/include/crypto/nhpoly1305.h b/include/crypto/nhpoly1305.h
index 53c04423c582..306925fea190 100644
--- a/include/crypto/nhpoly1305.h
+++ b/include/crypto/nhpoly1305.h
@@ -7,7 +7,7 @@
#define _NHPOLY1305_H
#include <crypto/hash.h>
-#include <crypto/poly1305.h>
+#include <crypto/internal/poly1305.h>
/* NH parameterization: */
@@ -33,7 +33,7 @@
#define NHPOLY1305_KEY_SIZE (POLY1305_BLOCK_SIZE + NH_KEY_BYTES)
struct nhpoly1305_key {
- struct poly1305_key poly_key;
+ struct poly1305_core_key poly_key;
u32 nh_key[NH_KEY_WORDS];
};
diff --git a/include/crypto/poly1305.h b/include/crypto/poly1305.h
index 34317ed2071e..f1f67fc749cf 100644
--- a/include/crypto/poly1305.h
+++ b/include/crypto/poly1305.h
@@ -13,52 +13,85 @@
#define POLY1305_KEY_SIZE 32
#define POLY1305_DIGEST_SIZE 16
+/* The poly1305_key and poly1305_state types are mostly opaque and
+ * implementation-defined. Limbs might be in base 2^64 or base 2^26, or
+ * different yet. The union type provided keeps these 64-bit aligned for the
+ * case in which this is implemented using 64x64 multiplies.
+ */
+
struct poly1305_key {
- u32 r[5]; /* key, base 2^26 */
+ union {
+ u32 r[5];
+ u64 r64[3];
+ };
+};
+
+struct poly1305_core_key {
+ struct poly1305_key key;
+ struct poly1305_key precomputed_s;
};
struct poly1305_state {
- u32 h[5]; /* accumulator, base 2^26 */
+ union {
+ u32 h[5];
+ u64 h64[3];
+ };
};
struct poly1305_desc_ctx {
- /* key */
- struct poly1305_key r;
- /* finalize key */
- u32 s[4];
- /* accumulator */
- struct poly1305_state h;
/* partial buffer */
u8 buf[POLY1305_BLOCK_SIZE];
/* bytes used in partial buffer */
unsigned int buflen;
- /* r key has been set */
- bool rset;
- /* s key has been set */
+ /* how many keys have been set in r[] */
+ unsigned short rset;
+ /* whether s[] has been set */
bool sset;
+ /* finalize key */
+ u32 s[4];
+ /* accumulator */
+ struct poly1305_state h;
+ /* key */
+ union {
+ struct poly1305_key opaque_r[CONFIG_CRYPTO_LIB_POLY1305_RSIZE];
+ struct poly1305_core_key core_r;
+ };
};
-/*
- * Poly1305 core functions. These implement the ε-almost-∆-universal hash
- * function underlying the Poly1305 MAC, i.e. they don't add an encrypted nonce
- * ("s key") at the end. They also only support block-aligned inputs.
- */
-void poly1305_core_setkey(struct poly1305_key *key, const u8 *raw_key);
-static inline void poly1305_core_init(struct poly1305_state *state)
+void poly1305_init_arch(struct poly1305_desc_ctx *desc, const u8 *key);
+void poly1305_init_generic(struct poly1305_desc_ctx *desc, const u8 *key);
+
+static inline void poly1305_init(struct poly1305_desc_ctx *desc, const u8 *key)
+{
+ if (IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_POLY1305))
+ poly1305_init_arch(desc, key);
+ else
+ poly1305_init_generic(desc, key);
+}
+
+void poly1305_update_arch(struct poly1305_desc_ctx *desc, const u8 *src,
+ unsigned int nbytes);
+void poly1305_update_generic(struct poly1305_desc_ctx *desc, const u8 *src,
+ unsigned int nbytes);
+
+static inline void poly1305_update(struct poly1305_desc_ctx *desc,
+ const u8 *src, unsigned int nbytes)
{
- memset(state->h, 0, sizeof(state->h));
+ if (IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_POLY1305))
+ poly1305_update_arch(desc, src, nbytes);
+ else
+ poly1305_update_generic(desc, src, nbytes);
}
-void poly1305_core_blocks(struct poly1305_state *state,
- const struct poly1305_key *key,
- const void *src, unsigned int nblocks);
-void poly1305_core_emit(const struct poly1305_state *state, void *dst);
-/* Crypto API helper functions for the Poly1305 MAC */
-int crypto_poly1305_init(struct shash_desc *desc);
-unsigned int crypto_poly1305_setdesckey(struct poly1305_desc_ctx *dctx,
- const u8 *src, unsigned int srclen);
-int crypto_poly1305_update(struct shash_desc *desc,
- const u8 *src, unsigned int srclen);
-int crypto_poly1305_final(struct shash_desc *desc, u8 *dst);
+void poly1305_final_arch(struct poly1305_desc_ctx *desc, u8 *digest);
+void poly1305_final_generic(struct poly1305_desc_ctx *desc, u8 *digest);
+
+static inline void poly1305_final(struct poly1305_desc_ctx *desc, u8 *digest)
+{
+ if (IS_ENABLED(CONFIG_CRYPTO_ARCH_HAVE_LIB_POLY1305))
+ poly1305_final_arch(desc, digest);
+ else
+ poly1305_final_generic(desc, digest);
+}
#endif
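A sketch of the new library flow, which dispatches to the arch or generic implementation through the IS_ENABLED() wrappers above (names are illustrative):

#include <crypto/poly1305.h>

/* MAC two fragments under a 32-byte one-time key. */
static void example_poly1305_mac(u8 mac[POLY1305_DIGEST_SIZE],
				 const u8 *a, unsigned int alen,
				 const u8 *b, unsigned int blen,
				 const u8 key[POLY1305_KEY_SIZE])
{
	struct poly1305_desc_ctx desc;

	poly1305_init(&desc, key);
	poly1305_update(&desc, a, alen);
	poly1305_update(&desc, b, blen);
	poly1305_final(&desc, mac);
}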
diff --git a/include/crypto/serpent.h b/include/crypto/serpent.h
index 7dd780c5d058..75c7eaa20853 100644
--- a/include/crypto/serpent.h
+++ b/include/crypto/serpent.h
@@ -22,7 +22,7 @@ int __serpent_setkey(struct serpent_ctx *ctx, const u8 *key,
unsigned int keylen);
int serpent_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int keylen);
-void __serpent_encrypt(struct serpent_ctx *ctx, u8 *dst, const u8 *src);
-void __serpent_decrypt(struct serpent_ctx *ctx, u8 *dst, const u8 *src);
+void __serpent_encrypt(const void *ctx, u8 *dst, const u8 *src);
+void __serpent_decrypt(const void *ctx, u8 *dst, const u8 *src);
#endif
diff --git a/include/crypto/skcipher.h b/include/crypto/skcipher.h
index 37c164234d97..141e7690f9c3 100644
--- a/include/crypto/skcipher.h
+++ b/include/crypto/skcipher.h
@@ -35,14 +35,7 @@ struct skcipher_request {
};
struct crypto_skcipher {
- int (*setkey)(struct crypto_skcipher *tfm, const u8 *key,
- unsigned int keylen);
- int (*encrypt)(struct skcipher_request *req);
- int (*decrypt)(struct skcipher_request *req);
-
- unsigned int ivsize;
unsigned int reqsize;
- unsigned int keysize;
struct crypto_tfm base;
};
@@ -218,30 +211,13 @@ static inline void crypto_free_sync_skcipher(struct crypto_sync_skcipher *tfm)
* crypto_has_skcipher() - Search for the availability of an skcipher.
* @alg_name: is the cra_name / name or cra_driver_name / driver name of the
* skcipher
- * @type: specifies the type of the cipher
- * @mask: specifies the mask for the cipher
- *
- * Return: true when the skcipher is known to the kernel crypto API; false
- * otherwise
- */
-static inline int crypto_has_skcipher(const char *alg_name, u32 type,
- u32 mask)
-{
- return crypto_has_alg(alg_name, crypto_skcipher_type(type),
- crypto_skcipher_mask(mask));
-}
-
-/**
- * crypto_has_skcipher2() - Search for the availability of an skcipher.
- * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
- * skcipher
* @type: specifies the type of the skcipher
* @mask: specifies the mask for the skcipher
*
* Return: true when the skcipher is known to the kernel crypto API; false
* otherwise
*/
-int crypto_has_skcipher2(const char *alg_name, u32 type, u32 mask);
+int crypto_has_skcipher(const char *alg_name, u32 type, u32 mask);
static inline const char *crypto_skcipher_driver_name(
struct crypto_skcipher *tfm)
@@ -258,13 +234,6 @@ static inline struct skcipher_alg *crypto_skcipher_alg(
static inline unsigned int crypto_skcipher_alg_ivsize(struct skcipher_alg *alg)
{
- if ((alg->base.cra_flags & CRYPTO_ALG_TYPE_MASK) ==
- CRYPTO_ALG_TYPE_BLKCIPHER)
- return alg->base.cra_blkcipher.ivsize;
-
- if (alg->base.cra_ablkcipher.encrypt)
- return alg->base.cra_ablkcipher.ivsize;
-
return alg->ivsize;
}
@@ -279,7 +248,7 @@ static inline unsigned int crypto_skcipher_alg_ivsize(struct skcipher_alg *alg)
*/
static inline unsigned int crypto_skcipher_ivsize(struct crypto_skcipher *tfm)
{
- return tfm->ivsize;
+ return crypto_skcipher_alg(tfm)->ivsize;
}
static inline unsigned int crypto_sync_skcipher_ivsize(
@@ -304,6 +273,29 @@ static inline unsigned int crypto_skcipher_blocksize(
return crypto_tfm_alg_blocksize(crypto_skcipher_tfm(tfm));
}
+static inline unsigned int crypto_skcipher_alg_chunksize(
+ struct skcipher_alg *alg)
+{
+ return alg->chunksize;
+}
+
+/**
+ * crypto_skcipher_chunksize() - obtain chunk size
+ * @tfm: cipher handle
+ *
+ * The block size is set to one for ciphers such as CTR. However,
+ * you still need to provide incremental updates in multiples of
+ * the underlying block size as the IV does not have sub-block
+ * granularity. This is known in this API as the chunk size.
+ *
+ * Return: chunk size in bytes
+ */
+static inline unsigned int crypto_skcipher_chunksize(
+ struct crypto_skcipher *tfm)
+{
+ return crypto_skcipher_alg_chunksize(crypto_skcipher_alg(tfm));
+}
+
static inline unsigned int crypto_sync_skcipher_blocksize(
struct crypto_sync_skcipher *tfm)
{
@@ -367,11 +359,8 @@ static inline void crypto_sync_skcipher_clear_flags(
*
* Return: 0 if the setting of the key was successful; < 0 if an error occurred
*/
-static inline int crypto_skcipher_setkey(struct crypto_skcipher *tfm,
- const u8 *key, unsigned int keylen)
-{
- return tfm->setkey(tfm, key, keylen);
-}
+int crypto_skcipher_setkey(struct crypto_skcipher *tfm,
+ const u8 *key, unsigned int keylen);
static inline int crypto_sync_skcipher_setkey(struct crypto_sync_skcipher *tfm,
const u8 *key, unsigned int keylen)
@@ -379,10 +368,16 @@ static inline int crypto_sync_skcipher_setkey(struct crypto_sync_skcipher *tfm,
return crypto_skcipher_setkey(&tfm->base, key, keylen);
}
-static inline unsigned int crypto_skcipher_default_keysize(
+static inline unsigned int crypto_skcipher_min_keysize(
+ struct crypto_skcipher *tfm)
+{
+ return crypto_skcipher_alg(tfm)->min_keysize;
+}
+
+static inline unsigned int crypto_skcipher_max_keysize(
struct crypto_skcipher *tfm)
{
- return tfm->keysize;
+ return crypto_skcipher_alg(tfm)->max_keysize;
}
/**
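Since the per-tfm copies of ivsize and keysize are gone, callers now read these properties through the algorithm. A rough sketch of allocating a handle and validating a key against the advertised range before setting it; crypto_alloc_skcipher() and crypto_free_skcipher() come from the unchanged part of this header, and the explicit range check only illustrates the accessors, since crypto_skcipher_setkey() enforces it anyway:

#include <crypto/skcipher.h>
#include <linux/err.h>
#include <linux/errno.h>

static struct crypto_skcipher *example_skcipher_setup(const char *alg_name,
						       const u8 *key,
						       unsigned int keylen)
{
	struct crypto_skcipher *tfm;
	int err;

	tfm = crypto_alloc_skcipher(alg_name, 0, 0);
	if (IS_ERR(tfm))
		return tfm;

	if (keylen < crypto_skcipher_min_keysize(tfm) ||
	    keylen > crypto_skcipher_max_keysize(tfm)) {
		crypto_free_skcipher(tfm);
		return ERR_PTR(-EINVAL);
	}

	err = crypto_skcipher_setkey(tfm, key, keylen);
	if (err) {
		crypto_free_skcipher(tfm);
		return ERR_PTR(err);
	}
	return tfm;
}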
diff --git a/include/crypto/twofish.h b/include/crypto/twofish.h
index 2e2c09673d88..f6b307a58554 100644
--- a/include/crypto/twofish.h
+++ b/include/crypto/twofish.h
@@ -19,7 +19,7 @@ struct twofish_ctx {
};
int __twofish_setkey(struct twofish_ctx *ctx, const u8 *key,
- unsigned int key_len, u32 *flags);
+ unsigned int key_len);
int twofish_setkey(struct crypto_tfm *tfm, const u8 *key, unsigned int key_len);
#endif
diff --git a/include/crypto/xts.h b/include/crypto/xts.h
index 75fd96ff976b..0f8dba69feb4 100644
--- a/include/crypto/xts.h
+++ b/include/crypto/xts.h
@@ -8,28 +8,19 @@
#define XTS_BLOCK_SIZE 16
-#define XTS_TWEAK_CAST(x) ((void (*)(void *, u8*, const u8*))(x))
-
static inline int xts_check_key(struct crypto_tfm *tfm,
const u8 *key, unsigned int keylen)
{
- u32 *flags = &tfm->crt_flags;
-
/*
* key consists of keys of equal size concatenated, therefore
* the length must be even.
*/
- if (keylen % 2) {
- *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
+ if (keylen % 2)
return -EINVAL;
- }
/* ensure that the AES and tweak key are not identical */
- if (fips_enabled &&
- !crypto_memneq(key, key + (keylen / 2), keylen / 2)) {
- *flags |= CRYPTO_TFM_RES_WEAK_KEY;
+ if (fips_enabled && !crypto_memneq(key, key + (keylen / 2), keylen / 2))
return -EINVAL;
- }
return 0;
}
@@ -41,18 +32,14 @@ static inline int xts_verify_key(struct crypto_skcipher *tfm,
* key consists of keys of equal size concatenated, therefore
* the length must be even.
*/
- if (keylen % 2) {
- crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
+ if (keylen % 2)
return -EINVAL;
- }
/* ensure that the AES and tweak key are not identical */
if ((fips_enabled || (crypto_skcipher_get_flags(tfm) &
CRYPTO_TFM_REQ_FORBID_WEAK_KEYS)) &&
- !crypto_memneq(key, key + (keylen / 2), keylen / 2)) {
- crypto_skcipher_set_flags(tfm, CRYPTO_TFM_RES_WEAK_KEY);
+ !crypto_memneq(key, key + (keylen / 2), keylen / 2))
return -EINVAL;
- }
return 0;
}
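With the result-flag updates removed, xts_verify_key() becomes a plain early return in a driver's setkey; a rough sketch, where the function name and the key-programming step are hypothetical:

#include <crypto/xts.h>
#include <crypto/internal/skcipher.h>

static int example_xts_setkey(struct crypto_skcipher *tfm, const u8 *key,
			      unsigned int keylen)
{
	int err;

	/* Even length, and (in FIPS mode) distinct data and tweak keys. */
	err = xts_verify_key(tfm, key, keylen);
	if (err)
		return err;

	/*
	 * The first keylen / 2 bytes are the data key, the rest the tweak
	 * key; programming them into hardware or a context is driver-specific.
	 */
	return 0;
}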
diff --git a/include/drm/amd_asic_type.h b/include/drm/amd_asic_type.h
index 296aab724677..b1230e33d506 100644
--- a/include/drm/amd_asic_type.h
+++ b/include/drm/amd_asic_type.h
@@ -27,34 +27,36 @@
*/
enum amd_asic_type {
CHIP_TAHITI = 0,
- CHIP_PITCAIRN,
- CHIP_VERDE,
- CHIP_OLAND,
- CHIP_HAINAN,
- CHIP_BONAIRE,
- CHIP_KAVERI,
- CHIP_KABINI,
- CHIP_HAWAII,
- CHIP_MULLINS,
- CHIP_TOPAZ,
- CHIP_TONGA,
- CHIP_FIJI,
- CHIP_CARRIZO,
- CHIP_STONEY,
- CHIP_POLARIS10,
- CHIP_POLARIS11,
- CHIP_POLARIS12,
- CHIP_VEGAM,
- CHIP_VEGA10,
- CHIP_VEGA12,
- CHIP_VEGA20,
- CHIP_RAVEN,
- CHIP_ARCTURUS,
- CHIP_RENOIR,
- CHIP_NAVI10,
- CHIP_NAVI14,
- CHIP_NAVI12,
+ CHIP_PITCAIRN, /* 1 */
+ CHIP_VERDE, /* 2 */
+ CHIP_OLAND, /* 3 */
+ CHIP_HAINAN, /* 4 */
+ CHIP_BONAIRE, /* 5 */
+ CHIP_KAVERI, /* 6 */
+ CHIP_KABINI, /* 7 */
+ CHIP_HAWAII, /* 8 */
+ CHIP_MULLINS, /* 9 */
+ CHIP_TOPAZ, /* 10 */
+ CHIP_TONGA, /* 11 */
+ CHIP_FIJI, /* 12 */
+ CHIP_CARRIZO, /* 13 */
+ CHIP_STONEY, /* 14 */
+ CHIP_POLARIS10, /* 15 */
+ CHIP_POLARIS11, /* 16 */
+ CHIP_POLARIS12, /* 17 */
+ CHIP_VEGAM, /* 18 */
+ CHIP_VEGA10, /* 19 */
+ CHIP_VEGA12, /* 20 */
+ CHIP_VEGA20, /* 21 */
+ CHIP_RAVEN, /* 22 */
+ CHIP_ARCTURUS, /* 23 */
+ CHIP_RENOIR, /* 24 */
+ CHIP_NAVI10, /* 25 */
+ CHIP_NAVI14, /* 26 */
+ CHIP_NAVI12, /* 27 */
CHIP_LAST,
};
+extern const char *amdgpu_asic_name[];
+
#endif /*__AMD_ASIC_TYPE_H__ */
diff --git a/include/drm/ati_pcigart.h b/include/drm/ati_pcigart.h
deleted file mode 100644
index a728a1364e66..000000000000
--- a/include/drm/ati_pcigart.h
+++ /dev/null
@@ -1,31 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef DRM_ATI_PCIGART_H
-#define DRM_ATI_PCIGART_H
-
-#include <drm/drm_legacy.h>
-
-/* location of GART table */
-#define DRM_ATI_GART_MAIN 1
-#define DRM_ATI_GART_FB 2
-
-#define DRM_ATI_GART_PCI 1
-#define DRM_ATI_GART_PCIE 2
-#define DRM_ATI_GART_IGP 3
-
-struct drm_ati_pcigart_info {
- int gart_table_location;
- int gart_reg_if;
- void *addr;
- dma_addr_t bus_addr;
- dma_addr_t table_mask;
- struct drm_dma_handle *table_handle;
- struct drm_local_map mapping;
- int table_size;
-};
-
-extern int drm_ati_pcigart_init(struct drm_device *dev,
- struct drm_ati_pcigart_info * gart_info);
-extern int drm_ati_pcigart_cleanup(struct drm_device *dev,
- struct drm_ati_pcigart_info * gart_info);
-
-#endif
diff --git a/include/drm/bridge/dw_hdmi.h b/include/drm/bridge/dw_hdmi.h
index cf528c289857..9d4d5cc47969 100644
--- a/include/drm/bridge/dw_hdmi.h
+++ b/include/drm/bridge/dw_hdmi.h
@@ -6,6 +6,8 @@
#ifndef __DW_HDMI__
#define __DW_HDMI__
+#include <sound/hdmi-codec.h>
+
struct drm_connector;
struct drm_display_mode;
struct drm_encoder;
@@ -126,6 +128,7 @@ struct dw_hdmi_plat_data {
const struct drm_display_mode *mode);
unsigned long input_bus_format;
unsigned long input_bus_encoding;
+ bool use_drm_infoframe;
/* Vendor PHY support */
const struct dw_hdmi_phy_ops *phy_ops;
@@ -154,8 +157,11 @@ void dw_hdmi_resume(struct dw_hdmi *hdmi);
void dw_hdmi_setup_rx_sense(struct dw_hdmi *hdmi, bool hpd, bool rx_sense);
+int dw_hdmi_set_plugged_cb(struct dw_hdmi *hdmi, hdmi_codec_plugged_cb fn,
+ struct device *codec_dev);
void dw_hdmi_set_sample_rate(struct dw_hdmi *hdmi, unsigned int rate);
void dw_hdmi_set_channel_count(struct dw_hdmi *hdmi, unsigned int cnt);
+void dw_hdmi_set_channel_status(struct dw_hdmi *hdmi, u8 *channel_status);
void dw_hdmi_set_channel_allocation(struct dw_hdmi *hdmi, unsigned int ca);
void dw_hdmi_audio_enable(struct dw_hdmi *hdmi);
void dw_hdmi_audio_disable(struct dw_hdmi *hdmi);
diff --git a/include/drm/bridge/dw_mipi_dsi.h b/include/drm/bridge/dw_mipi_dsi.h
index 94cc64a342e1..b0e390b3288e 100644
--- a/include/drm/bridge/dw_mipi_dsi.h
+++ b/include/drm/bridge/dw_mipi_dsi.h
@@ -19,6 +19,13 @@ struct dw_mipi_dsi;
struct mipi_dsi_device;
struct platform_device;
+struct dw_mipi_dsi_dphy_timing {
+ u16 data_hs2lp;
+ u16 data_lp2hs;
+ u16 clk_hs2lp;
+ u16 clk_lp2hs;
+};
+
struct dw_mipi_dsi_phy_ops {
int (*init)(void *priv_data);
void (*power_on)(void *priv_data);
@@ -27,6 +34,8 @@ struct dw_mipi_dsi_phy_ops {
const struct drm_display_mode *mode,
unsigned long mode_flags, u32 lanes, u32 format,
unsigned int *lane_mbps);
+ int (*get_timing)(void *priv_data, unsigned int lane_mbps,
+ struct dw_mipi_dsi_dphy_timing *timing);
};
struct dw_mipi_dsi_host_ops {
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
deleted file mode 100644
index 037b1f7a87a5..000000000000
--- a/include/drm/drmP.h
+++ /dev/null
@@ -1,103 +0,0 @@
-/*
- * Internal Header for the Direct Rendering Manager
- *
- * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
- * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
- * Copyright (c) 2009-2010, Code Aurora Forum.
- * All rights reserved.
- *
- * Author: Rickard E. (Rik) Faith <faith@valinux.com>
- * Author: Gareth Hughes <gareth@valinux.com>
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
- * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
- * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
- * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
- * OTHER DEALINGS IN THE SOFTWARE.
- */
-
-#ifndef _DRM_P_H_
-#define _DRM_P_H_
-
-#include <linux/agp_backend.h>
-#include <linux/cdev.h>
-#include <linux/dma-mapping.h>
-#include <linux/file.h>
-#include <linux/fs.h>
-#include <linux/highmem.h>
-#include <linux/idr.h>
-#include <linux/init.h>
-#include <linux/io.h>
-#include <linux/jiffies.h>
-#include <linux/kernel.h>
-#include <linux/kref.h>
-#include <linux/miscdevice.h>
-#include <linux/mm.h>
-#include <linux/mutex.h>
-#include <linux/platform_device.h>
-#include <linux/poll.h>
-#include <linux/ratelimit.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/types.h>
-#include <linux/vmalloc.h>
-#include <linux/workqueue.h>
-#include <linux/dma-fence.h>
-#include <linux/module.h>
-#include <linux/mman.h>
-#include <asm/pgalloc.h>
-#include <linux/uaccess.h>
-
-#include <uapi/drm/drm.h>
-#include <uapi/drm/drm_mode.h>
-
-#include <drm/drm_agpsupport.h>
-#include <drm/drm_crtc.h>
-#include <drm/drm_fourcc.h>
-#include <drm/drm_hashtab.h>
-#include <drm/drm_mm.h>
-#include <drm/drm_os_linux.h>
-#include <drm/drm_sarea.h>
-#include <drm/drm_drv.h>
-#include <drm/drm_prime.h>
-#include <drm/drm_print.h>
-#include <drm/drm_pci.h>
-#include <drm/drm_file.h>
-#include <drm/drm_debugfs.h>
-#include <drm/drm_ioctl.h>
-#include <drm/drm_sysfs.h>
-#include <drm/drm_vblank.h>
-#include <drm/drm_irq.h>
-#include <drm/drm_device.h>
-
-struct module;
-
-struct device_node;
-struct videomode;
-struct dma_resv;
-struct dma_buf_attachment;
-
-struct pci_dev;
-struct pci_controller;
-
-/*
- * NOTE: drmP.h is obsolete - do NOT add anything to this file
- *
- * Do not include drmP.h in new files.
- * Work is ongoing to remove drmP.h includes from existing files
- */
-
-#endif
diff --git a/include/drm/drm_atomic.h b/include/drm/drm_atomic.h
index 927e1205d7aa..951dfb15c27b 100644
--- a/include/drm/drm_atomic.h
+++ b/include/drm/drm_atomic.h
@@ -35,7 +35,7 @@
* struct drm_crtc_commit - track modeset commits on a CRTC
*
* This structure is used to track pending modeset changes and atomic commit on
- * a per-CRTC basis. Since updating the list should never block this structure
+ * a per-CRTC basis. Since updating the list should never block, this structure
* is reference counted to allow waiters to safely wait on an event to complete,
* without holding any locks.
*
@@ -60,8 +60,8 @@
* wait for flip_done <----
* clean up atomic state
*
- * The important bit to know is that cleanup_done is the terminal event, but the
- * ordering between flip_done and hw_done is entirely up to the specific driver
+ * The important bit to know is that &cleanup_done is the terminal event, but the
+ * ordering between &flip_done and &hw_done is entirely up to the specific driver
* and modeset state change.
*
* For an implementation of how to use this look at
@@ -92,6 +92,9 @@ struct drm_crtc_commit {
 * commit is sent to userspace, or when an out-fence is signalled. Note
* that for most hardware, in most cases this happens after @hw_done is
* signalled.
+ *
+ * Completion of this stage is signalled implicitly by calling
+ * drm_crtc_send_vblank_event() on &drm_crtc_state.event.
*/
struct completion flip_done;
@@ -107,6 +110,9 @@ struct drm_crtc_commit {
* Note that this does not need to include separately reference-counted
* resources like backing storage buffer pinning, or runtime pm
* management.
+ *
+ * Drivers should call drm_atomic_helper_commit_hw_done() to signal
+ * completion of this stage.
*/
struct completion hw_done;
@@ -118,6 +124,9 @@ struct drm_crtc_commit {
* a vblank wait completed it might be a bit later. This completion is
* useful to throttle updates and avoid hardware updates getting ahead
* of the buffer cleanup too much.
+ *
+ * Drivers should call drm_atomic_helper_commit_cleanup_done() to signal
+ * completion of this stage.
*/
struct completion cleanup_done;
@@ -354,7 +363,7 @@ struct drm_atomic_state {
* When a connector or plane is not bound to any CRTC, it's still important
 * to preserve linearity to prevent the atomic states from being freed too early.
*
- * This commit (if set) is not bound to any crtc, but will be completed when
+ * This commit (if set) is not bound to any CRTC, but will be completed when
* drm_atomic_helper_commit_hw_done() is called.
*/
struct drm_crtc_commit *fake_commit;
@@ -467,12 +476,12 @@ drm_atomic_get_new_connector_for_encoder(struct drm_atomic_state *state,
struct drm_encoder *encoder);
/**
- * drm_atomic_get_existing_crtc_state - get crtc state, if it exists
+ * drm_atomic_get_existing_crtc_state - get CRTC state, if it exists
* @state: global atomic state object
- * @crtc: crtc to grab
+ * @crtc: CRTC to grab
*
- * This function returns the crtc state for the given crtc, or NULL
- * if the crtc is not part of the global atomic state.
+ * This function returns the CRTC state for the given CRTC, or NULL
+ * if the CRTC is not part of the global atomic state.
*
* This function is deprecated, @drm_atomic_get_old_crtc_state or
* @drm_atomic_get_new_crtc_state should be used instead.
@@ -485,12 +494,12 @@ drm_atomic_get_existing_crtc_state(struct drm_atomic_state *state,
}
/**
- * drm_atomic_get_old_crtc_state - get old crtc state, if it exists
+ * drm_atomic_get_old_crtc_state - get old CRTC state, if it exists
* @state: global atomic state object
- * @crtc: crtc to grab
+ * @crtc: CRTC to grab
*
- * This function returns the old crtc state for the given crtc, or
- * NULL if the crtc is not part of the global atomic state.
+ * This function returns the old CRTC state for the given CRTC, or
+ * NULL if the CRTC is not part of the global atomic state.
*/
static inline struct drm_crtc_state *
drm_atomic_get_old_crtc_state(struct drm_atomic_state *state,
@@ -499,12 +508,12 @@ drm_atomic_get_old_crtc_state(struct drm_atomic_state *state,
return state->crtcs[drm_crtc_index(crtc)].old_state;
}
/**
- * drm_atomic_get_new_crtc_state - get new crtc state, if it exists
+ * drm_atomic_get_new_crtc_state - get new CRTC state, if it exists
* @state: global atomic state object
- * @crtc: crtc to grab
+ * @crtc: CRTC to grab
*
- * This function returns the new crtc state for the given crtc, or
- * NULL if the crtc is not part of the global atomic state.
+ * This function returns the new CRTC state for the given CRTC, or
+ * NULL if the CRTC is not part of the global atomic state.
*/
static inline struct drm_crtc_state *
drm_atomic_get_new_crtc_state(struct drm_atomic_state *state,
@@ -693,6 +702,7 @@ void drm_state_dump(struct drm_device *dev, struct drm_printer *p);
(__i)++) \
for_each_if ((__state)->connectors[__i].ptr && \
((connector) = (__state)->connectors[__i].ptr, \
+ (void)(connector) /* Only to avoid unused-but-set-variable warning */, \
(old_connector_state) = (__state)->connectors[__i].old_state, \
(new_connector_state) = (__state)->connectors[__i].new_state, 1))
@@ -714,6 +724,7 @@ void drm_state_dump(struct drm_device *dev, struct drm_printer *p);
(__i)++) \
for_each_if ((__state)->connectors[__i].ptr && \
((connector) = (__state)->connectors[__i].ptr, \
+ (void)(connector) /* Only to avoid unused-but-set-variable warning */, \
(old_connector_state) = (__state)->connectors[__i].old_state, 1))
/**
@@ -734,7 +745,9 @@ void drm_state_dump(struct drm_device *dev, struct drm_printer *p);
(__i)++) \
for_each_if ((__state)->connectors[__i].ptr && \
((connector) = (__state)->connectors[__i].ptr, \
- (new_connector_state) = (__state)->connectors[__i].new_state, 1))
+ (void)(connector) /* Only to avoid unused-but-set-variable warning */, \
+ (new_connector_state) = (__state)->connectors[__i].new_state, \
+ (void)(new_connector_state) /* Only to avoid unused-but-set-variable warning */, 1))
/**
* for_each_oldnew_crtc_in_state - iterate over all CRTCs in an atomic update
@@ -754,7 +767,9 @@ void drm_state_dump(struct drm_device *dev, struct drm_printer *p);
(__i)++) \
for_each_if ((__state)->crtcs[__i].ptr && \
((crtc) = (__state)->crtcs[__i].ptr, \
+ (void)(crtc) /* Only to avoid unused-but-set-variable warning */, \
(old_crtc_state) = (__state)->crtcs[__i].old_state, \
+ (void)(old_crtc_state) /* Only to avoid unused-but-set-variable warning */, \
(new_crtc_state) = (__state)->crtcs[__i].new_state, 1))
/**
@@ -793,7 +808,9 @@ void drm_state_dump(struct drm_device *dev, struct drm_printer *p);
(__i)++) \
for_each_if ((__state)->crtcs[__i].ptr && \
((crtc) = (__state)->crtcs[__i].ptr, \
- (new_crtc_state) = (__state)->crtcs[__i].new_state, 1))
+ (void)(crtc) /* Only to avoid unused-but-set-variable warning */, \
+ (new_crtc_state) = (__state)->crtcs[__i].new_state, \
+ (void)(new_crtc_state) /* Only to avoid unused-but-set-variable warning */, 1))
/**
* for_each_oldnew_plane_in_state - iterate over all planes in an atomic update
@@ -813,6 +830,7 @@ void drm_state_dump(struct drm_device *dev, struct drm_printer *p);
(__i)++) \
for_each_if ((__state)->planes[__i].ptr && \
((plane) = (__state)->planes[__i].ptr, \
+ (void)(plane) /* Only to avoid unused-but-set-variable warning */, \
(old_plane_state) = (__state)->planes[__i].old_state,\
(new_plane_state) = (__state)->planes[__i].new_state, 1))
@@ -873,7 +891,9 @@ void drm_state_dump(struct drm_device *dev, struct drm_printer *p);
(__i)++) \
for_each_if ((__state)->planes[__i].ptr && \
((plane) = (__state)->planes[__i].ptr, \
- (new_plane_state) = (__state)->planes[__i].new_state, 1))
+ (void)(plane) /* Only to avoid unused-but-set-variable warning */, \
+ (new_plane_state) = (__state)->planes[__i].new_state, \
+ (void)(new_plane_state) /* Only to avoid unused-but-set-variable warning */, 1))
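A minimal sketch of why the (void) casts matter (the driver helper name below is hypothetical): a loop body that only consults the new plane state would otherwise make the compiler flag the plane cursor as set but unused.

static void foo_commit_planes(struct drm_atomic_state *state)
{
	struct drm_plane *plane;
	struct drm_plane_state *new_plane_state;
	int i;

	/* 'plane' is assigned by the macro but never read below; the
	 * (void)(plane) cast in the macro silences the warning. */
	for_each_new_plane_in_state(state, plane, new_plane_state, i) {
		if (new_plane_state->visible)
			foo_program_plane(new_plane_state); /* hypothetical helper */
	}
}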
/**
* for_each_oldnew_private_obj_in_state - iterate over all private objects in an atomic update
@@ -958,11 +978,11 @@ drm_atomic_crtc_needs_modeset(const struct drm_crtc_state *state)
}
/**
- * drm_atomic_crtc_effectively_active - compute whether crtc is actually active
+ * drm_atomic_crtc_effectively_active - compute whether CRTC is actually active
* @state: &drm_crtc_state for the CRTC
*
* When in self refresh mode, the crtc_state->active value will be false, since
- * the crtc is off. However in some cases we're interested in whether the crtc
+ * the CRTC is off. However in some cases we're interested in whether the CRTC
* is active, or effectively active (ie: it's connected to an active display).
* In these cases, use this function instead of just checking active.
*/
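A hedged usage sketch: a driver with self refresh support might keep its pixel clock running whenever the CRTC is effectively active, even though crtc_state->active reads false; old_crtc_state, foo_crtc and the clock helper are illustrative.

	/* Illustrative: only gate the clock once the CRTC is truly off. */
	if (!drm_atomic_crtc_effectively_active(old_crtc_state))
		foo_disable_pixel_clock(foo_crtc); /* hypothetical */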
diff --git a/include/drm/drm_atomic_helper.h b/include/drm/drm_atomic_helper.h
index bf4e07141d81..9db3cac48f4f 100644
--- a/include/drm/drm_atomic_helper.h
+++ b/include/drm/drm_atomic_helper.h
@@ -152,7 +152,7 @@ int drm_atomic_helper_legacy_gamma_set(struct drm_crtc *crtc,
/**
* drm_atomic_crtc_for_each_plane - iterate over planes currently attached to CRTC
* @plane: the loop cursor
- * @crtc: the crtc whose planes are iterated
+ * @crtc: the CRTC whose planes are iterated
*
* This iterates over the current state, useful (for example) when applying
* atomic state after it has been checked and swapped. To iterate over the
@@ -166,7 +166,7 @@ int drm_atomic_helper_legacy_gamma_set(struct drm_crtc *crtc,
/**
* drm_crtc_atomic_state_for_each_plane - iterate over attached planes in new state
* @plane: the loop cursor
- * @crtc_state: the incoming crtc-state
+ * @crtc_state: the incoming CRTC state
*
* Similar to drm_crtc_for_each_plane(), but iterates the planes that will be
* attached if the specified state is applied. Useful during for example
@@ -180,7 +180,7 @@ int drm_atomic_helper_legacy_gamma_set(struct drm_crtc *crtc,
* drm_crtc_atomic_state_for_each_plane_state - iterate over attached planes in new state
* @plane: the loop cursor
* @plane_state: loop cursor for the plane's state, must be const
- * @crtc_state: the incoming crtc-state
+ * @crtc_state: the incoming CRTC state
*
* Similar to drm_crtc_for_each_plane(), but iterates the planes that will be
* attached if the specified state is applied. Useful during for example
@@ -189,7 +189,7 @@ int drm_atomic_helper_legacy_gamma_set(struct drm_crtc *crtc,
*
* Compared to just drm_atomic_crtc_state_for_each_plane() this also fills in a
* const plane_state. This is useful when a driver just wants to peek at other
- * active planes on this crtc, but does not need to change it.
+ * active planes on this CRTC, but does not need to change it.
*/
#define drm_atomic_crtc_state_for_each_plane_state(plane, plane_state, crtc_state) \
drm_for_each_plane_mask(plane, (crtc_state)->state->dev, (crtc_state)->plane_mask) \
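A short usage sketch of the macro above, assuming a crtc_state already obtained from the atomic state (the counter is illustrative): peek at the planes that would be visible on this CRTC after the state is applied.

	struct drm_plane *plane;
	const struct drm_plane_state *plane_state;
	int n_visible = 0; /* illustrative counter */

	drm_atomic_crtc_state_for_each_plane_state(plane, plane_state, crtc_state) {
		if (plane_state->visible)
			n_visible++;
	}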
diff --git a/include/drm/drm_atomic_state_helper.h b/include/drm/drm_atomic_state_helper.h
index e4577cc11689..8171dea4cc22 100644
--- a/include/drm/drm_atomic_state_helper.h
+++ b/include/drm/drm_atomic_state_helper.h
@@ -37,6 +37,8 @@ struct drm_private_state;
struct drm_modeset_acquire_ctx;
struct drm_device;
+void __drm_atomic_helper_crtc_state_reset(struct drm_crtc_state *state,
+ struct drm_crtc *crtc);
void __drm_atomic_helper_crtc_reset(struct drm_crtc *crtc,
struct drm_crtc_state *state);
void drm_atomic_helper_crtc_reset(struct drm_crtc *crtc);
@@ -48,6 +50,8 @@ void __drm_atomic_helper_crtc_destroy_state(struct drm_crtc_state *state);
void drm_atomic_helper_crtc_destroy_state(struct drm_crtc *crtc,
struct drm_crtc_state *state);
+void __drm_atomic_helper_plane_state_reset(struct drm_plane_state *state,
+ struct drm_plane *plane);
void __drm_atomic_helper_plane_reset(struct drm_plane *plane,
struct drm_plane_state *state);
void drm_atomic_helper_plane_reset(struct drm_plane *plane);
@@ -59,6 +63,8 @@ void __drm_atomic_helper_plane_destroy_state(struct drm_plane_state *state);
void drm_atomic_helper_plane_destroy_state(struct drm_plane *plane,
struct drm_plane_state *state);
+void __drm_atomic_helper_connector_state_reset(struct drm_connector_state *conn_state,
+ struct drm_connector *connector);
void __drm_atomic_helper_connector_reset(struct drm_connector *connector,
struct drm_connector_state *conn_state);
void drm_atomic_helper_connector_reset(struct drm_connector *connector);
diff --git a/include/drm/drm_bridge.h b/include/drm/drm_bridge.h
index 7616f6562fe4..694e153a7531 100644
--- a/include/drm/drm_bridge.h
+++ b/include/drm/drm_bridge.h
@@ -25,6 +25,7 @@
#include <linux/list.h>
#include <linux/ctype.h>
+#include <drm/drm_encoder.h>
#include <drm/drm_mode_object.h>
#include <drm/drm_modes.h>
@@ -42,7 +43,7 @@ struct drm_bridge_funcs {
* This callback is invoked whenever our bridge is being attached to a
* &drm_encoder.
*
- * The attach callback is optional.
+ * The @attach callback is optional.
*
* RETURNS:
*
@@ -56,7 +57,7 @@ struct drm_bridge_funcs {
* This callback is invoked whenever our bridge is being detached from a
* &drm_encoder.
*
- * The detach callback is optional.
+ * The @detach callback is optional.
*/
void (*detach)(struct drm_bridge *bridge);
@@ -76,7 +77,7 @@ struct drm_bridge_funcs {
* atomic helpers to validate modes supplied by userspace in
* drm_atomic_helper_check_modeset().
*
- * This function is optional.
+ * The @mode_valid callback is optional.
*
* NOTE:
*
@@ -108,7 +109,7 @@ struct drm_bridge_funcs {
* this function passes all other callbacks must succeed for this
* configuration.
*
- * The mode_fixup callback is optional.
+ * The @mode_fixup callback is optional.
*
* NOTE:
*
@@ -146,7 +147,7 @@ struct drm_bridge_funcs {
* The bridge can assume that the display pipe (i.e. clocks and timing
* signals) feeding it is still running when this callback is called.
*
- * The disable callback is optional.
+ * The @disable callback is optional.
*/
void (*disable)(struct drm_bridge *bridge);
@@ -165,7 +166,7 @@ struct drm_bridge_funcs {
 * signals) feeding it is no longer running when this callback is
* called.
*
- * The post_disable callback is optional.
+ * The @post_disable callback is optional.
*/
void (*post_disable)(struct drm_bridge *bridge);
@@ -214,7 +215,7 @@ struct drm_bridge_funcs {
* not enable the display link feeding the next bridge in the chain (if
* there is one) when this callback is called.
*
- * The pre_enable callback is optional.
+ * The @pre_enable callback is optional.
*/
void (*pre_enable)(struct drm_bridge *bridge);
@@ -234,7 +235,7 @@ struct drm_bridge_funcs {
* callback must enable the display link feeding the next bridge in the
* chain if there is one.
*
- * The enable callback is optional.
+ * The @enable callback is optional.
*/
void (*enable)(struct drm_bridge *bridge);
@@ -254,14 +255,15 @@ struct drm_bridge_funcs {
* there is one) when this callback is called.
*
* Note that this function will only be invoked in the context of an
- * atomic commit. It will not be invoked from &drm_bridge_pre_enable. It
- * would be prudent to also provide an implementation of @pre_enable if
- * you are expecting driver calls into &drm_bridge_pre_enable.
+ * atomic commit. It will not be invoked from
+ * &drm_bridge_chain_pre_enable. It would be prudent to also provide an
+ * implementation of @pre_enable if you are expecting driver calls into
+ * &drm_bridge_chain_pre_enable.
*
* The @atomic_pre_enable callback is optional.
*/
void (*atomic_pre_enable)(struct drm_bridge *bridge,
- struct drm_atomic_state *state);
+ struct drm_atomic_state *old_state);
/**
* @atomic_enable:
@@ -279,14 +281,14 @@ struct drm_bridge_funcs {
* chain if there is one.
*
* Note that this function will only be invoked in the context of an
- * atomic commit. It will not be invoked from &drm_bridge_enable. It
- * would be prudent to also provide an implementation of @enable if
- * you are expecting driver calls into &drm_bridge_enable.
+ * atomic commit. It will not be invoked from &drm_bridge_chain_enable.
+ * It would be prudent to also provide an implementation of @enable if
+ * you are expecting driver calls into &drm_bridge_chain_enable.
*
- * The enable callback is optional.
+ * The @atomic_enable callback is optional.
*/
void (*atomic_enable)(struct drm_bridge *bridge,
- struct drm_atomic_state *state);
+ struct drm_atomic_state *old_state);
/**
* @atomic_disable:
*
@@ -301,14 +303,15 @@ struct drm_bridge_funcs {
* signals) feeding it is still running when this callback is called.
*
* Note that this function will only be invoked in the context of an
- * atomic commit. It will not be invoked from &drm_bridge_disable. It
- * would be prudent to also provide an implementation of @disable if
- * you are expecting driver calls into &drm_bridge_disable.
+ * atomic commit. It will not be invoked from
+ * &drm_bridge_chain_disable. It would be prudent to also provide an
+ * implementation of @disable if you are expecting driver calls into
+ * &drm_bridge_chain_disable.
*
- * The disable callback is optional.
+ * The @atomic_disable callback is optional.
*/
void (*atomic_disable)(struct drm_bridge *bridge,
- struct drm_atomic_state *state);
+ struct drm_atomic_state *old_state);
/**
* @atomic_post_disable:
@@ -325,15 +328,16 @@ struct drm_bridge_funcs {
* called.
*
* Note that this function will only be invoked in the context of an
- * atomic commit. It will not be invoked from &drm_bridge_post_disable.
+ * atomic commit. It will not be invoked from
+ * &drm_bridge_chain_post_disable.
* It would be prudent to also provide an implementation of
* @post_disable if you are expecting driver calls into
- * &drm_bridge_post_disable.
+ * &drm_bridge_chain_post_disable.
*
- * The post_disable callback is optional.
+ * The @atomic_post_disable callback is optional.
*/
void (*atomic_post_disable)(struct drm_bridge *bridge,
- struct drm_atomic_state *state);
+ struct drm_atomic_state *old_state);
};
/**
@@ -380,8 +384,8 @@ struct drm_bridge {
struct drm_device *dev;
/** @encoder: encoder to which this bridge is connected */
struct drm_encoder *encoder;
- /** @next: the next bridge in the encoder chain */
- struct drm_bridge *next;
+ /** @chain_node: used to form a bridge chain */
+ struct list_head chain_node;
#ifdef CONFIG_OF
/** @of_node: device node pointer to the bridge */
struct device_node *of_node;
@@ -406,35 +410,98 @@ struct drm_bridge *of_drm_find_bridge(struct device_node *np);
int drm_bridge_attach(struct drm_encoder *encoder, struct drm_bridge *bridge,
struct drm_bridge *previous);
-bool drm_bridge_mode_fixup(struct drm_bridge *bridge,
- const struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode);
-enum drm_mode_status drm_bridge_mode_valid(struct drm_bridge *bridge,
- const struct drm_display_mode *mode);
-void drm_bridge_disable(struct drm_bridge *bridge);
-void drm_bridge_post_disable(struct drm_bridge *bridge);
-void drm_bridge_mode_set(struct drm_bridge *bridge,
- const struct drm_display_mode *mode,
- const struct drm_display_mode *adjusted_mode);
-void drm_bridge_pre_enable(struct drm_bridge *bridge);
-void drm_bridge_enable(struct drm_bridge *bridge);
+/**
+ * drm_bridge_get_next_bridge() - Get the next bridge in the chain
+ * @bridge: bridge object
+ *
+ * RETURNS:
+ * the next bridge in the chain after @bridge, or NULL if @bridge is the last.
+ */
+static inline struct drm_bridge *
+drm_bridge_get_next_bridge(struct drm_bridge *bridge)
+{
+ if (list_is_last(&bridge->chain_node, &bridge->encoder->bridge_chain))
+ return NULL;
+
+ return list_next_entry(bridge, chain_node);
+}
+
+/**
+ * drm_bridge_get_prev_bridge() - Get the previous bridge in the chain
+ * @bridge: bridge object
+ *
+ * RETURNS:
+ * the previous bridge in the chain, or NULL if @bridge is the first.
+ */
+static inline struct drm_bridge *
+drm_bridge_get_prev_bridge(struct drm_bridge *bridge)
+{
+ if (list_is_first(&bridge->chain_node, &bridge->encoder->bridge_chain))
+ return NULL;
+
+ return list_prev_entry(bridge, chain_node);
+}
+
+/**
+ * drm_bridge_chain_get_first_bridge() - Get the first bridge in the chain
+ * @encoder: encoder object
+ *
+ * RETURNS:
+ * the first bridge in the chain, or NULL if @encoder has no bridge attached
+ * to it.
+ */
+static inline struct drm_bridge *
+drm_bridge_chain_get_first_bridge(struct drm_encoder *encoder)
+{
+ return list_first_entry_or_null(&encoder->bridge_chain,
+ struct drm_bridge, chain_node);
+}
+
+/**
+ * drm_for_each_bridge_in_chain() - Iterate over all bridges present in a chain
+ * @encoder: the encoder to iterate bridges on
+ * @bridge: a bridge pointer updated to point to the current bridge at each
+ * iteration
+ *
+ * Iterate over all bridges present in the bridge chain attached to @encoder.
+ */
+#define drm_for_each_bridge_in_chain(encoder, bridge) \
+ list_for_each_entry(bridge, &(encoder)->bridge_chain, chain_node)
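A hedged sketch of walking the new chain representation, assuming an encoder with bridges already attached:

	struct drm_bridge *bridge;
	unsigned int n_bridges = 0; /* illustrative */

	/* Visit every bridge hanging off this encoder, first to last. */
	drm_for_each_bridge_in_chain(encoder, bridge)
		n_bridges++;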
+
+bool drm_bridge_chain_mode_fixup(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode);
+enum drm_mode_status
+drm_bridge_chain_mode_valid(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode);
+void drm_bridge_chain_disable(struct drm_bridge *bridge);
+void drm_bridge_chain_post_disable(struct drm_bridge *bridge);
+void drm_bridge_chain_mode_set(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode,
+ const struct drm_display_mode *adjusted_mode);
+void drm_bridge_chain_pre_enable(struct drm_bridge *bridge);
+void drm_bridge_chain_enable(struct drm_bridge *bridge);
-void drm_atomic_bridge_disable(struct drm_bridge *bridge,
- struct drm_atomic_state *state);
-void drm_atomic_bridge_post_disable(struct drm_bridge *bridge,
+void drm_atomic_bridge_chain_disable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state);
+void drm_atomic_bridge_chain_post_disable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state);
+void drm_atomic_bridge_chain_pre_enable(struct drm_bridge *bridge,
+ struct drm_atomic_state *state);
+void drm_atomic_bridge_chain_enable(struct drm_bridge *bridge,
struct drm_atomic_state *state);
-void drm_atomic_bridge_pre_enable(struct drm_bridge *bridge,
- struct drm_atomic_state *state);
-void drm_atomic_bridge_enable(struct drm_bridge *bridge,
- struct drm_atomic_state *state);
#ifdef CONFIG_DRM_PANEL_BRIDGE
-struct drm_bridge *drm_panel_bridge_add(struct drm_panel *panel,
- u32 connector_type);
+struct drm_bridge *drm_panel_bridge_add(struct drm_panel *panel);
+struct drm_bridge *drm_panel_bridge_add_typed(struct drm_panel *panel,
+ u32 connector_type);
void drm_panel_bridge_remove(struct drm_bridge *bridge);
struct drm_bridge *devm_drm_panel_bridge_add(struct device *dev,
- struct drm_panel *panel,
- u32 connector_type);
+ struct drm_panel *panel);
+struct drm_bridge *devm_drm_panel_bridge_add_typed(struct device *dev,
+ struct drm_panel *panel,
+ u32 connector_type);
+struct drm_connector *drm_panel_bridge_connector(struct drm_bridge *bridge);
#endif
#endif
diff --git a/include/drm/drm_cache.h b/include/drm/drm_cache.h
index 987ff16b9420..e9ad4863d915 100644
--- a/include/drm/drm_cache.h
+++ b/include/drm/drm_cache.h
@@ -45,7 +45,7 @@ static inline bool drm_arch_can_wc_memory(void)
{
#if defined(CONFIG_PPC) && !defined(CONFIG_NOT_COHERENT_CACHE)
return false;
-#elif defined(CONFIG_MIPS) && defined(CONFIG_CPU_LOONGSON3)
+#elif defined(CONFIG_MIPS) && defined(CONFIG_CPU_LOONGSON64)
return false;
#elif defined(CONFIG_ARM) || defined(CONFIG_ARM64)
/*
diff --git a/include/drm/drm_color_mgmt.h b/include/drm/drm_color_mgmt.h
index d1c662d92ab7..81c298488b0c 100644
--- a/include/drm/drm_color_mgmt.h
+++ b/include/drm/drm_color_mgmt.h
@@ -29,7 +29,30 @@
struct drm_crtc;
struct drm_plane;
-uint32_t drm_color_lut_extract(uint32_t user_input, uint32_t bit_precision);
+/**
+ * drm_color_lut_extract - clamp and round LUT entries
+ * @user_input: input value
+ * @bit_precision: number of bits the hw LUT supports
+ *
+ * Extract a degamma/gamma LUT value provided by user (in the form of
+ * &drm_color_lut entries) and round it to the precision supported by the
+ * hardware.
+ */
+static inline u32 drm_color_lut_extract(u32 user_input, int bit_precision)
+{
+ u32 val = user_input;
+ u32 max = 0xffff >> (16 - bit_precision);
+
+ /* Round only if we're not using full precision. */
+ if (bit_precision < 16) {
+ val += 1UL << (16 - bit_precision - 1);
+ val >>= 16 - bit_precision;
+ }
+
+ return clamp_val(val, 0, max);
+}
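A quick worked example, assuming a 10-bit hardware LUT: 0xffff maps to 0x3ff and 0x8000 rounds to 0x200. The lut pointer below is an illustrative &drm_color_lut array, e.g. taken from the CRTC's GAMMA_LUT blob.

	/* Convert one 16-bit userspace entry to the 10-bit value the hw wants. */
	u32 red_10bit = drm_color_lut_extract(lut[i].red, 10);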
+
+u64 drm_color_ctm_s31_32_to_qm_n(u64 user_input, u32 m, u32 n);
void drm_crtc_enable_color_mgmt(struct drm_crtc *crtc,
uint degamma_lut_size,
diff --git a/include/drm/drm_connector.h b/include/drm/drm_connector.h
index 681cb590f952..221910948b37 100644
--- a/include/drm/drm_connector.h
+++ b/include/drm/drm_connector.h
@@ -188,19 +188,19 @@ struct drm_hdmi_info {
/**
* @y420_vdb_modes: bitmap of modes which can support ycbcr420
- * output only (not normal RGB/YCBCR444/422 outputs). There are total
- * 107 VICs defined by CEA-861-F spec, so the size is 128 bits to map
- * upto 128 VICs;
+ * output only (not normal RGB/YCBCR444/422 outputs). The max VIC
+ * defined by the CEA-861-G spec is 219, so the size is 256 bits to map
+ * up to 256 VICs.
*/
- unsigned long y420_vdb_modes[BITS_TO_LONGS(128)];
+ unsigned long y420_vdb_modes[BITS_TO_LONGS(256)];
/**
* @y420_cmdb_modes: bitmap of modes which can support ycbcr420
- * output also, along with normal HDMI outputs. There are total 107
- * VICs defined by CEA-861-F spec, so the size is 128 bits to map upto
- * 128 VICs;
+ * output also, along with normal HDMI outputs. The max VIC defined by
+ * the CEA-861-G spec is 219, so the size is 256 bits to map up to 256
+ * VICs.
*/
- unsigned long y420_cmdb_modes[BITS_TO_LONGS(128)];
+ unsigned long y420_cmdb_modes[BITS_TO_LONGS(256)];
 /** @y420_cmdb_map: bitmap of SVD index, to extract vcb modes */
u64 y420_cmdb_map;
@@ -281,6 +281,10 @@ enum drm_panel_orientation {
/* Additional Colorimetry extension added as part of CTA 861.G */
#define DRM_MODE_COLORIMETRY_DCI_P3_RGB_D65 11
#define DRM_MODE_COLORIMETRY_DCI_P3_RGB_THEATER 12
+/* Additional Colorimetry Options added for DP 1.4a VSC Colorimetry Format */
+#define DRM_MODE_COLORIMETRY_RGB_WIDE_FIXED 13
+#define DRM_MODE_COLORIMETRY_RGB_WIDE_FLOAT 14
+#define DRM_MODE_COLORIMETRY_BT601_YCC 15
/**
* enum drm_bus_flags - bus_flags info for &drm_display_info
@@ -1066,6 +1070,14 @@ struct drm_cmdline_mode {
unsigned int rotation_reflection;
/**
+ * @panel_orientation:
+ *
+ * drm-connector "panel orientation" property override value,
+ * DRM_MODE_PANEL_ORIENTATION_UNKNOWN if not set.
+ */
+ enum drm_panel_orientation panel_orientation;
+
+ /**
* @tv_margins: TV margins to apply to the mode.
*/
struct drm_connector_tv_margins tv_margins;
@@ -1288,12 +1300,12 @@ struct drm_connector {
/** @override_edid: has the EDID been overwritten through debugfs for testing? */
bool override_edid;
-#define DRM_CONNECTOR_MAX_ENCODER 3
/**
- * @encoder_ids: Valid encoders for this connector. Please only use
- * drm_connector_for_each_possible_encoder() to enumerate these.
+ * @possible_encoders: Bit mask of encoders that can drive this
+ * connector, drm_encoder_index() determines the index into the bitfield
+ * and the bits are set with drm_connector_attach_encoder().
*/
- uint32_t encoder_ids[DRM_CONNECTOR_MAX_ENCODER];
+ u32 possible_encoders;
/**
* @encoder: Currently bound encoder driving this connector, if any.
@@ -1523,7 +1535,8 @@ int drm_connector_attach_scaling_mode_property(struct drm_connector *connector,
int drm_connector_attach_vrr_capable_property(
struct drm_connector *connector);
int drm_mode_create_aspect_ratio_property(struct drm_device *dev);
-int drm_mode_create_colorspace_property(struct drm_connector *connector);
+int drm_mode_create_hdmi_colorspace_property(struct drm_connector *connector);
+int drm_mode_create_dp_colorspace_property(struct drm_connector *connector);
int drm_mode_create_content_type_property(struct drm_device *dev);
void drm_hdmi_avi_infoframe_content_type(struct hdmi_avi_infoframe *frame,
const struct drm_connector_state *conn_state);
@@ -1608,13 +1621,9 @@ bool drm_connector_has_possible_encoder(struct drm_connector *connector,
* drm_connector_for_each_possible_encoder - iterate connector's possible encoders
* @connector: &struct drm_connector pointer
* @encoder: &struct drm_encoder pointer used as cursor
- * @__i: int iteration cursor, for macro-internal use
*/
-#define drm_connector_for_each_possible_encoder(connector, encoder, __i) \
- for ((__i) = 0; (__i) < ARRAY_SIZE((connector)->encoder_ids) && \
- (connector)->encoder_ids[(__i)] != 0; (__i)++) \
- for_each_if((encoder) = \
- drm_encoder_find((connector)->dev, NULL, \
- (connector)->encoder_ids[(__i)])) \
+#define drm_connector_for_each_possible_encoder(connector, encoder) \
+ drm_for_each_encoder_mask(encoder, (connector)->dev, \
+ (connector)->possible_encoders)
#endif
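A hedged sketch of the simplified iterator (no index cursor any more): pick the first encoder that can drive a connector.

	struct drm_encoder *encoder;

	drm_connector_for_each_possible_encoder(connector, encoder)
		return encoder; /* first hit wins in this sketch */

	return NULL;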
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index 408b6f4e63c0..5e9b15a0e8c5 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -41,7 +41,6 @@
#include <drm/drm_connector.h>
#include <drm/drm_device.h>
#include <drm/drm_property.h>
-#include <drm/drm_bridge.h>
#include <drm/drm_edid.h>
#include <drm/drm_plane.h>
#include <drm/drm_blend.h>
diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h
index 8364502f92cf..bc04467f7c3a 100644
--- a/include/drm/drm_dp_helper.h
+++ b/include/drm/drm_dp_helper.h
@@ -23,9 +23,9 @@
#ifndef _DRM_DP_HELPER_H_
#define _DRM_DP_HELPER_H_
-#include <linux/types.h>
-#include <linux/i2c.h>
#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/types.h>
/*
* Unless otherwise noted, all values are from the DP 1.1a spec. Note that
@@ -42,6 +42,48 @@
* 1.2 formally includes both eDP and DPI definitions.
*/
+/* MSA (Main Stream Attribute) MISC bits (as MISC1<<8|MISC0) */
+#define DP_MSA_MISC_SYNC_CLOCK (1 << 0)
+#define DP_MSA_MISC_INTERLACE_VTOTAL_EVEN (1 << 8)
+#define DP_MSA_MISC_STEREO_NO_3D (0 << 9)
+#define DP_MSA_MISC_STEREO_PROG_RIGHT_EYE (1 << 9)
+#define DP_MSA_MISC_STEREO_PROG_LEFT_EYE (3 << 9)
+/* bits per component for non-RAW */
+#define DP_MSA_MISC_6_BPC (0 << 5)
+#define DP_MSA_MISC_8_BPC (1 << 5)
+#define DP_MSA_MISC_10_BPC (2 << 5)
+#define DP_MSA_MISC_12_BPC (3 << 5)
+#define DP_MSA_MISC_16_BPC (4 << 5)
+/* bits per component for RAW */
+#define DP_MSA_MISC_RAW_6_BPC (1 << 5)
+#define DP_MSA_MISC_RAW_7_BPC (2 << 5)
+#define DP_MSA_MISC_RAW_8_BPC (3 << 5)
+#define DP_MSA_MISC_RAW_10_BPC (4 << 5)
+#define DP_MSA_MISC_RAW_12_BPC (5 << 5)
+#define DP_MSA_MISC_RAW_14_BPC (6 << 5)
+#define DP_MSA_MISC_RAW_16_BPC (7 << 5)
+/* pixel encoding/colorimetry format */
+#define _DP_MSA_MISC_COLOR(misc1_7, misc0_21, misc0_3, misc0_4) \
+ ((misc1_7) << 15 | (misc0_4) << 4 | (misc0_3) << 3 | ((misc0_21) << 1))
+#define DP_MSA_MISC_COLOR_RGB _DP_MSA_MISC_COLOR(0, 0, 0, 0)
+#define DP_MSA_MISC_COLOR_CEA_RGB _DP_MSA_MISC_COLOR(0, 0, 1, 0)
+#define DP_MSA_MISC_COLOR_RGB_WIDE_FIXED _DP_MSA_MISC_COLOR(0, 3, 0, 0)
+#define DP_MSA_MISC_COLOR_RGB_WIDE_FLOAT _DP_MSA_MISC_COLOR(0, 3, 0, 1)
+#define DP_MSA_MISC_COLOR_Y_ONLY _DP_MSA_MISC_COLOR(1, 0, 0, 0)
+#define DP_MSA_MISC_COLOR_RAW _DP_MSA_MISC_COLOR(1, 1, 0, 0)
+#define DP_MSA_MISC_COLOR_YCBCR_422_BT601 _DP_MSA_MISC_COLOR(0, 1, 1, 0)
+#define DP_MSA_MISC_COLOR_YCBCR_422_BT709 _DP_MSA_MISC_COLOR(0, 1, 1, 1)
+#define DP_MSA_MISC_COLOR_YCBCR_444_BT601 _DP_MSA_MISC_COLOR(0, 2, 1, 0)
+#define DP_MSA_MISC_COLOR_YCBCR_444_BT709 _DP_MSA_MISC_COLOR(0, 2, 1, 1)
+#define DP_MSA_MISC_COLOR_XVYCC_422_BT601 _DP_MSA_MISC_COLOR(0, 1, 0, 0)
+#define DP_MSA_MISC_COLOR_XVYCC_422_BT709 _DP_MSA_MISC_COLOR(0, 1, 0, 1)
+#define DP_MSA_MISC_COLOR_XVYCC_444_BT601 _DP_MSA_MISC_COLOR(0, 2, 0, 0)
+#define DP_MSA_MISC_COLOR_XVYCC_444_BT709 _DP_MSA_MISC_COLOR(0, 2, 0, 1)
+#define DP_MSA_MISC_COLOR_OPRGB _DP_MSA_MISC_COLOR(0, 0, 1, 1)
+#define DP_MSA_MISC_COLOR_DCI_P3 _DP_MSA_MISC_COLOR(0, 3, 1, 0)
+#define DP_MSA_MISC_COLOR_COLOR_PROFILE _DP_MSA_MISC_COLOR(0, 3, 1, 1)
+#define DP_MSA_MISC_COLOR_VSC_SDP (1 << 14)
+
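An illustrative composition of the new MSA MISC defines, following the MISC1<<8|MISC0 packing noted above (an 8 bpc RGB stream with a synchronous clock):

	u32 misc = DP_MSA_MISC_SYNC_CLOCK | DP_MSA_MISC_8_BPC |
		   DP_MSA_MISC_COLOR_RGB;
	u8 misc0 = misc & 0xff;	/* low byte -> MSA MISC0 */
	u8 misc1 = misc >> 8;	/* high byte -> MSA MISC1 */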
#define DP_AUX_MAX_PAYLOAD_BYTES 16
#define DP_AUX_I2C_WRITE 0x0
@@ -95,6 +137,7 @@
# define DP_DETAILED_CAP_INFO_AVAILABLE (1 << 4) /* DPI */
#define DP_MAIN_LINK_CHANNEL_CODING 0x006
+# define DP_CAP_ANSI_8B10B (1 << 0)
#define DP_DOWN_STREAM_PORT_COUNT 0x007
# define DP_PORT_COUNT_MASK 0x0f
@@ -264,7 +307,7 @@
# define DP_DSC_THROUGHPUT_MODE_0_900 (12 << 0)
# define DP_DSC_THROUGHPUT_MODE_0_950 (13 << 0)
# define DP_DSC_THROUGHPUT_MODE_0_1000 (14 << 0)
-# define DP_DSC_THROUGHPUT_MODE_0_170 (15 << 4)
+# define DP_DSC_THROUGHPUT_MODE_0_170 (15 << 0) /* 1.4a */
# define DP_DSC_THROUGHPUT_MODE_1_MASK (0xf << 4)
# define DP_DSC_THROUGHPUT_MODE_1_SHIFT 4
# define DP_DSC_THROUGHPUT_MODE_1_UPSUPPORTED 0
@@ -562,6 +605,14 @@
# define DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT 6
#define DP_ADJUST_REQUEST_POST_CURSOR2 0x20c
+# define DP_ADJUST_POST_CURSOR2_LANE0_MASK 0x03
+# define DP_ADJUST_POST_CURSOR2_LANE0_SHIFT 0
+# define DP_ADJUST_POST_CURSOR2_LANE1_MASK 0x0c
+# define DP_ADJUST_POST_CURSOR2_LANE1_SHIFT 2
+# define DP_ADJUST_POST_CURSOR2_LANE2_MASK 0x30
+# define DP_ADJUST_POST_CURSOR2_LANE2_SHIFT 4
+# define DP_ADJUST_POST_CURSOR2_LANE3_MASK 0xc0
+# define DP_ADJUST_POST_CURSOR2_LANE3_SHIFT 6
#define DP_TEST_REQUEST 0x218
# define DP_TEST_LINK_TRAINING (1 << 0)
@@ -966,6 +1017,38 @@
#define DP_HDCP_2_2_REG_STREAM_TYPE_OFFSET 0x69494
#define DP_HDCP_2_2_REG_DBG_OFFSET 0x69518
+/* Link Training (LT)-tunable PHY Repeaters */
+#define DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV 0xf0000 /* 1.3 */
+#define DP_MAX_LINK_RATE_PHY_REPEATER 0xf0001 /* 1.4a */
+#define DP_PHY_REPEATER_CNT 0xf0002 /* 1.3 */
+#define DP_PHY_REPEATER_MODE 0xf0003 /* 1.3 */
+#define DP_MAX_LANE_COUNT_PHY_REPEATER 0xf0004 /* 1.4a */
+#define DP_Repeater_FEC_CAPABILITY 0xf0004 /* 1.4 */
+#define DP_PHY_REPEATER_EXTENDED_WAIT_TIMEOUT 0xf0005 /* 1.4a */
+#define DP_TRAINING_PATTERN_SET_PHY_REPEATER1 0xf0010 /* 1.3 */
+#define DP_TRAINING_LANE0_SET_PHY_REPEATER1 0xf0011 /* 1.3 */
+#define DP_TRAINING_LANE1_SET_PHY_REPEATER1 0xf0012 /* 1.3 */
+#define DP_TRAINING_LANE2_SET_PHY_REPEATER1 0xf0013 /* 1.3 */
+#define DP_TRAINING_LANE3_SET_PHY_REPEATER1 0xf0014 /* 1.3 */
+#define DP_TRAINING_AUX_RD_INTERVAL_PHY_REPEATER1 0xf0020 /* 1.4a */
+#define DP_TRANSMITTER_CAPABILITY_PHY_REPEATER1 0xf0021 /* 1.4a */
+#define DP_LANE0_1_STATUS_PHY_REPEATER1 0xf0030 /* 1.3 */
+#define DP_LANE2_3_STATUS_PHY_REPEATER1 0xf0031 /* 1.3 */
+#define DP_LANE_ALIGN_STATUS_UPDATED_PHY_REPEATER1 0xf0032 /* 1.3 */
+#define DP_ADJUST_REQUEST_LANE0_1_PHY_REPEATER1 0xf0033 /* 1.3 */
+#define DP_ADJUST_REQUEST_LANE2_3_PHY_REPEATER1 0xf0034 /* 1.3 */
+#define DP_SYMBOL_ERROR_COUNT_LANE0_PHY_REPEATER1 0xf0035 /* 1.3 */
+#define DP_SYMBOL_ERROR_COUNT_LANE1_PHY_REPEATER1 0xf0037 /* 1.3 */
+#define DP_SYMBOL_ERROR_COUNT_LANE2_PHY_REPEATER1 0xf0039 /* 1.3 */
+#define DP_SYMBOL_ERROR_COUNT_LANE3_PHY_REPEATER1 0xf003b /* 1.3 */
+#define DP_FEC_STATUS_PHY_REPEATER1 0xf0290 /* 1.4 */
+#define DP_FEC_ERROR_COUNT_PHY_REPEATER1 0xf0291 /* 1.4 */
+#define DP_FEC_CAPABILITY_PHY_REPEATER1 0xf0294 /* 1.4a */
+
+/* Repeater modes */
+#define DP_PHY_REPEATER_MODE_TRANSPARENT 0x55 /* 1.3 */
+#define DP_PHY_REPEATER_MODE_NON_TRANSPARENT 0xaa /* 1.3 */
+
/* DP HDCP message start offsets in DPCD address space */
#define DP_HDCP_2_2_AKE_INIT_OFFSET DP_HDCP_2_2_REG_RTX_OFFSET
#define DP_HDCP_2_2_AKE_SEND_CERT_OFFSET DP_HDCP_2_2_REG_CERT_RX_OFFSET
@@ -1049,6 +1132,8 @@ u8 drm_dp_get_adjust_request_voltage(const u8 link_status[DP_LINK_STATUS_SIZE],
int lane);
u8 drm_dp_get_adjust_request_pre_emphasis(const u8 link_status[DP_LINK_STATUS_SIZE],
int lane);
+u8 drm_dp_get_adjust_request_post_cursor(const u8 link_status[DP_LINK_STATUS_SIZE],
+ unsigned int lane);
#define DP_BRANCH_OUI_HEADER_SIZE 0xc
#define DP_RECEIVER_CAP_SIZE 0xf
@@ -1144,6 +1229,13 @@ drm_dp_enhanced_frame_cap(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
}
static inline bool
+drm_dp_fast_training_cap(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
+{
+ return dpcd[DP_DPCD_REV] >= 0x11 &&
+ (dpcd[DP_MAX_DOWNSPREAD] & DP_NO_AUX_HANDSHAKE_LINK_TRAINING);
+}
+
+static inline bool
drm_dp_tps3_supported(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
{
return dpcd[DP_DPCD_REV] >= 0x12 &&
@@ -1208,6 +1300,19 @@ drm_dp_sink_supports_fec(const u8 fec_capable)
return fec_capable & DP_FEC_CAPABLE;
}
+static inline bool
+drm_dp_channel_coding_supported(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
+{
+ return dpcd[DP_MAIN_LINK_CHANNEL_CODING] & DP_CAP_ANSI_8B10B;
+}
+
+static inline bool
+drm_dp_alternate_scrambler_reset_cap(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
+{
+ return dpcd[DP_EDP_CONFIGURATION_CAP] &
+ DP_ALTERNATE_SCRAMBLER_RESET_CAP;
+}
+
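A hedged sketch combining the new capability helpers with a raw DPCD read (the enable helper is hypothetical):

	u8 dpcd[DP_RECEIVER_CAP_SIZE];

	if (drm_dp_dpcd_read(aux, DP_DPCD_REV, dpcd, sizeof(dpcd)) == sizeof(dpcd) &&
	    drm_dp_fast_training_cap(dpcd) &&
	    drm_dp_channel_coding_supported(dpcd))
		foo_use_fast_link_training(); /* hypothetical */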
/*
* DisplayPort AUX channel
*/
@@ -1230,20 +1335,19 @@ struct drm_dp_aux_msg {
struct cec_adapter;
struct edid;
+struct drm_connector;
/**
* struct drm_dp_aux_cec - DisplayPort CEC-Tunneling-over-AUX
* @lock: mutex protecting this struct
* @adap: the CEC adapter for CEC-Tunneling-over-AUX support.
- * @name: name of the CEC adapter
- * @parent: parent device of the CEC adapter
+ * @connector: the connector this CEC adapter is associated with
* @unregister_work: unregister the CEC adapter
*/
struct drm_dp_aux_cec {
struct mutex lock;
struct cec_adapter *adap;
- const char *name;
- struct device *parent;
+ struct drm_connector *connector;
struct delayed_work unregister_work;
};
@@ -1353,22 +1457,6 @@ static inline ssize_t drm_dp_dpcd_writeb(struct drm_dp_aux *aux,
int drm_dp_dpcd_read_link_status(struct drm_dp_aux *aux,
u8 status[DP_LINK_STATUS_SIZE]);
-/*
- * DisplayPort link
- */
-#define DP_LINK_CAP_ENHANCED_FRAMING (1 << 0)
-
-struct drm_dp_link {
- unsigned char revision;
- unsigned int rate;
- unsigned int num_lanes;
- unsigned long capabilities;
-};
-
-int drm_dp_link_probe(struct drm_dp_aux *aux, struct drm_dp_link *link);
-int drm_dp_link_power_up(struct drm_dp_aux *aux, struct drm_dp_link *link);
-int drm_dp_link_power_down(struct drm_dp_aux *aux, struct drm_dp_link *link);
-int drm_dp_link_configure(struct drm_dp_aux *aux, struct drm_dp_link *link);
int drm_dp_downstream_max_clock(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
const u8 port_cap[4]);
int drm_dp_downstream_max_bpc(const u8 dpcd[DP_RECEIVER_CAP_SIZE],
@@ -1377,6 +1465,7 @@ int drm_dp_downstream_id(struct drm_dp_aux *aux, char id[6]);
void drm_dp_downstream_debug(struct seq_file *m, const u8 dpcd[DP_RECEIVER_CAP_SIZE],
const u8 port_cap[4], struct drm_dp_aux *aux);
+void drm_dp_remote_aux_init(struct drm_dp_aux *aux);
void drm_dp_aux_init(struct drm_dp_aux *aux);
int drm_dp_aux_register(struct drm_dp_aux *aux);
void drm_dp_aux_unregister(struct drm_dp_aux *aux);
@@ -1434,6 +1523,13 @@ enum drm_dp_quirk {
* The driver should ignore SINK_COUNT during detection.
*/
DP_DPCD_QUIRK_NO_SINK_COUNT,
+ /**
+ * @DP_DPCD_QUIRK_DSC_WITHOUT_VIRTUAL_DPCD:
+ *
+ * The device supports MST DSC despite not supporting Virtual DPCD.
+ * The DSC caps can be read from the physical aux instead.
+ */
+ DP_DPCD_QUIRK_DSC_WITHOUT_VIRTUAL_DPCD,
};
/**
@@ -1451,8 +1547,8 @@ drm_dp_has_quirk(const struct drm_dp_desc *desc, enum drm_dp_quirk quirk)
#ifdef CONFIG_DRM_DP_CEC
void drm_dp_cec_irq(struct drm_dp_aux *aux);
-void drm_dp_cec_register_connector(struct drm_dp_aux *aux, const char *name,
- struct device *parent);
+void drm_dp_cec_register_connector(struct drm_dp_aux *aux,
+ struct drm_connector *connector);
void drm_dp_cec_unregister_connector(struct drm_dp_aux *aux);
void drm_dp_cec_set_edid(struct drm_dp_aux *aux, const struct edid *edid);
void drm_dp_cec_unset_edid(struct drm_dp_aux *aux);
@@ -1461,9 +1557,9 @@ static inline void drm_dp_cec_irq(struct drm_dp_aux *aux)
{
}
-static inline void drm_dp_cec_register_connector(struct drm_dp_aux *aux,
- const char *name,
- struct device *parent)
+static inline void
+drm_dp_cec_register_connector(struct drm_dp_aux *aux,
+ struct drm_connector *connector)
{
}
diff --git a/include/drm/drm_dp_mst_helper.h b/include/drm/drm_dp_mst_helper.h
index 2ba6253ea6d3..41725d88d27e 100644
--- a/include/drm/drm_dp_mst_helper.h
+++ b/include/drm/drm_dp_mst_helper.h
@@ -26,6 +26,26 @@
#include <drm/drm_dp_helper.h>
#include <drm/drm_atomic.h>
+#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
+#include <linux/stackdepot.h>
+#include <linux/timekeeping.h>
+
+enum drm_dp_mst_topology_ref_type {
+ DRM_DP_MST_TOPOLOGY_REF_GET,
+ DRM_DP_MST_TOPOLOGY_REF_PUT,
+};
+
+struct drm_dp_mst_topology_ref_history {
+ struct drm_dp_mst_topology_ref_entry {
+ enum drm_dp_mst_topology_ref_type type;
+ int count;
+ ktime_t ts_nsec;
+ depot_stack_handle_t backtrace;
+ } *entries;
+ int len;
+};
+#endif /* IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS) */
+
struct drm_dp_mst_branch;
/**
@@ -45,21 +65,31 @@ struct drm_dp_vcpi {
/**
* struct drm_dp_mst_port - MST port
* @port_num: port number
- * @input: if this port is an input port.
- * @mcs: message capability status - DP 1.2 spec.
- * @ddps: DisplayPort Device Plug Status - DP 1.2
- * @pdt: Peer Device Type
- * @ldps: Legacy Device Plug Status
- * @dpcd_rev: DPCD revision of device on this port
- * @num_sdp_streams: Number of simultaneous streams
- * @num_sdp_stream_sinks: Number of stream sinks
- * @available_pbn: Available bandwidth for this port.
+ * @input: if this port is an input port. Protected by
+ * &drm_dp_mst_topology_mgr.base.lock.
+ * @mcs: message capability status - DP 1.2 spec. Protected by
+ * &drm_dp_mst_topology_mgr.base.lock.
+ * @ddps: DisplayPort Device Plug Status - DP 1.2. Protected by
+ * &drm_dp_mst_topology_mgr.base.lock.
+ * @pdt: Peer Device Type. Protected by
+ * &drm_dp_mst_topology_mgr.base.lock.
+ * @ldps: Legacy Device Plug Status. Protected by
+ * &drm_dp_mst_topology_mgr.base.lock.
+ * @dpcd_rev: DPCD revision of device on this port. Protected by
+ * &drm_dp_mst_topology_mgr.base.lock.
+ * @num_sdp_streams: Number of simultaneous streams. Protected by
+ * &drm_dp_mst_topology_mgr.base.lock.
+ * @num_sdp_stream_sinks: Number of stream sinks. Protected by
+ * &drm_dp_mst_topology_mgr.base.lock.
+ * @full_pbn: Max possible bandwidth for this port. Protected by
+ * &drm_dp_mst_topology_mgr.base.lock.
* @next: link to next port on this branch device
- * @mstb: branch device attach below this port
- * @aux: i2c aux transport to talk to device connected to this port.
+ * @aux: i2c aux transport to talk to device connected to this port, protected
+ * by &drm_dp_mst_topology_mgr.base.lock.
* @parent: branch device parent of this port
* @vcpi: Virtual Channel Payload info for this port.
- * @connector: DRM connector this port is connected to.
+ * @connector: DRM connector this port is connected to. Protected by
+ * &drm_dp_mst_topology_mgr.base.lock.
* @mgr: topology manager this port lives under.
*
* This structure represents an MST port endpoint on a device somewhere
@@ -79,6 +109,14 @@ struct drm_dp_mst_port {
*/
struct kref malloc_kref;
+#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
+ /**
+ * @topology_ref_history: A history of each topology
+ * reference/dereference. See CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS.
+ */
+ struct drm_dp_mst_topology_ref_history topology_ref_history;
+#endif
+
u8 port_num;
bool input;
bool mcs;
@@ -88,9 +126,19 @@ struct drm_dp_mst_port {
u8 dpcd_rev;
u8 num_sdp_streams;
u8 num_sdp_stream_sinks;
- uint16_t available_pbn;
+ uint16_t full_pbn;
struct list_head next;
- struct drm_dp_mst_branch *mstb; /* pointer to an mstb if this port has one */
+ /**
+ * @mstb: the branch device connected to this port, if there is one.
+ * This should be considered protected for reading by
+ * &drm_dp_mst_topology_mgr.lock. There are two exceptions to this:
+ * &drm_dp_mst_topology_mgr.up_req_work and
+ * &drm_dp_mst_topology_mgr.work, which do not grab
+ * &drm_dp_mst_topology_mgr.lock during reads but are the only
+ * updaters of this list and are protected from writing concurrently
+ * by &drm_dp_mst_topology_mgr.probe_lock.
+ */
+ struct drm_dp_mst_branch *mstb;
struct drm_dp_aux aux; /* i2c bus for this port? */
struct drm_dp_mst_branch *parent;
@@ -108,6 +156,8 @@ struct drm_dp_mst_port {
* audio-capable.
*/
bool has_audio;
+
+ bool fec_capable;
};
/**
@@ -116,7 +166,6 @@ struct drm_dp_mst_port {
* @lct: Link count total to talk to this branch device.
* @num_ports: number of ports on the branch.
* @msg_slots: one bit per transmitted msg slot.
- * @ports: linked list of ports on this branch.
* @port_parent: pointer to the port parent, NULL if toplevel.
* @mgr: topology manager for this branch device.
* @tx_slots: transmission slots for this device.
@@ -143,11 +192,35 @@ struct drm_dp_mst_branch {
*/
struct kref malloc_kref;
+#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
+ /**
+ * @topology_ref_history: A history of each topology
+ * reference/dereference. See CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS.
+ */
+ struct drm_dp_mst_topology_ref_history topology_ref_history;
+#endif
+
+ /**
+ * @destroy_next: linked-list entry used by
+ * drm_dp_delayed_destroy_work()
+ */
+ struct list_head destroy_next;
+
u8 rad[8];
u8 lct;
int num_ports;
int msg_slots;
+ /**
+ * @ports: the list of ports on this branch device. This should be
+ * considered protected for reading by &drm_dp_mst_topology_mgr.lock.
+ * There are two exceptions to this:
+ * &drm_dp_mst_topology_mgr.up_req_work and
+ * &drm_dp_mst_topology_mgr.work, which do not grab
+ * &drm_dp_mst_topology_mgr.lock during reads but are the only
+ * updaters of this list and are protected from updating the list
+	 * concurrently by &drm_dp_mst_topology_mgr.probe_lock.
+ */
struct list_head ports;
/* list of tx ops queue for this port */
@@ -287,7 +360,7 @@ struct drm_dp_remote_dpcd_write {
struct drm_dp_remote_i2c_read {
u8 num_transactions;
u8 port_number;
- struct {
+ struct drm_dp_remote_i2c_read_tx {
u8 i2c_dev_id;
u8 num_bytes;
u8 *bytes;
@@ -312,6 +385,7 @@ struct drm_dp_port_number_req {
struct drm_dp_enum_path_resources_ack_reply {
u8 port_number;
+ bool fec_capable;
u16 full_payload_bw_number;
u16 avail_payload_bw_number;
};
@@ -334,7 +408,7 @@ struct drm_dp_resource_status_notify {
struct drm_dp_query_payload_ack_reply {
u8 port_number;
- u8 allocated_pbn;
+ u16 allocated_pbn;
};
struct drm_dp_sideband_msg_req_body {
@@ -428,6 +502,8 @@ struct drm_dp_payload {
struct drm_dp_vcpi_allocation {
struct drm_dp_mst_port *port;
int vcpi;
+ int pbn;
+ bool dsc_enabled;
struct list_head next;
};
@@ -481,28 +557,39 @@ struct drm_dp_mst_topology_mgr {
int conn_base_id;
/**
- * @down_rep_recv: Message receiver state for down replies. This and
- * @up_req_recv are only ever access from the work item, which is
- * serialised.
+ * @down_rep_recv: Message receiver state for down replies.
*/
struct drm_dp_sideband_msg_rx down_rep_recv;
/**
- * @up_req_recv: Message receiver state for up requests. This and
- * @down_rep_recv are only ever access from the work item, which is
- * serialised.
+ * @up_req_recv: Message receiver state for up requests.
*/
struct drm_dp_sideband_msg_rx up_req_recv;
/**
- * @lock: protects mst state, primary, dpcd.
+ * @lock: protects @mst_state, @mst_primary, @dpcd, and
+ * @payload_id_table_cleared.
*/
struct mutex lock;
/**
+ * @probe_lock: Prevents @work and @up_req_work, the only writers of
+ * &drm_dp_mst_port.mstb and &drm_dp_mst_branch.ports, from racing
+ * while they update the topology.
+ */
+ struct mutex probe_lock;
+
+ /**
* @mst_state: If this manager is enabled for an MST capable port. False
	 * if no MST sink/branch device is connected.
*/
- bool mst_state;
+ bool mst_state : 1;
+
+ /**
+ * @payload_id_table_cleared: Whether or not we've cleared the payload
+ * ID table for @mst_primary. Protected by @lock.
+ */
+ bool payload_id_table_cleared : 1;
+
/**
* @mst_primary: Pointer to the primary/first branch device.
*/
@@ -531,6 +618,12 @@ struct drm_dp_mst_topology_mgr {
* &drm_dp_sideband_msg_tx.state once they are queued
*/
struct mutex qlock;
+
+ /**
+	 * @is_waiting_for_dwn_reply: indicates whether we are waiting for a down reply
+ */
+ bool is_waiting_for_dwn_reply;
+
/**
* @tx_msg_downq: List of pending down replies.
*/
@@ -575,18 +668,49 @@ struct drm_dp_mst_topology_mgr {
struct work_struct tx_work;
/**
- * @destroy_connector_list: List of to be destroyed connectors.
+	 * @destroy_port_list: List of to be destroyed ports.
+ */
+ struct list_head destroy_port_list;
+ /**
+ * @destroy_branch_device_list: List of to be destroyed branch
+ * devices.
*/
- struct list_head destroy_connector_list;
+ struct list_head destroy_branch_device_list;
/**
- * @destroy_connector_lock: Protects @connector_list.
+ * @delayed_destroy_lock: Protects @destroy_port_list and
+ * @destroy_branch_device_list.
*/
- struct mutex destroy_connector_lock;
+ struct mutex delayed_destroy_lock;
/**
- * @destroy_connector_work: Work item to destroy connectors. Needed to
- * avoid locking inversion.
+ * @delayed_destroy_work: Work item to destroy MST port and branch
+ * devices, needed to avoid locking inversion.
*/
- struct work_struct destroy_connector_work;
+ struct work_struct delayed_destroy_work;
+
+ /**
+ * @up_req_list: List of pending up requests from the topology that
+ * need to be processed, in chronological order.
+ */
+ struct list_head up_req_list;
+ /**
+ * @up_req_lock: Protects @up_req_list
+ */
+ struct mutex up_req_lock;
+ /**
+ * @up_req_work: Work item to process up requests received from the
+ * topology. Needed to avoid blocking hotplug handling and sideband
+ * transmissions.
+ */
+ struct work_struct up_req_work;
+
+#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
+ /**
+ * @topology_ref_history_lock: protects
+ * &drm_dp_mst_port.topology_ref_history and
+ * &drm_dp_mst_branch.topology_ref_history.
+ */
+ struct mutex topology_ref_history_lock;
+#endif
};
int drm_dp_mst_topology_mgr_init(struct drm_dp_mst_topology_mgr *mgr,
@@ -603,15 +727,18 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
int drm_dp_mst_hpd_irq(struct drm_dp_mst_topology_mgr *mgr, u8 *esi, bool *handled);
-enum drm_connector_status drm_dp_mst_detect_port(struct drm_connector *connector, struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port);
+int
+drm_dp_mst_detect_port(struct drm_connector *connector,
+ struct drm_modeset_acquire_ctx *ctx,
+ struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_port *port);
bool drm_dp_mst_port_has_audio(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_port *port);
struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port);
-int drm_dp_calc_pbn_mode(int clock, int bpp);
-
+int drm_dp_calc_pbn_mode(int clock, int bpp, bool dsc);
bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_port *port, int pbn, int slots);
@@ -642,7 +769,8 @@ void drm_dp_mst_dump_topology(struct seq_file *m,
void drm_dp_mst_topology_mgr_suspend(struct drm_dp_mst_topology_mgr *mgr);
int __must_check
-drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr);
+drm_dp_mst_topology_mgr_resume(struct drm_dp_mst_topology_mgr *mgr,
+ bool sync);
ssize_t drm_dp_mst_dpcd_read(struct drm_dp_aux *aux,
unsigned int offset, void *buffer, size_t size);
@@ -659,7 +787,15 @@ struct drm_dp_mst_topology_state *drm_atomic_get_mst_topology_state(struct drm_a
int __must_check
drm_dp_atomic_find_vcpi_slots(struct drm_atomic_state *state,
struct drm_dp_mst_topology_mgr *mgr,
- struct drm_dp_mst_port *port, int pbn);
+ struct drm_dp_mst_port *port, int pbn,
+ int pbn_div);
+int drm_dp_mst_atomic_enable_dsc(struct drm_atomic_state *state,
+ struct drm_dp_mst_port *port,
+ int pbn, int pbn_div,
+ bool enable);
+int __must_check
+drm_dp_mst_add_affected_dsc_crtcs(struct drm_atomic_state *state,
+ struct drm_dp_mst_topology_mgr *mgr);
int __must_check
drm_dp_atomic_release_vcpi_slots(struct drm_atomic_state *state,
struct drm_dp_mst_topology_mgr *mgr,
@@ -671,6 +807,8 @@ int __must_check drm_dp_mst_atomic_check(struct drm_atomic_state *state);
void drm_dp_mst_get_port_malloc(struct drm_dp_mst_port *port);
void drm_dp_mst_put_port_malloc(struct drm_dp_mst_port *port);
+struct drm_dp_aux *drm_dp_mst_dsc_aux_for_port(struct drm_dp_mst_port *port);
+
extern const struct drm_private_state_funcs drm_dp_mst_topology_state_funcs;
/**
diff --git a/include/drm/drm_drv.h b/include/drm/drm_drv.h
index 8976afe48c1c..cf13470810a5 100644
--- a/include/drm/drm_drv.h
+++ b/include/drm/drm_drv.h
@@ -778,8 +778,6 @@ struct drm_driver {
int dev_priv_size;
};
-extern unsigned int drm_debug;
-
int drm_dev_init(struct drm_device *dev,
struct drm_driver *driver,
struct device *parent);
diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h
index b9719418c3d2..f0b03d401c27 100644
--- a/include/drm/drm_edid.h
+++ b/include/drm/drm_edid.h
@@ -368,6 +368,10 @@ drm_hdmi_avi_infoframe_colorspace(struct hdmi_avi_infoframe *frame,
const struct drm_connector_state *conn_state);
void
+drm_hdmi_avi_infoframe_bars(struct hdmi_avi_infoframe *frame,
+ const struct drm_connector_state *conn_state);
+
+void
drm_hdmi_avi_infoframe_quant_range(struct hdmi_avi_infoframe *frame,
struct drm_connector *connector,
const struct drm_display_mode *mode,
@@ -481,7 +485,6 @@ int drm_add_edid_modes(struct drm_connector *connector, struct edid *edid);
int drm_add_override_edid_modes(struct drm_connector *connector);
u8 drm_match_cea_mode(const struct drm_display_mode *to_match);
-enum hdmi_picture_aspect drm_get_cea_aspect_ratio(const u8 video_code);
bool drm_detect_hdmi_monitor(struct edid *edid);
bool drm_detect_monitor_audio(struct edid *edid);
enum hdmi_quantization_range
diff --git a/include/drm/drm_encoder.h b/include/drm/drm_encoder.h
index 70cfca03d812..5623994b6e9e 100644
--- a/include/drm/drm_encoder.h
+++ b/include/drm/drm_encoder.h
@@ -140,7 +140,7 @@ struct drm_encoder {
* @possible_crtcs: Bitmask of potential CRTC bindings, using
* drm_crtc_index() as the index into the bitfield. The driver must set
* the bits for all &drm_crtc objects this encoder can be connected to
- * before calling drm_encoder_init().
+ * before calling drm_dev_register().
*
* In reality almost every driver gets this wrong.
*
@@ -154,7 +154,7 @@ struct drm_encoder {
* using drm_encoder_index() as the index into the bitfield. The driver
* must set the bits for all &drm_encoder objects which can clone a
* &drm_crtc together with this encoder before calling
- * drm_encoder_init(). Drivers should set the bit representing the
+ * drm_dev_register(). Drivers should set the bit representing the
* encoder itself, too. Cloning bits should be set such that when two
* encoders can be used in a cloned configuration, they both should have
	 * each other's bit set.
@@ -172,7 +172,12 @@ struct drm_encoder {
* &drm_connector_state.crtc.
*/
struct drm_crtc *crtc;
- struct drm_bridge *bridge;
+
+ /**
+ * @bridge_chain: Bridges attached to this encoder.
+ */
+ struct list_head bridge_chain;
+
const struct drm_encoder_funcs *funcs;
const struct drm_encoder_helper_funcs *helper_private;
};
@@ -198,7 +203,7 @@ static inline unsigned int drm_encoder_index(const struct drm_encoder *encoder)
}
/**
- * drm_encoder_mask - find the mask of a registered ENCODER
+ * drm_encoder_mask - find the mask of a registered encoder
* @encoder: encoder to find mask for
*
* Given a registered encoder, return the mask bit of that encoder for an
diff --git a/include/drm/drm_fb_cma_helper.h b/include/drm/drm_fb_cma_helper.h
index 4becb09975a4..795aea1d0a25 100644
--- a/include/drm/drm_fb_cma_helper.h
+++ b/include/drm/drm_fb_cma_helper.h
@@ -2,6 +2,8 @@
#ifndef __DRM_FB_CMA_HELPER_H__
#define __DRM_FB_CMA_HELPER_H__
+#include <linux/types.h>
+
struct drm_framebuffer;
struct drm_plane_state;
diff --git a/include/drm/drm_fb_helper.h b/include/drm/drm_fb_helper.h
index c8a8ae2a678a..1c6633da0f91 100644
--- a/include/drm/drm_fb_helper.h
+++ b/include/drm/drm_fb_helper.h
@@ -231,11 +231,8 @@ void drm_fb_helper_fill_info(struct fb_info *info,
struct drm_fb_helper *fb_helper,
struct drm_fb_helper_surface_size *sizes);
-void drm_fb_helper_unlink_fbi(struct drm_fb_helper *fb_helper);
-
void drm_fb_helper_deferred_io(struct fb_info *info,
struct list_head *pagelist);
-int drm_fb_helper_defio_init(struct drm_fb_helper *fb_helper);
ssize_t drm_fb_helper_sys_read(struct fb_info *info, char __user *buf,
size_t count, loff_t *ppos);
@@ -270,18 +267,9 @@ int drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper, int bpp_sel);
int drm_fb_helper_debug_enter(struct fb_info *info);
int drm_fb_helper_debug_leave(struct fb_info *info);
-int drm_fb_helper_fbdev_setup(struct drm_device *dev,
- struct drm_fb_helper *fb_helper,
- const struct drm_fb_helper_funcs *funcs,
- unsigned int preferred_bpp,
- unsigned int max_conn_count);
-void drm_fb_helper_fbdev_teardown(struct drm_device *dev);
-
void drm_fb_helper_lastclose(struct drm_device *dev);
void drm_fb_helper_output_poll_changed(struct drm_device *dev);
-int drm_fb_helper_generic_probe(struct drm_fb_helper *fb_helper,
- struct drm_fb_helper_surface_size *sizes);
int drm_fbdev_generic_setup(struct drm_device *dev, unsigned int preferred_bpp);
#else
static inline void drm_fb_helper_prepare(struct drm_device *dev,
@@ -364,10 +352,6 @@ static inline int drm_fb_helper_ioctl(struct fb_info *info, unsigned int cmd,
return 0;
}
-static inline void drm_fb_helper_unlink_fbi(struct drm_fb_helper *fb_helper)
-{
-}
-
static inline void drm_fb_helper_deferred_io(struct fb_info *info,
struct list_head *pagelist)
{
@@ -453,24 +437,6 @@ static inline int drm_fb_helper_debug_leave(struct fb_info *info)
return 0;
}
-static inline int
-drm_fb_helper_fbdev_setup(struct drm_device *dev,
- struct drm_fb_helper *fb_helper,
- const struct drm_fb_helper_funcs *funcs,
- unsigned int preferred_bpp,
- unsigned int max_conn_count)
-{
- /* So drivers can use it to free the struct */
- dev->fb_helper = fb_helper;
-
- return 0;
-}
-
-static inline void drm_fb_helper_fbdev_teardown(struct drm_device *dev)
-{
- dev->fb_helper = NULL;
-}
-
static inline void drm_fb_helper_lastclose(struct drm_device *dev)
{
}
@@ -480,13 +446,6 @@ static inline void drm_fb_helper_output_poll_changed(struct drm_device *dev)
}
static inline int
-drm_fb_helper_generic_probe(struct drm_fb_helper *fb_helper,
- struct drm_fb_helper_surface_size *sizes)
-{
- return 0;
-}
-
-static inline int
drm_fbdev_generic_setup(struct drm_device *dev, unsigned int preferred_bpp)
{
return 0;
@@ -539,18 +498,16 @@ drm_fb_helper_remove_conflicting_framebuffers(struct apertures_struct *a,
/**
* drm_fb_helper_remove_conflicting_pci_framebuffers - remove firmware-configured framebuffers for PCI devices
* @pdev: PCI device
- * @resource_id: index of PCI BAR configuring framebuffer memory
* @name: requesting driver name
*
* This function removes framebuffer devices (eg. initialized by firmware)
- * using memory range configured for @pdev's BAR @resource_id.
+ * using a memory range configured for any of @pdev's memory BARs.
*
* The function assumes that PCI device with shadowed ROM drives a primary
* display and so kicks out vga16fb.
*/
static inline int
drm_fb_helper_remove_conflicting_pci_framebuffers(struct pci_dev *pdev,
- int resource_id,
const char *name)
{
int ret = 0;
@@ -560,7 +517,7 @@ drm_fb_helper_remove_conflicting_pci_framebuffers(struct pci_dev *pdev,
* otherwise the vga fbdev driver falls over.
*/
#if IS_REACHABLE(CONFIG_FB)
- ret = remove_conflicting_pci_framebuffers(pdev, resource_id, name);
+ ret = remove_conflicting_pci_framebuffers(pdev, name);
#endif
if (ret == 0)
ret = vga_remove_vgacon(pdev);
diff --git a/include/drm/drm_file.h b/include/drm/drm_file.h
index 67af60bb527a..8b099b347817 100644
--- a/include/drm/drm_file.h
+++ b/include/drm/drm_file.h
@@ -42,6 +42,7 @@ struct dma_fence;
struct drm_file;
struct drm_device;
struct device;
+struct file;
/*
* FIXME: Not sure we want to have drm_minor here in the end, but to avoid
@@ -387,4 +388,6 @@ void drm_event_cancel_free(struct drm_device *dev,
void drm_send_event_locked(struct drm_device *dev, struct drm_pending_event *e);
void drm_send_event(struct drm_device *dev, struct drm_pending_event *e);
+struct file *mock_drm_getfile(struct drm_minor *minor, unsigned int flags);
+
#endif /* _DRM_FILE_H_ */
diff --git a/include/drm/drm_fourcc.h b/include/drm/drm_fourcc.h
index 306d1efeb5e0..156b122c0ad5 100644
--- a/include/drm/drm_fourcc.h
+++ b/include/drm/drm_fourcc.h
@@ -78,7 +78,7 @@ struct drm_format_info {
* triplet @char_per_block, @block_w, @block_h for better
* describing the pixel format.
*/
- u8 cpp[3];
+ u8 cpp[4];
/**
* @char_per_block:
@@ -104,7 +104,7 @@ struct drm_format_info {
* information from their drm_mode_config.get_format_info hook
* if they want the core to be validating the pitch.
*/
- u8 char_per_block[3];
+ u8 char_per_block[4];
};
/**
@@ -113,7 +113,7 @@ struct drm_format_info {
* Block width in pixels, this is intended to be accessed through
* drm_format_info_block_width()
*/
- u8 block_w[3];
+ u8 block_w[4];
/**
* @block_h:
@@ -121,7 +121,7 @@ struct drm_format_info {
* Block height in pixels, this is intended to be accessed through
* drm_format_info_block_height()
*/
- u8 block_h[3];
+ u8 block_h[4];
/** @hsub: Horizontal chroma subsampling factor */
u8 hsub;
diff --git a/include/drm/drm_gem.h b/include/drm/drm_gem.h
index 6aaba14f5972..0b375069cd48 100644
--- a/include/drm/drm_gem.h
+++ b/include/drm/drm_gem.h
@@ -151,6 +151,19 @@ struct drm_gem_object_funcs {
void (*vunmap)(struct drm_gem_object *obj, void *vaddr);
/**
+ * @mmap:
+ *
+	 * Handle mmap() of the gem object, set up the vma accordingly.
+ *
+ * This callback is optional.
+ *
+	 * The callback is used by both drm_gem_mmap_obj() and
+	 * drm_gem_prime_mmap(). When @mmap is present, @vm_ops is not
+	 * used; the @mmap callback must set vma->vm_ops instead.
+ */
+ int (*mmap)(struct drm_gem_object *obj, struct vm_area_struct *vma);
+
+ /**
* @vm_ops:
*
* Virtual memory operations used with mmap.
diff --git a/include/drm/drm_gem_shmem_helper.h b/include/drm/drm_gem_shmem_helper.h
index 7865e6b5d36c..294b2931c4cc 100644
--- a/include/drm/drm_gem_shmem_helper.h
+++ b/include/drm/drm_gem_shmem_helper.h
@@ -96,37 +96,16 @@ struct drm_gem_shmem_object {
	 * The address is unmapped when the count reaches zero.
*/
unsigned int vmap_use_count;
+
+ /**
+ * @map_cached: map object cached (instead of using writecombine).
+ */
+ bool map_cached;
};
#define to_drm_gem_shmem_obj(obj) \
container_of(obj, struct drm_gem_shmem_object, base)
-/**
- * DEFINE_DRM_GEM_SHMEM_FOPS() - Macro to generate file operations for shmem drivers
- * @name: name for the generated structure
- *
- * This macro autogenerates a suitable &struct file_operations for shmem based
- * drivers, which can be assigned to &drm_driver.fops. Note that this structure
- * cannot be shared between drivers, because it contains a reference to the
- * current module using THIS_MODULE.
- *
- * Note that the declaration is already marked as static - if you need a
- * non-static version of this you're probably doing it wrong and will break the
- * THIS_MODULE reference by accident.
- */
-#define DEFINE_DRM_GEM_SHMEM_FOPS(name) \
- static const struct file_operations name = {\
- .owner = THIS_MODULE,\
- .open = drm_open,\
- .release = drm_release,\
- .unlocked_ioctl = drm_ioctl,\
- .compat_ioctl = drm_compat_ioctl,\
- .poll = drm_poll,\
- .read = drm_read,\
- .llseek = noop_llseek,\
- .mmap = drm_gem_shmem_mmap, \
- }
-
struct drm_gem_shmem_object *drm_gem_shmem_create(struct drm_device *dev, size_t size);
void drm_gem_shmem_free_object(struct drm_gem_object *obj);
@@ -156,9 +135,7 @@ drm_gem_shmem_create_with_handle(struct drm_file *file_priv,
int drm_gem_shmem_dumb_create(struct drm_file *file, struct drm_device *dev,
struct drm_mode_create_dumb *args);
-int drm_gem_shmem_mmap(struct file *filp, struct vm_area_struct *vma);
-
-extern const struct vm_operations_struct drm_gem_shmem_vm_ops;
+int drm_gem_shmem_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
void drm_gem_shmem_print_info(struct drm_printer *p, unsigned int indent,
const struct drm_gem_object *obj);
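With drm_gem_shmem_mmap() now taking the GEM object directly, it can be wired up as the &drm_gem_object_funcs.mmap callback documented earlier; a minimal sketch, assuming the other shmem helpers are in use:

	static const struct drm_gem_object_funcs foo_gem_funcs = {
		.free	= drm_gem_shmem_free_object,
		.vmap	= drm_gem_shmem_vmap,
		.vunmap	= drm_gem_shmem_vunmap,
		.mmap	= drm_gem_shmem_mmap,	/* sets vma->vm_ops itself */
	};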
diff --git a/include/drm/drm_gem_ttm_helper.h b/include/drm/drm_gem_ttm_helper.h
new file mode 100644
index 000000000000..118cef76f84f
--- /dev/null
+++ b/include/drm/drm_gem_ttm_helper.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+#ifndef DRM_GEM_TTM_HELPER_H
+#define DRM_GEM_TTM_HELPER_H
+
+#include <linux/kernel.h>
+
+#include <drm/drm_gem.h>
+#include <drm/drm_device.h>
+#include <drm/ttm/ttm_bo_api.h>
+#include <drm/ttm/ttm_bo_driver.h>
+
+#define drm_gem_ttm_of_gem(gem_obj) \
+ container_of(gem_obj, struct ttm_buffer_object, base)
+
+void drm_gem_ttm_print_info(struct drm_printer *p, unsigned int indent,
+ const struct drm_gem_object *gem);
+int drm_gem_ttm_mmap(struct drm_gem_object *gem,
+ struct vm_area_struct *vma);
+
+#endif
diff --git a/include/drm/drm_gem_vram_helper.h b/include/drm/drm_gem_vram_helper.h
index ac217d768456..573e9fd109bf 100644
--- a/include/drm/drm_gem_vram_helper.h
+++ b/include/drm/drm_gem_vram_helper.h
@@ -3,18 +3,25 @@
#ifndef DRM_GEM_VRAM_HELPER_H
#define DRM_GEM_VRAM_HELPER_H
+#include <drm/drm_file.h>
#include <drm/drm_gem.h>
+#include <drm/drm_ioctl.h>
#include <drm/ttm/ttm_bo_api.h>
+#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
+
#include <linux/kernel.h> /* for container_of() */
struct drm_mode_create_dumb;
-struct drm_vram_mm_funcs;
+struct drm_plane;
+struct drm_plane_state;
+struct drm_simple_display_pipe;
struct filp;
struct vm_area_struct;
#define DRM_GEM_VRAM_PL_FLAG_VRAM TTM_PL_FLAG_VRAM
#define DRM_GEM_VRAM_PL_FLAG_SYSTEM TTM_PL_FLAG_SYSTEM
+#define DRM_GEM_VRAM_PL_FLAG_TOPDOWN TTM_PL_FLAG_TOPDOWN
/*
* Buffer-object helpers
@@ -34,11 +41,26 @@ struct vm_area_struct;
* backed by VRAM. It can be used for simple framebuffer devices with
* dedicated memory. The buffer object can be evicted to system memory if
* video memory becomes scarce.
+ *
+ * GEM VRAM objects perform reference counting for pin and mapping
+ * operations. So a buffer object that has been pinned N times with
+ * drm_gem_vram_pin() must be unpinned N times with
+ * drm_gem_vram_unpin(). The same applies to pairs of
+ * drm_gem_vram_kmap() and drm_gem_vram_kunmap(), as well as pairs of
+ * drm_gem_vram_vmap() and drm_gem_vram_vunmap().
*/
struct drm_gem_vram_object {
struct ttm_buffer_object bo;
struct ttm_bo_kmap_obj kmap;
+ /**
+ * @kmap_use_count:
+ *
+ * Reference count on the virtual address.
+	 * The address is unmapped when the count reaches zero.
+ */
+ unsigned int kmap_use_count;
+
/* Supported placements are %TTM_PL_VRAM and %TTM_PL_SYSTEM */
struct ttm_placement placement;
struct ttm_place placements[2];
@@ -71,10 +93,8 @@ static inline struct drm_gem_vram_object *drm_gem_vram_of_gem(
}
struct drm_gem_vram_object *drm_gem_vram_create(struct drm_device *dev,
- struct ttm_bo_device *bdev,
size_t size,
- unsigned long pg_align,
- bool interruptible);
+ unsigned long pg_align);
void drm_gem_vram_put(struct drm_gem_vram_object *gbo);
u64 drm_gem_vram_mmap_offset(struct drm_gem_vram_object *gbo);
s64 drm_gem_vram_offset(struct drm_gem_vram_object *gbo);
@@ -83,27 +103,16 @@ int drm_gem_vram_unpin(struct drm_gem_vram_object *gbo);
void *drm_gem_vram_kmap(struct drm_gem_vram_object *gbo, bool map,
bool *is_iomem);
void drm_gem_vram_kunmap(struct drm_gem_vram_object *gbo);
+void *drm_gem_vram_vmap(struct drm_gem_vram_object *gbo);
+void drm_gem_vram_vunmap(struct drm_gem_vram_object *gbo, void *vaddr);
int drm_gem_vram_fill_create_dumb(struct drm_file *file,
struct drm_device *dev,
- struct ttm_bo_device *bdev,
unsigned long pg_align,
- bool interruptible,
+ unsigned long pitch_align,
struct drm_mode_create_dumb *args);
/*
- * Helpers for struct ttm_bo_driver
- */
-
-void drm_gem_vram_bo_driver_evict_flags(struct ttm_buffer_object *bo,
- struct ttm_placement *pl);
-
-int drm_gem_vram_bo_driver_verify_access(struct ttm_buffer_object *bo,
- struct file *filp);
-
-extern const struct drm_vram_mm_funcs drm_gem_vram_mm_funcs;
-
-/*
* Helpers for struct drm_driver
*/
@@ -114,6 +123,28 @@ int drm_gem_vram_driver_dumb_mmap_offset(struct drm_file *file,
struct drm_device *dev,
uint32_t handle, uint64_t *offset);
+/*
+ * Helpers for struct drm_plane_helper_funcs
+ */
+int
+drm_gem_vram_plane_helper_prepare_fb(struct drm_plane *plane,
+ struct drm_plane_state *new_state);
+void
+drm_gem_vram_plane_helper_cleanup_fb(struct drm_plane *plane,
+ struct drm_plane_state *old_state);
+
+/*
+ * Helpers for struct drm_simple_display_pipe_funcs
+ */
+
+int drm_gem_vram_simple_display_pipe_prepare_fb(
+ struct drm_simple_display_pipe *pipe,
+ struct drm_plane_state *new_state);
+
+void drm_gem_vram_simple_display_pipe_cleanup_fb(
+ struct drm_simple_display_pipe *pipe,
+ struct drm_plane_state *old_state);
+
/**
* define DRM_GEM_VRAM_DRIVER - default callback functions for \
&struct drm_driver
@@ -122,8 +153,56 @@ int drm_gem_vram_driver_dumb_mmap_offset(struct drm_file *file,
* &struct drm_driver with default functions.
*/
#define DRM_GEM_VRAM_DRIVER \
+ .debugfs_init = drm_vram_mm_debugfs_init, \
.dumb_create = drm_gem_vram_driver_dumb_create, \
.dumb_map_offset = drm_gem_vram_driver_dumb_mmap_offset, \
.gem_prime_mmap = drm_gem_prime_mmap
+/*
+ * VRAM memory manager
+ */
+
+/**
+ * struct drm_vram_mm - An instance of VRAM MM
+ * @vram_base: Base address of the managed video memory
+ * @vram_size: Size of the managed video memory in bytes
+ * @bdev: The TTM BO device.
+ * @funcs: TTM BO functions
+ *
+ * The fields &struct drm_vram_mm.vram_base and
+ * &struct drm_vram_mm.vram_size are managed by VRAM MM, but are
+ * available for public read access. Use the field
+ * &struct drm_vram_mm.bdev to access the TTM BO device.
+ */
+struct drm_vram_mm {
+ uint64_t vram_base;
+ size_t vram_size;
+
+ struct ttm_bo_device bdev;
+};
+
+/**
+ * drm_vram_mm_of_bdev() - \
+	Returns the &struct drm_vram_mm containing the field bdev.
+ * @bdev: the TTM BO device
+ *
+ * Returns:
+ * The containing instance of &struct drm_vram_mm
+ */
+static inline struct drm_vram_mm *drm_vram_mm_of_bdev(
+ struct ttm_bo_device *bdev)
+{
+ return container_of(bdev, struct drm_vram_mm, bdev);
+}
+
+int drm_vram_mm_debugfs_init(struct drm_minor *minor);
+
+/*
+ * Helpers for integration with struct drm_device
+ */
+
+struct drm_vram_mm *drm_vram_helper_alloc_mm(
+ struct drm_device *dev, uint64_t vram_base, size_t vram_size);
+void drm_vram_helper_release_mm(struct drm_device *dev);
+
#endif
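
A short sketch of the reference-counted mapping described above; my_fill_buffer is hypothetical, and it assumes the object is already placed where the driver wants it:

    #include <linux/err.h>
    #include <linux/string.h>
    #include <linux/types.h>
    #include <drm/drm_gem_vram_helper.h>

    /* vmap/vunmap calls must be balanced; the helper refcounts the mapping
     * internally via kmap_use_count. */
    static int my_fill_buffer(struct drm_gem_vram_object *gbo, u8 value, size_t size)
    {
        void *vaddr = drm_gem_vram_vmap(gbo);

        if (IS_ERR(vaddr))
            return PTR_ERR(vaddr);

        memset(vaddr, value, size);

        drm_gem_vram_vunmap(gbo, vaddr);  /* drops the reference taken above */
        return 0;
    }
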
diff --git a/include/drm/drm_legacy.h b/include/drm/drm_legacy.h
index 58dc0c04bf99..5745710453c8 100644
--- a/include/drm/drm_legacy.h
+++ b/include/drm/drm_legacy.h
@@ -38,7 +38,9 @@
#include <drm/drm_hashtab.h>
struct drm_device;
+struct drm_driver;
struct file;
+struct pci_driver;
/*
* Legacy Support for palateontologic DRM drivers
@@ -188,8 +190,33 @@ do { \
void drm_legacy_idlelock_take(struct drm_lock_data *lock);
void drm_legacy_idlelock_release(struct drm_lock_data *lock);
-/* drm_pci.c dma alloc wrappers */
+/* drm_pci.c */
+
+#ifdef CONFIG_PCI
+
void __drm_legacy_pci_free(struct drm_device *dev, drm_dma_handle_t * dmah);
+int drm_legacy_pci_init(struct drm_driver *driver, struct pci_driver *pdriver);
+void drm_legacy_pci_exit(struct drm_driver *driver, struct pci_driver *pdriver);
+
+#else
+
+static inline void __drm_legacy_pci_free(struct drm_device *dev,
+ drm_dma_handle_t *dmah)
+{
+}
+
+static inline int drm_legacy_pci_init(struct drm_driver *driver,
+ struct pci_driver *pdriver)
+{
+ return -EINVAL;
+}
+
+static inline void drm_legacy_pci_exit(struct drm_driver *driver,
+ struct pci_driver *pdriver)
+{
+}
+
+#endif
/* drm_memory.c */
void drm_legacy_ioremap(struct drm_local_map *map, struct drm_device *dev);
diff --git a/include/drm/drm_mipi_dsi.h b/include/drm/drm_mipi_dsi.h
index 13cf2ae59f6c..360e6377e84b 100644
--- a/include/drm/drm_mipi_dsi.h
+++ b/include/drm/drm_mipi_dsi.h
@@ -13,6 +13,7 @@
struct mipi_dsi_host;
struct mipi_dsi_device;
+struct drm_dsc_picture_parameter_set;
/* request ACK from peripheral */
#define MIPI_DSI_MSG_REQ_ACK BIT(0)
@@ -228,6 +229,9 @@ int mipi_dsi_shutdown_peripheral(struct mipi_dsi_device *dsi);
int mipi_dsi_turn_on_peripheral(struct mipi_dsi_device *dsi);
int mipi_dsi_set_maximum_return_packet_size(struct mipi_dsi_device *dsi,
u16 value);
+ssize_t mipi_dsi_compression_mode(struct mipi_dsi_device *dsi, bool enable);
+ssize_t mipi_dsi_picture_parameter_set(struct mipi_dsi_device *dsi,
+ const struct drm_dsc_picture_parameter_set *pps);
ssize_t mipi_dsi_generic_write(struct mipi_dsi_device *dsi, const void *payload,
size_t size);
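
A hedged sketch of how a DSI panel driver might use the two new helpers; my_panel_enable_dsc is hypothetical and assumes the caller has already packed a valid PPS (for example with drm_dsc_pps_payload_pack()):

    #include <drm/drm_mipi_dsi.h>
    #include <drm/drm_dsc.h>

    static int my_panel_enable_dsc(struct mipi_dsi_device *dsi,
                                   const struct drm_dsc_picture_parameter_set *pps)
    {
        ssize_t ret;

        /* Send the PPS first, then switch the peripheral to compressed mode. */
        ret = mipi_dsi_picture_parameter_set(dsi, pps);
        if (ret < 0)
            return ret;

        ret = mipi_dsi_compression_mode(dsi, true);
        if (ret < 0)
            return ret;

        return 0;
    }
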
diff --git a/include/drm/drm_mm.h b/include/drm/drm_mm.h
index 2c3bbb43c7d1..d7939c054259 100644
--- a/include/drm/drm_mm.h
+++ b/include/drm/drm_mm.h
@@ -168,8 +168,9 @@ struct drm_mm_node {
struct rb_node rb_hole_addr;
u64 __subtree_last;
u64 hole_size;
- bool allocated : 1;
- bool scanned_block : 1;
+ unsigned long flags;
+#define DRM_MM_NODE_ALLOCATED_BIT 0
+#define DRM_MM_NODE_SCANNED_BIT 1
#ifdef CONFIG_DRM_DEBUG_MM
depot_stack_handle_t stack;
#endif
@@ -253,7 +254,7 @@ struct drm_mm_scan {
*/
static inline bool drm_mm_node_allocated(const struct drm_mm_node *node)
{
- return node->allocated;
+ return test_bit(DRM_MM_NODE_ALLOCATED_BIT, &node->flags);
}
/**
diff --git a/include/drm/drm_modeset_helper_vtables.h b/include/drm/drm_modeset_helper_vtables.h
index 6b18c8adfe9d..5a87f1bd7a3f 100644
--- a/include/drm/drm_modeset_helper_vtables.h
+++ b/include/drm/drm_modeset_helper_vtables.h
@@ -955,9 +955,8 @@ struct drm_connector_helper_funcs {
* @atomic_best_encoder.
*
* You can leave this function to NULL if the connector is only
- * attached to a single encoder and you are using the atomic helpers.
- * In this case, the core will call drm_atomic_helper_best_encoder()
- * for you.
+ * attached to a single encoder. In this case, the core will call
+ * drm_connector_get_single_encoder() for you.
*
* RETURNS:
*
@@ -977,7 +976,7 @@ struct drm_connector_helper_funcs {
*
* This function is used by drm_atomic_helper_check_modeset().
* If it is not implemented, the core will fallback to @best_encoder
- * (or drm_atomic_helper_best_encoder() if @best_encoder is NULL).
+ * (or drm_connector_get_single_encoder() if @best_encoder is NULL).
*
* NOTE:
*
diff --git a/include/drm/drm_modeset_lock.h b/include/drm/drm_modeset_lock.h
index 7b8841065b11..4fc9a43ac45a 100644
--- a/include/drm/drm_modeset_lock.h
+++ b/include/drm/drm_modeset_lock.h
@@ -114,6 +114,15 @@ static inline bool drm_modeset_is_locked(struct drm_modeset_lock *lock)
return ww_mutex_is_locked(&lock->mutex);
}
+/**
+ * drm_modeset_lock_assert_held - equivalent to lockdep_assert_held()
+ * @lock: lock to check
+ */
+static inline void drm_modeset_lock_assert_held(struct drm_modeset_lock *lock)
+{
+ lockdep_assert_held(&lock->mutex.base);
+}
+
int drm_modeset_lock(struct drm_modeset_lock *lock,
struct drm_modeset_acquire_ctx *ctx);
int __must_check drm_modeset_lock_single_interruptible(struct drm_modeset_lock *lock);
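
A small sketch of the new assertion in use; my_crtc_update_internal is a hypothetical helper that relies on the caller holding crtc->mutex:

    #include <drm/drm_crtc.h>
    #include <drm/drm_modeset_lock.h>

    static void my_crtc_update_internal(struct drm_crtc *crtc)
    {
        /* Document and enforce the locking requirement via lockdep. */
        drm_modeset_lock_assert_held(&crtc->mutex);

        /* ... touch state protected by crtc->mutex ... */
    }
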
diff --git a/include/drm/drm_of.h b/include/drm/drm_of.h
index ead34ab5ca4e..b9b093add92e 100644
--- a/include/drm/drm_of.h
+++ b/include/drm/drm_of.h
@@ -16,6 +16,18 @@ struct drm_panel;
struct drm_bridge;
struct device_node;
+/**
+ * enum drm_lvds_dual_link_pixels - Pixel order of an LVDS dual-link connection
+ * @DRM_LVDS_DUAL_LINK_EVEN_ODD_PIXELS: Even pixels are expected to be generated
+ * from the first port, odd pixels from the second port
+ * @DRM_LVDS_DUAL_LINK_ODD_EVEN_PIXELS: Odd pixels are expected to be generated
+ * from the first port, even pixels from the second port
+ */
+enum drm_lvds_dual_link_pixels {
+ DRM_LVDS_DUAL_LINK_EVEN_ODD_PIXELS = 0,
+ DRM_LVDS_DUAL_LINK_ODD_EVEN_PIXELS = 1,
+};
+
#ifdef CONFIG_OF
uint32_t drm_of_crtc_port_mask(struct drm_device *dev,
struct device_node *port);
@@ -35,6 +47,8 @@ int drm_of_find_panel_or_bridge(const struct device_node *np,
int port, int endpoint,
struct drm_panel **panel,
struct drm_bridge **bridge);
+int drm_of_lvds_get_dual_link_pixel_order(const struct device_node *port1,
+ const struct device_node *port2);
#else
static inline uint32_t drm_of_crtc_port_mask(struct drm_device *dev,
struct device_node *port)
@@ -77,6 +91,13 @@ static inline int drm_of_find_panel_or_bridge(const struct device_node *np,
{
return -EINVAL;
}
+
+static inline int
+drm_of_lvds_get_dual_link_pixel_order(const struct device_node *port1,
+ const struct device_node *port2)
+{
+ return -EINVAL;
+}
#endif
/*
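
A hedged sketch of querying the dual-link pixel order from the two endpoint port nodes; my_first_port_drives_even_pixels is a hypothetical helper:

    #include <linux/of.h>
    #include <linux/types.h>
    #include <drm/drm_of.h>

    static bool my_first_port_drives_even_pixels(const struct device_node *port1,
                                                 const struct device_node *port2)
    {
        int order = drm_of_lvds_get_dual_link_pixel_order(port1, port2);

        /* A negative value means the ports do not form a dual-link pair. */
        return order == DRM_LVDS_DUAL_LINK_EVEN_ODD_PIXELS;
    }
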
diff --git a/include/drm/drm_os_linux.h b/include/drm/drm_os_linux.h
deleted file mode 100644
index ee8d61b64f29..000000000000
--- a/include/drm/drm_os_linux.h
+++ /dev/null
@@ -1,55 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/**
- * \file drm_os_linux.h
- * OS abstraction macros.
- */
-
-#include <linux/interrupt.h> /* For task queue support */
-#include <linux/sched/signal.h>
-#include <linux/delay.h>
-#include <linux/io-64-nonatomic-lo-hi.h>
-
-/** Current process ID */
-#define DRM_CURRENTPID task_pid_nr(current)
-#define DRM_UDELAY(d) udelay(d)
-/** Read a byte from a MMIO region */
-#define DRM_READ8(map, offset) readb(((void __iomem *)(map)->handle) + (offset))
-/** Read a word from a MMIO region */
-#define DRM_READ16(map, offset) readw(((void __iomem *)(map)->handle) + (offset))
-/** Read a dword from a MMIO region */
-#define DRM_READ32(map, offset) readl(((void __iomem *)(map)->handle) + (offset))
-/** Write a byte into a MMIO region */
-#define DRM_WRITE8(map, offset, val) writeb(val, ((void __iomem *)(map)->handle) + (offset))
-/** Write a word into a MMIO region */
-#define DRM_WRITE16(map, offset, val) writew(val, ((void __iomem *)(map)->handle) + (offset))
-/** Write a dword into a MMIO region */
-#define DRM_WRITE32(map, offset, val) writel(val, ((void __iomem *)(map)->handle) + (offset))
-
-/** Read a qword from a MMIO region - be careful using these unless you really understand them */
-#define DRM_READ64(map, offset) readq(((void __iomem *)(map)->handle) + (offset))
-/** Write a qword into a MMIO region */
-#define DRM_WRITE64(map, offset, val) writeq(val, ((void __iomem *)(map)->handle) + (offset))
-
-#define DRM_WAIT_ON( ret, queue, timeout, condition ) \
-do { \
- DECLARE_WAITQUEUE(entry, current); \
- unsigned long end = jiffies + (timeout); \
- add_wait_queue(&(queue), &entry); \
- \
- for (;;) { \
- __set_current_state(TASK_INTERRUPTIBLE); \
- if (condition) \
- break; \
- if (time_after_eq(jiffies, end)) { \
- ret = -EBUSY; \
- break; \
- } \
- schedule_timeout((HZ/100 > 1) ? HZ/100 : 1); \
- if (signal_pending(current)) { \
- ret = -EINTR; \
- break; \
- } \
- } \
- __set_current_state(TASK_RUNNING); \
- remove_wait_queue(&(queue), &entry); \
-} while (0)
diff --git a/include/drm/drm_panel.h b/include/drm/drm_panel.h
index 624bd15ecfab..121f7aabccd1 100644
--- a/include/drm/drm_panel.h
+++ b/include/drm/drm_panel.h
@@ -28,6 +28,7 @@
#include <linux/errno.h>
#include <linux/list.h>
+struct backlight_device;
struct device_node;
struct drm_connector;
struct drm_device;
@@ -59,12 +60,18 @@ struct display_timing;
*
* To save power when no video data is transmitted, a driver can power down
* the panel. This is the job of the .unprepare() function.
+ *
+ * Backlight can be handled automatically if configured using
+ * drm_panel_of_backlight(). Then the driver does not need to implement the
+ * functionality to enable/disable backlight.
*/
struct drm_panel_funcs {
/**
* @prepare:
*
* Turn on panel and perform set up.
+ *
+ * This function is optional.
*/
int (*prepare)(struct drm_panel *panel);
@@ -72,6 +79,8 @@ struct drm_panel_funcs {
* @enable:
*
* Enable panel (turn on back light, etc.).
+ *
+ * This function is optional.
*/
int (*enable)(struct drm_panel *panel);
@@ -79,6 +88,8 @@ struct drm_panel_funcs {
* @disable:
*
* Disable panel (turn off back light, etc.).
+ *
+ * This function is optional.
*/
int (*disable)(struct drm_panel *panel);
@@ -86,22 +97,29 @@ struct drm_panel_funcs {
* @unprepare:
*
* Turn off panel.
+ *
+ * This function is optional.
*/
int (*unprepare)(struct drm_panel *panel);
/**
* @get_modes:
*
- * Add modes to the connector that the panel is attached to and
- * return the number of modes added.
+ * Add modes to the connector that the panel is attached to
+	 * and return the number of modes added.
+ *
+ * This function is mandatory.
*/
- int (*get_modes)(struct drm_panel *panel);
+ int (*get_modes)(struct drm_panel *panel,
+ struct drm_connector *connector);
/**
* @get_timings:
*
* Copy display timings into the provided array and return
* the number of display timings available.
+ *
+ * This function is optional.
*/
int (*get_timings)(struct drm_panel *panel, unsigned int num_timings,
struct display_timing *timings);
@@ -112,32 +130,38 @@ struct drm_panel_funcs {
*/
struct drm_panel {
/**
- * @drm:
+ * @dev:
*
- * DRM device owning the panel.
+ * Parent device of the panel.
*/
- struct drm_device *drm;
+ struct device *dev;
/**
- * @connector:
+ * @backlight:
*
- * DRM connector that the panel is attached to.
+ * Backlight device, used to turn on backlight after the call
+ * to enable(), and to turn off backlight before the call to
+ * disable().
+	 * The backlight is set by drm_panel_of_backlight() and drivers
+ * shall not assign it.
*/
- struct drm_connector *connector;
+ struct backlight_device *backlight;
/**
- * @dev:
+ * @funcs:
*
- * Parent device of the panel.
+ * Operations that can be performed on the panel.
*/
- struct device *dev;
+ const struct drm_panel_funcs *funcs;
/**
- * @funcs:
+ * @connector_type:
*
- * Operations that can be performed on the panel.
+ * Type of the panel as a DRM_MODE_CONNECTOR_* value. This is used to
+ * initialise the drm_connector corresponding to the panel with the
+ * correct connector type.
*/
- const struct drm_panel_funcs *funcs;
+ int connector_type;
/**
* @list:
@@ -147,7 +171,9 @@ struct drm_panel {
struct list_head list;
};
-void drm_panel_init(struct drm_panel *panel);
+void drm_panel_init(struct drm_panel *panel, struct device *dev,
+ const struct drm_panel_funcs *funcs,
+ int connector_type);
int drm_panel_add(struct drm_panel *panel);
void drm_panel_remove(struct drm_panel *panel);
@@ -161,7 +187,7 @@ int drm_panel_unprepare(struct drm_panel *panel);
int drm_panel_enable(struct drm_panel *panel);
int drm_panel_disable(struct drm_panel *panel);
-int drm_panel_get_modes(struct drm_panel *panel);
+int drm_panel_get_modes(struct drm_panel *panel, struct drm_connector *connector);
#if defined(CONFIG_OF) && defined(CONFIG_DRM_PANEL)
struct drm_panel *of_drm_find_panel(const struct device_node *np);
@@ -172,4 +198,13 @@ static inline struct drm_panel *of_drm_find_panel(const struct device_node *np)
}
#endif
+#if IS_REACHABLE(CONFIG_BACKLIGHT_CLASS_DEVICE)
+int drm_panel_of_backlight(struct drm_panel *panel);
+#else
+static inline int drm_panel_of_backlight(struct drm_panel *panel)
+{
+ return 0;
+}
+#endif
+
#endif
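
Pulling the drm_panel changes together, a minimal sketch of a panel driver probe under the new API; all my_panel_* names are hypothetical, and DRM_MODE_CONNECTOR_DSI is only an example connector type:

    #include <drm/drm_connector.h>
    #include <drm/drm_panel.h>

    static int my_panel_get_modes(struct drm_panel *panel,
                                  struct drm_connector *connector)
    {
        /* Add modes directly to @connector and return how many were added. */
        return 0;
    }

    static const struct drm_panel_funcs my_panel_funcs = {
        .get_modes = my_panel_get_modes,  /* the only mandatory callback */
    };

    static int my_panel_probe_tail(struct device *dev, struct drm_panel *panel)
    {
        int ret;

        drm_panel_init(panel, dev, &my_panel_funcs, DRM_MODE_CONNECTOR_DSI);

        /* Let the core drive the backlight around enable()/disable(). */
        ret = drm_panel_of_backlight(panel);
        if (ret)
            return ret;

        return drm_panel_add(panel);
    }
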
diff --git a/include/drm/drm_pci.h b/include/drm/drm_pci.h
index 8181e9e7cf1d..9031e217b506 100644
--- a/include/drm/drm_pci.h
+++ b/include/drm/drm_pci.h
@@ -39,23 +39,36 @@ struct drm_device;
struct drm_driver;
struct drm_master;
+#ifdef CONFIG_PCI
+
struct drm_dma_handle *drm_pci_alloc(struct drm_device *dev, size_t size,
size_t align);
void drm_pci_free(struct drm_device *dev, struct drm_dma_handle * dmah);
-int drm_legacy_pci_init(struct drm_driver *driver, struct pci_driver *pdriver);
-void drm_legacy_pci_exit(struct drm_driver *driver, struct pci_driver *pdriver);
-#ifdef CONFIG_PCI
int drm_get_pci_dev(struct pci_dev *pdev,
const struct pci_device_id *ent,
struct drm_driver *driver);
+
#else
+
+static inline struct drm_dma_handle *drm_pci_alloc(struct drm_device *dev,
+ size_t size, size_t align)
+{
+ return NULL;
+}
+
+static inline void drm_pci_free(struct drm_device *dev,
+ struct drm_dma_handle *dmah)
+{
+}
+
static inline int drm_get_pci_dev(struct pci_dev *pdev,
const struct pci_device_id *ent,
struct drm_driver *driver)
{
return -ENOSYS;
}
+
#endif
#endif /* _DRM_PCI_H_ */
diff --git a/include/drm/drm_plane.h b/include/drm/drm_plane.h
index cd5903ad33f7..3f396d94afe4 100644
--- a/include/drm/drm_plane.h
+++ b/include/drm/drm_plane.h
@@ -140,10 +140,11 @@ struct drm_plane_state {
* @zpos:
* Priority of the given plane on crtc (optional).
*
- * Note that multiple active planes on the same crtc can have an
- * identical zpos value. The rule to solving the conflict is to compare
- * the plane object IDs; the plane with a higher ID must be stacked on
- * top of a plane with a lower ID.
+ * User-space may set mutable zpos properties so that multiple active
+ * planes on the same CRTC have identical zpos values. This is a
+ * user-space bug, but drivers can solve the conflict by comparing the
+ * plane object IDs; the plane with a higher ID is stacked on top of a
+ * plane with a lower ID.
*
* See drm_plane_create_zpos_property() and
* drm_plane_create_zpos_immutable_property() for more details.
@@ -183,8 +184,26 @@ struct drm_plane_state {
*/
struct drm_property_blob *fb_damage_clips;
- /** @src: clipped source coordinates of the plane (in 16.16) */
- /** @dst: clipped destination coordinates of the plane */
+ /**
+ * @src:
+ *
+ * source coordinates of the plane (in 16.16).
+ *
+ * When using drm_atomic_helper_check_plane_state(),
+ * the coordinates are clipped, but the driver may choose
+ * to use unclipped coordinates instead when the hardware
+ * performs the clipping automatically.
+ */
+ /**
+ * @dst:
+ *
+ * clipped destination coordinates of the plane.
+ *
+ * When using drm_atomic_helper_check_plane_state(),
+ * the coordinates are clipped, but the driver may choose
+ * to use unclipped coordinates instead when the hardware
+ * performs the clipping automatically.
+ */
struct drm_rect src, dst;
/**
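
A hedged sketch of an atomic_check implementation that relies on the helper to clip @src and @dst as described above; my_plane_atomic_check and the no-scaling limits are illustrative choices, not part of this patch:

    #include <drm/drm_atomic.h>
    #include <drm/drm_atomic_helper.h>
    #include <drm/drm_plane.h>
    #include <drm/drm_plane_helper.h>

    static int my_plane_atomic_check(struct drm_plane *plane,
                                     struct drm_plane_state *new_state)
    {
        struct drm_crtc_state *crtc_state;

        if (!new_state->crtc)
            return 0;

        crtc_state = drm_atomic_get_new_crtc_state(new_state->state,
                                                   new_state->crtc);
        if (!crtc_state)
            return -EINVAL;

        /* After this call new_state->src and new_state->dst hold the
         * clipped coordinates. */
        return drm_atomic_helper_check_plane_state(new_state, crtc_state,
                                                   DRM_PLANE_HELPER_NO_SCALING,
                                                   DRM_PLANE_HELPER_NO_SCALING,
                                                   false, true);
    }
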
diff --git a/include/drm/drm_prime.h b/include/drm/drm_prime.h
index d89311b822d5..9af7422b44cf 100644
--- a/include/drm/drm_prime.h
+++ b/include/drm/drm_prime.h
@@ -61,8 +61,6 @@ struct drm_device;
struct drm_gem_object;
struct drm_file;
-struct device;
-
/* core prime functions */
struct dma_buf *drm_gem_dmabuf_export(struct drm_device *dev,
struct dma_buf_export_info *exp_info);
diff --git a/include/drm/drm_print.h b/include/drm/drm_print.h
index a5d6f2f3e430..8f99d389792d 100644
--- a/include/drm/drm_print.h
+++ b/include/drm/drm_print.h
@@ -34,6 +34,9 @@
#include <drm/drm.h>
+/* Do *not* use outside of drm_print.[ch]! */
+extern unsigned int __drm_debug;
+
/**
* DOC: print
*
@@ -83,11 +86,14 @@ void __drm_printfn_seq_file(struct drm_printer *p, struct va_format *vaf);
void __drm_puts_seq_file(struct drm_printer *p, const char *str);
void __drm_printfn_info(struct drm_printer *p, struct va_format *vaf);
void __drm_printfn_debug(struct drm_printer *p, struct va_format *vaf);
+void __drm_printfn_err(struct drm_printer *p, struct va_format *vaf);
__printf(2, 3)
void drm_printf(struct drm_printer *p, const char *f, ...);
void drm_puts(struct drm_printer *p, const char *str);
void drm_print_regset32(struct drm_printer *p, struct debugfs_regset32 *regset);
+void drm_print_bits(struct drm_printer *p, unsigned long value,
+ const char * const bits[], unsigned int nbits);
__printf(2, 0)
/**
@@ -227,82 +233,106 @@ static inline struct drm_printer drm_debug_printer(const char *prefix)
return p;
}
-/*
- * The following categories are defined:
+/**
+ * drm_err_printer - construct a &drm_printer that outputs to pr_err()
+ * @prefix: debug output prefix
*
- * CORE: Used in the generic drm code: drm_ioctl.c, drm_mm.c, drm_memory.c, ...
- * This is the category used by the DRM_DEBUG() macro.
+ * RETURNS:
+ * The &drm_printer object
+ */
+static inline struct drm_printer drm_err_printer(const char *prefix)
+{
+ struct drm_printer p = {
+ .printfn = __drm_printfn_err,
+ .prefix = prefix
+ };
+ return p;
+}
+
+/**
+ * enum drm_debug_category - The DRM debug categories
*
- * DRIVER: Used in the vendor specific part of the driver: i915, radeon, ...
- * This is the category used by the DRM_DEBUG_DRIVER() macro.
+ * Each of the DRM debug logging macros uses a specific category, and the logging
+ * is filtered by the drm.debug module parameter. This enum specifies the values
+ * for the interface.
*
- * KMS: used in the modesetting code.
- * This is the category used by the DRM_DEBUG_KMS() macro.
+ * Each DRM_DEBUG_<CATEGORY> macro logs to DRM_UT_<CATEGORY> category, except
+ * DRM_DEBUG() logs to DRM_UT_CORE.
*
- * PRIME: used in the prime code.
- * This is the category used by the DRM_DEBUG_PRIME() macro.
+ * Enabling verbose debug messages is done through the drm.debug parameter, each
+ * category being enabled by a bit:
*
- * ATOMIC: used in the atomic code.
- * This is the category used by the DRM_DEBUG_ATOMIC() macro.
+ * - drm.debug=0x1 will enable CORE messages
+ * - drm.debug=0x2 will enable DRIVER messages
+ * - drm.debug=0x3 will enable CORE and DRIVER messages
+ * - ...
+ * - drm.debug=0x1ff will enable all messages
*
- * VBL: used for verbose debug message in the vblank code
- * This is the category used by the DRM_DEBUG_VBL() macro.
+ * An interesting feature is that it's possible to enable verbose logging at
+ * run-time by echoing the debug value in its sysfs node::
*
- * Enabling verbose debug messages is done through the drm.debug parameter,
- * each category being enabled by a bit.
+ * # echo 0xf > /sys/module/drm/parameters/debug
*
- * drm.debug=0x1 will enable CORE messages
- * drm.debug=0x2 will enable DRIVER messages
- * drm.debug=0x3 will enable CORE and DRIVER messages
- * ...
- * drm.debug=0x3f will enable all messages
+ */
+enum drm_debug_category {
+ /**
+ * @DRM_UT_CORE: Used in the generic drm code: drm_ioctl.c, drm_mm.c,
+ * drm_memory.c, ...
+ */
+ DRM_UT_CORE = 0x01,
+ /**
+ * @DRM_UT_DRIVER: Used in the vendor specific part of the driver: i915,
+ *		   radeon, ...
+ */
+ DRM_UT_DRIVER = 0x02,
+ /**
+ * @DRM_UT_KMS: Used in the modesetting code.
+ */
+ DRM_UT_KMS = 0x04,
+ /**
+ * @DRM_UT_PRIME: Used in the prime code.
+ */
+ DRM_UT_PRIME = 0x08,
+ /**
+ * @DRM_UT_ATOMIC: Used in the atomic code.
+ */
+ DRM_UT_ATOMIC = 0x10,
+ /**
+ * @DRM_UT_VBL: Used for verbose debug message in the vblank code.
+ */
+ DRM_UT_VBL = 0x20,
+ /**
+ * @DRM_UT_STATE: Used for verbose atomic state debugging.
+ */
+ DRM_UT_STATE = 0x40,
+ /**
+ * @DRM_UT_LEASE: Used in the lease code.
+ */
+ DRM_UT_LEASE = 0x80,
+ /**
+ * @DRM_UT_DP: Used in the DP code.
+ */
+ DRM_UT_DP = 0x100,
+};
+
+static inline bool drm_debug_enabled(enum drm_debug_category category)
+{
+ return unlikely(__drm_debug & category);
+}
+
+/*
+ * struct device based logging
*
- * An interesting feature is that it's possible to enable verbose logging at
- * run-time by echoing the debug value in its sysfs node:
- * # echo 0xf > /sys/module/drm/parameters/debug
+ * Prefer drm_device based logging over device or printk based logging.
*/
-#define DRM_UT_NONE 0x00
-#define DRM_UT_CORE 0x01
-#define DRM_UT_DRIVER 0x02
-#define DRM_UT_KMS 0x04
-#define DRM_UT_PRIME 0x08
-#define DRM_UT_ATOMIC 0x10
-#define DRM_UT_VBL 0x20
-#define DRM_UT_STATE 0x40
-#define DRM_UT_LEASE 0x80
-#define DRM_UT_DP 0x100
__printf(3, 4)
void drm_dev_printk(const struct device *dev, const char *level,
const char *format, ...);
__printf(3, 4)
-void drm_dev_dbg(const struct device *dev, unsigned int category,
+void drm_dev_dbg(const struct device *dev, enum drm_debug_category category,
const char *format, ...);
-__printf(2, 3)
-void drm_dbg(unsigned int category, const char *format, ...);
-__printf(1, 2)
-void drm_err(const char *format, ...);
-
-/* Macros to make printk easier */
-
-#define _DRM_PRINTK(once, level, fmt, ...) \
- printk##once(KERN_##level "[" DRM_NAME "] " fmt, ##__VA_ARGS__)
-
-#define DRM_INFO(fmt, ...) \
- _DRM_PRINTK(, INFO, fmt, ##__VA_ARGS__)
-#define DRM_NOTE(fmt, ...) \
- _DRM_PRINTK(, NOTICE, fmt, ##__VA_ARGS__)
-#define DRM_WARN(fmt, ...) \
- _DRM_PRINTK(, WARNING, fmt, ##__VA_ARGS__)
-
-#define DRM_INFO_ONCE(fmt, ...) \
- _DRM_PRINTK(_once, INFO, fmt, ##__VA_ARGS__)
-#define DRM_NOTE_ONCE(fmt, ...) \
- _DRM_PRINTK(_once, NOTICE, fmt, ##__VA_ARGS__)
-#define DRM_WARN_ONCE(fmt, ...) \
- _DRM_PRINTK(_once, WARNING, fmt, ##__VA_ARGS__)
-
/**
* Error output.
*
@@ -311,8 +341,6 @@ void drm_err(const char *format, ...);
*/
#define DRM_DEV_ERROR(dev, fmt, ...) \
drm_dev_printk(dev, KERN_ERR, "*ERROR* " fmt, ##__VA_ARGS__)
-#define DRM_ERROR(fmt, ...) \
- drm_err(fmt, ##__VA_ARGS__)
/**
* Rate limited error output. Like DRM_ERROR() but won't flood the log.
@@ -329,10 +357,8 @@ void drm_err(const char *format, ...);
if (__ratelimit(&_rs)) \
DRM_DEV_ERROR(dev, fmt, ##__VA_ARGS__); \
})
-#define DRM_ERROR_RATELIMITED(fmt, ...) \
- DRM_DEV_ERROR_RATELIMITED(NULL, fmt, ##__VA_ARGS__)
-#define DRM_DEV_INFO(dev, fmt, ...) \
+#define DRM_DEV_INFO(dev, fmt, ...) \
drm_dev_printk(dev, KERN_INFO, fmt, ##__VA_ARGS__)
#define DRM_DEV_INFO_ONCE(dev, fmt, ...) \
@@ -352,41 +378,18 @@ void drm_err(const char *format, ...);
*/
#define DRM_DEV_DEBUG(dev, fmt, ...) \
drm_dev_dbg(dev, DRM_UT_CORE, fmt, ##__VA_ARGS__)
-#define DRM_DEBUG(fmt, ...) \
- drm_dbg(DRM_UT_CORE, fmt, ##__VA_ARGS__)
-
#define DRM_DEV_DEBUG_DRIVER(dev, fmt, ...) \
drm_dev_dbg(dev, DRM_UT_DRIVER, fmt, ##__VA_ARGS__)
-#define DRM_DEBUG_DRIVER(fmt, ...) \
- drm_dbg(DRM_UT_DRIVER, fmt, ##__VA_ARGS__)
-
#define DRM_DEV_DEBUG_KMS(dev, fmt, ...) \
drm_dev_dbg(dev, DRM_UT_KMS, fmt, ##__VA_ARGS__)
-#define DRM_DEBUG_KMS(fmt, ...) \
- drm_dbg(DRM_UT_KMS, fmt, ##__VA_ARGS__)
-
#define DRM_DEV_DEBUG_PRIME(dev, fmt, ...) \
drm_dev_dbg(dev, DRM_UT_PRIME, fmt, ##__VA_ARGS__)
-#define DRM_DEBUG_PRIME(fmt, ...) \
- drm_dbg(DRM_UT_PRIME, fmt, ##__VA_ARGS__)
-
#define DRM_DEV_DEBUG_ATOMIC(dev, fmt, ...) \
drm_dev_dbg(dev, DRM_UT_ATOMIC, fmt, ##__VA_ARGS__)
-#define DRM_DEBUG_ATOMIC(fmt, ...) \
- drm_dbg(DRM_UT_ATOMIC, fmt, ##__VA_ARGS__)
-
#define DRM_DEV_DEBUG_VBL(dev, fmt, ...) \
drm_dev_dbg(dev, DRM_UT_VBL, fmt, ##__VA_ARGS__)
-#define DRM_DEBUG_VBL(fmt, ...) \
- drm_dbg(DRM_UT_VBL, fmt, ##__VA_ARGS__)
-
-#define DRM_DEBUG_LEASE(fmt, ...) \
- drm_dbg(DRM_UT_LEASE, fmt, ##__VA_ARGS__)
-
#define DRM_DEV_DEBUG_DP(dev, fmt, ...) \
drm_dev_dbg(dev, DRM_UT_DP, fmt, ## __VA_ARGS__)
-#define DRM_DEBUG_DP(fmt, ...) \
- drm_dbg(DRM_UT_DP, fmt, ## __VA_ARGS__)
#define _DRM_DEV_DEFINE_DEBUG_RATELIMITED(dev, category, fmt, ...) \
({ \
@@ -406,24 +409,147 @@ void drm_err(const char *format, ...);
#define DRM_DEV_DEBUG_RATELIMITED(dev, fmt, ...) \
_DEV_DRM_DEFINE_DEBUG_RATELIMITED(dev, DRM_UT_CORE, \
fmt, ##__VA_ARGS__)
-#define DRM_DEBUG_RATELIMITED(fmt, ...) \
- DRM_DEV_DEBUG_RATELIMITED(NULL, fmt, ##__VA_ARGS__)
-
#define DRM_DEV_DEBUG_DRIVER_RATELIMITED(dev, fmt, ...) \
_DRM_DEV_DEFINE_DEBUG_RATELIMITED(dev, DRM_UT_DRIVER, \
fmt, ##__VA_ARGS__)
-#define DRM_DEBUG_DRIVER_RATELIMITED(fmt, ...) \
- DRM_DEV_DEBUG_DRIVER_RATELIMITED(NULL, fmt, ##__VA_ARGS__)
-
#define DRM_DEV_DEBUG_KMS_RATELIMITED(dev, fmt, ...) \
_DRM_DEV_DEFINE_DEBUG_RATELIMITED(dev, DRM_UT_KMS, \
fmt, ##__VA_ARGS__)
-#define DRM_DEBUG_KMS_RATELIMITED(fmt, ...) \
- DRM_DEV_DEBUG_KMS_RATELIMITED(NULL, fmt, ##__VA_ARGS__)
-
#define DRM_DEV_DEBUG_PRIME_RATELIMITED(dev, fmt, ...) \
_DRM_DEV_DEFINE_DEBUG_RATELIMITED(dev, DRM_UT_PRIME, \
fmt, ##__VA_ARGS__)
+
+/*
+ * struct drm_device based logging
+ *
+ * Prefer drm_device based logging over device or printk based logging.
+ */
+
+/* Helper for struct drm_device based logging. */
+#define __drm_printk(drm, level, type, fmt, ...) \
+ dev_##level##type((drm)->dev, "[drm] " fmt, ##__VA_ARGS__)
+
+
+#define drm_info(drm, fmt, ...) \
+ __drm_printk((drm), info,, fmt, ##__VA_ARGS__)
+
+#define drm_notice(drm, fmt, ...) \
+ __drm_printk((drm), notice,, fmt, ##__VA_ARGS__)
+
+#define drm_warn(drm, fmt, ...) \
+ __drm_printk((drm), warn,, fmt, ##__VA_ARGS__)
+
+#define drm_err(drm, fmt, ...) \
+ __drm_printk((drm), err,, "*ERROR* " fmt, ##__VA_ARGS__)
+
+
+#define drm_info_once(drm, fmt, ...) \
+ __drm_printk((drm), info, _once, fmt, ##__VA_ARGS__)
+
+#define drm_notice_once(drm, fmt, ...) \
+ __drm_printk((drm), notice, _once, fmt, ##__VA_ARGS__)
+
+#define drm_warn_once(drm, fmt, ...) \
+ __drm_printk((drm), warn, _once, fmt, ##__VA_ARGS__)
+
+#define drm_err_once(drm, fmt, ...) \
+ __drm_printk((drm), err, _once, "*ERROR* " fmt, ##__VA_ARGS__)
+
+
+#define drm_err_ratelimited(drm, fmt, ...) \
+ __drm_printk((drm), err, _ratelimited, "*ERROR* " fmt, ##__VA_ARGS__)
+
+
+#define drm_dbg_core(drm, fmt, ...) \
+ drm_dev_dbg((drm)->dev, DRM_UT_CORE, fmt, ##__VA_ARGS__)
+#define drm_dbg(drm, fmt, ...) \
+ drm_dev_dbg((drm)->dev, DRM_UT_DRIVER, fmt, ##__VA_ARGS__)
+#define drm_dbg_kms(drm, fmt, ...) \
+ drm_dev_dbg((drm)->dev, DRM_UT_KMS, fmt, ##__VA_ARGS__)
+#define drm_dbg_prime(drm, fmt, ...) \
+ drm_dev_dbg((drm)->dev, DRM_UT_PRIME, fmt, ##__VA_ARGS__)
+#define drm_dbg_atomic(drm, fmt, ...) \
+ drm_dev_dbg((drm)->dev, DRM_UT_ATOMIC, fmt, ##__VA_ARGS__)
+#define drm_dbg_vbl(drm, fmt, ...) \
+ drm_dev_dbg((drm)->dev, DRM_UT_VBL, fmt, ##__VA_ARGS__)
+#define drm_dbg_state(drm, fmt, ...) \
+ drm_dev_dbg((drm)->dev, DRM_UT_STATE, fmt, ##__VA_ARGS__)
+#define drm_dbg_lease(drm, fmt, ...) \
+ drm_dev_dbg((drm)->dev, DRM_UT_LEASE, fmt, ##__VA_ARGS__)
+#define drm_dbg_dp(drm, fmt, ...) \
+ drm_dev_dbg((drm)->dev, DRM_UT_DP, fmt, ##__VA_ARGS__)
+
+
+/*
+ * printk based logging
+ *
+ * Prefer drm_device based logging over device or printk based logging.
+ */
+
+__printf(2, 3)
+void __drm_dbg(enum drm_debug_category category, const char *format, ...);
+__printf(1, 2)
+void __drm_err(const char *format, ...);
+
+/* Macros to make printk easier */
+
+#define _DRM_PRINTK(once, level, fmt, ...) \
+ printk##once(KERN_##level "[" DRM_NAME "] " fmt, ##__VA_ARGS__)
+
+#define DRM_INFO(fmt, ...) \
+ _DRM_PRINTK(, INFO, fmt, ##__VA_ARGS__)
+#define DRM_NOTE(fmt, ...) \
+ _DRM_PRINTK(, NOTICE, fmt, ##__VA_ARGS__)
+#define DRM_WARN(fmt, ...) \
+ _DRM_PRINTK(, WARNING, fmt, ##__VA_ARGS__)
+
+#define DRM_INFO_ONCE(fmt, ...) \
+ _DRM_PRINTK(_once, INFO, fmt, ##__VA_ARGS__)
+#define DRM_NOTE_ONCE(fmt, ...) \
+ _DRM_PRINTK(_once, NOTICE, fmt, ##__VA_ARGS__)
+#define DRM_WARN_ONCE(fmt, ...) \
+ _DRM_PRINTK(_once, WARNING, fmt, ##__VA_ARGS__)
+
+#define DRM_ERROR(fmt, ...) \
+ __drm_err(fmt, ##__VA_ARGS__)
+
+#define DRM_ERROR_RATELIMITED(fmt, ...) \
+ DRM_DEV_ERROR_RATELIMITED(NULL, fmt, ##__VA_ARGS__)
+
+#define DRM_DEBUG(fmt, ...) \
+ __drm_dbg(DRM_UT_CORE, fmt, ##__VA_ARGS__)
+
+#define DRM_DEBUG_DRIVER(fmt, ...) \
+ __drm_dbg(DRM_UT_DRIVER, fmt, ##__VA_ARGS__)
+
+#define DRM_DEBUG_KMS(fmt, ...) \
+ __drm_dbg(DRM_UT_KMS, fmt, ##__VA_ARGS__)
+
+#define DRM_DEBUG_PRIME(fmt, ...) \
+ __drm_dbg(DRM_UT_PRIME, fmt, ##__VA_ARGS__)
+
+#define DRM_DEBUG_ATOMIC(fmt, ...) \
+ __drm_dbg(DRM_UT_ATOMIC, fmt, ##__VA_ARGS__)
+
+#define DRM_DEBUG_VBL(fmt, ...) \
+ __drm_dbg(DRM_UT_VBL, fmt, ##__VA_ARGS__)
+
+#define DRM_DEBUG_LEASE(fmt, ...) \
+ __drm_dbg(DRM_UT_LEASE, fmt, ##__VA_ARGS__)
+
+#define DRM_DEBUG_DP(fmt, ...) \
+ __drm_dbg(DRM_UT_DP, fmt, ## __VA_ARGS__)
+
+
+#define DRM_DEBUG_RATELIMITED(fmt, ...) \
+ DRM_DEV_DEBUG_RATELIMITED(NULL, fmt, ##__VA_ARGS__)
+
+#define DRM_DEBUG_DRIVER_RATELIMITED(fmt, ...) \
+ DRM_DEV_DEBUG_DRIVER_RATELIMITED(NULL, fmt, ##__VA_ARGS__)
+
+#define DRM_DEBUG_KMS_RATELIMITED(fmt, ...) \
+ DRM_DEV_DEBUG_KMS_RATELIMITED(NULL, fmt, ##__VA_ARGS__)
+
#define DRM_DEBUG_PRIME_RATELIMITED(fmt, ...) \
DRM_DEV_DEBUG_PRIME_RATELIMITED(NULL, fmt, ##__VA_ARGS__)
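
A short sketch of the preferred drm_device based logging added above; my_log_example is hypothetical:

    #include <drm/drm_device.h>
    #include <drm/drm_print.h>

    static void my_log_example(struct drm_device *drm, int crtc_id)
    {
        drm_dbg_kms(drm, "enabling CRTC %d\n", crtc_id);

        if (drm_debug_enabled(DRM_UT_ATOMIC)) {
            /* Only build an expensive dump when ATOMIC debugging is on. */
            drm_dbg_atomic(drm, "full state dump follows\n");
        }

        if (crtc_id < 0)
            drm_err(drm, "invalid CRTC id %d\n", crtc_id);
    }
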
diff --git a/include/drm/drm_rect.h b/include/drm/drm_rect.h
index 6195820aa5c5..57a3be9e53e4 100644
--- a/include/drm/drm_rect.h
+++ b/include/drm/drm_rect.h
@@ -24,6 +24,8 @@
#ifndef DRM_RECT_H
#define DRM_RECT_H
+#include <linux/types.h>
+
/**
* DOC: rect utils
*
@@ -70,6 +72,23 @@ struct drm_rect {
(r)->y1 >> 16, (((r)->y1 & 0xffff) * 15625) >> 10
/**
+ * drm_rect_init - initialize the rectangle from x/y/w/h
+ * @r: rectangle
+ * @x: x coordinate
+ * @y: y coordinate
+ * @width: width
+ * @height: height
+ */
+static inline void drm_rect_init(struct drm_rect *r, int x, int y,
+ int width, int height)
+{
+ r->x1 = x;
+ r->y1 = y;
+ r->x2 = x + width;
+ r->y2 = y + height;
+}
+
+/**
* drm_rect_adjust_size - adjust the size of the rectangle
* @r: rectangle to be adjusted
* @dw: horizontal adjustment
@@ -107,6 +126,20 @@ static inline void drm_rect_translate(struct drm_rect *r, int dx, int dy)
}
/**
+ * drm_rect_translate_to - translate the rectangle to an absolute position
+ * @r: rectangle to be translated
+ * @x: horizontal position
+ * @y: vertical position
+ *
+ * Move rectangle @r to @x in the horizontal direction,
+ * and to @y in the vertical direction.
+ */
+static inline void drm_rect_translate_to(struct drm_rect *r, int x, int y)
+{
+ drm_rect_translate(r, x - r->x1, y - r->y1);
+}
+
+/**
* drm_rect_downscale - downscale a rectangle
* @r: rectangle to be downscaled
* @horz: horizontal downscale factor
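
A tiny sketch of the two new rectangle helpers; my_rect_example and the 1920x1080 numbers are illustrative only:

    #include <drm/drm_rect.h>

    static void my_rect_example(void)
    {
        struct drm_rect r;

        drm_rect_init(&r, 0, 0, 1920, 1080);   /* x1=0, y1=0, x2=1920, y2=1080 */
        drm_rect_translate_to(&r, 100, 50);     /* now x1=100, y1=50, x2=2020, y2=1130 */
    }
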
diff --git a/include/drm/drm_scdc_helper.h b/include/drm/drm_scdc_helper.h
index f92eb2094d6b..6a483533aae4 100644
--- a/include/drm/drm_scdc_helper.h
+++ b/include/drm/drm_scdc_helper.h
@@ -50,9 +50,9 @@
#define SCDC_READ_REQUEST_ENABLE (1 << 0)
#define SCDC_STATUS_FLAGS_0 0x40
-#define SCDC_CH2_LOCK (1 < 3)
-#define SCDC_CH1_LOCK (1 < 2)
-#define SCDC_CH0_LOCK (1 < 1)
+#define SCDC_CH2_LOCK (1 << 3)
+#define SCDC_CH1_LOCK (1 << 2)
+#define SCDC_CH0_LOCK (1 << 1)
#define SCDC_CH_LOCK_MASK (SCDC_CH2_LOCK | SCDC_CH1_LOCK | SCDC_CH0_LOCK)
#define SCDC_CLOCK_DETECT (1 << 0)
diff --git a/include/drm/drm_simple_kms_helper.h b/include/drm/drm_simple_kms_helper.h
index 4d89cd0a60db..15afee9cf049 100644
--- a/include/drm/drm_simple_kms_helper.h
+++ b/include/drm/drm_simple_kms_helper.h
@@ -49,7 +49,7 @@ struct drm_simple_display_pipe_funcs {
*
* drm_mode_status Enum
*/
- enum drm_mode_status (*mode_valid)(struct drm_crtc *crtc,
+ enum drm_mode_status (*mode_valid)(struct drm_simple_display_pipe *pipe,
const struct drm_display_mode *mode);
/**
diff --git a/include/drm/drm_util.h b/include/drm/drm_util.h
index 07b8e9f04599..79952d8c4bba 100644
--- a/include/drm/drm_util.h
+++ b/include/drm/drm_util.h
@@ -41,7 +41,7 @@
* Use EXPORT_SYMBOL_FOR_TESTS_ONLY() for functions that shall
* only be visible for drmselftests.
*/
-#if defined(CONFIG_DRM_DEBUG_SELFTEST_MODULE)
+#if defined(CONFIG_DRM_EXPORT_FOR_TESTS)
#define EXPORT_SYMBOL_FOR_TESTS_ONLY(x) EXPORT_SYMBOL(x)
#else
#define EXPORT_SYMBOL_FOR_TESTS_ONLY(x)
diff --git a/include/drm/drm_vblank.h b/include/drm/drm_vblank.h
index 9fe4ba8bc622..c16c44052b3d 100644
--- a/include/drm/drm_vblank.h
+++ b/include/drm/drm_vblank.h
@@ -109,9 +109,20 @@ struct drm_vblank_crtc {
seqlock_t seqlock;
/**
- * @count: Current software vblank counter.
+ * @count:
+ *
+ * Current software vblank counter.
+ *
+ * Note that for a given vblank counter value drm_crtc_handle_vblank()
+ * and drm_crtc_vblank_count() or drm_crtc_vblank_count_and_time()
+ * provide a barrier: Any writes done before calling
+ * drm_crtc_handle_vblank() will be visible to callers of the later
+ * functions, iff the vblank count is the same or a later one.
+ *
+	 * IMPORTANT: This guarantee requires barriers, therefore never access
+ * this field directly. Use drm_crtc_vblank_count() instead.
*/
- u64 count;
+ atomic64_t count;
/**
* @time: Vblank timestamp corresponding to @count.
*/
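
A minimal sketch honouring the rule above, reading the counter only through the accessor; my_read_vblank is hypothetical:

    #include <drm/drm_crtc.h>
    #include <drm/drm_vblank.h>

    static u64 my_read_vblank(struct drm_crtc *crtc)
    {
        /* Never read vblank->count directly; the accessor pairs the read
         * with @time under the seqlock and the documented barrier. */
        return drm_crtc_vblank_count(crtc);
    }
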
diff --git a/include/drm/drm_vram_mm_helper.h b/include/drm/drm_vram_mm_helper.h
deleted file mode 100644
index 2aacfb1ccfae..000000000000
--- a/include/drm/drm_vram_mm_helper.h
+++ /dev/null
@@ -1,104 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-
-#ifndef DRM_VRAM_MM_HELPER_H
-#define DRM_VRAM_MM_HELPER_H
-
-#include <drm/drm_file.h>
-#include <drm/drm_ioctl.h>
-#include <drm/ttm/ttm_bo_driver.h>
-
-struct drm_device;
-
-/**
- * struct drm_vram_mm_funcs - Callback functions for &struct drm_vram_mm
- * @evict_flags: Provides an implementation for struct \
- &ttm_bo_driver.evict_flags
- * @verify_access: Provides an implementation for \
- struct &ttm_bo_driver.verify_access
- *
- * These callback function integrate VRAM MM with TTM buffer objects. New
- * functions can be added if necessary.
- */
-struct drm_vram_mm_funcs {
- void (*evict_flags)(struct ttm_buffer_object *bo,
- struct ttm_placement *placement);
- int (*verify_access)(struct ttm_buffer_object *bo, struct file *filp);
-};
-
-/**
- * struct drm_vram_mm - An instance of VRAM MM
- * @vram_base: Base address of the managed video memory
- * @vram_size: Size of the managed video memory in bytes
- * @bdev: The TTM BO device.
- * @funcs: TTM BO functions
- *
- * The fields &struct drm_vram_mm.vram_base and
- * &struct drm_vram_mm.vrm_size are managed by VRAM MM, but are
- * available for public read access. Use the field
- * &struct drm_vram_mm.bdev to access the TTM BO device.
- */
-struct drm_vram_mm {
- uint64_t vram_base;
- size_t vram_size;
-
- struct ttm_bo_device bdev;
-
- const struct drm_vram_mm_funcs *funcs;
-};
-
-/**
- * drm_vram_mm_of_bdev() - \
- Returns the container of type &struct ttm_bo_device for field bdev.
- * @bdev: the TTM BO device
- *
- * Returns:
- * The containing instance of &struct drm_vram_mm
- */
-static inline struct drm_vram_mm *drm_vram_mm_of_bdev(
- struct ttm_bo_device *bdev)
-{
- return container_of(bdev, struct drm_vram_mm, bdev);
-}
-
-int drm_vram_mm_init(struct drm_vram_mm *vmm, struct drm_device *dev,
- uint64_t vram_base, size_t vram_size,
- const struct drm_vram_mm_funcs *funcs);
-void drm_vram_mm_cleanup(struct drm_vram_mm *vmm);
-
-int drm_vram_mm_mmap(struct file *filp, struct vm_area_struct *vma,
- struct drm_vram_mm *vmm);
-
-/*
- * Helpers for integration with struct drm_device
- */
-
-struct drm_vram_mm *drm_vram_helper_alloc_mm(
- struct drm_device *dev, uint64_t vram_base, size_t vram_size,
- const struct drm_vram_mm_funcs *funcs);
-void drm_vram_helper_release_mm(struct drm_device *dev);
-
-/*
- * Helpers for &struct file_operations
- */
-
-int drm_vram_mm_file_operations_mmap(
- struct file *filp, struct vm_area_struct *vma);
-
-/**
- * define DRM_VRAM_MM_FILE_OPERATIONS - default callback functions for \
- &struct file_operations
- *
- * Drivers that use VRAM MM can use this macro to initialize
- * &struct file_operations with default functions.
- */
-#define DRM_VRAM_MM_FILE_OPERATIONS \
- .llseek = no_llseek, \
- .read = drm_read, \
- .poll = drm_poll, \
- .unlocked_ioctl = drm_ioctl, \
- .compat_ioctl = drm_compat_ioctl, \
- .mmap = drm_vram_mm_file_operations_mmap, \
- .open = drm_open, \
- .release = drm_release \
-
-#endif
diff --git a/include/drm/gpu_scheduler.h b/include/drm/gpu_scheduler.h
index 57b4121c750a..589be851f8a1 100644
--- a/include/drm/gpu_scheduler.h
+++ b/include/drm/gpu_scheduler.h
@@ -26,6 +26,7 @@
#include <drm/spsc_queue.h>
#include <linux/dma-fence.h>
+#include <linux/completion.h>
#define MAX_WAIT_SCHED_ENTITY_Q_EMPTY msecs_to_jiffies(1000)
@@ -51,9 +52,10 @@ enum drm_sched_priority {
* @list: used to append this struct to the list of entities in the
* runqueue.
* @rq: runqueue on which this entity is currently scheduled.
- * @rq_list: a list of run queues on which jobs from this entity can
- * be scheduled
- * @num_rq_list: number of run queues in the rq_list
+ * @sched_list: A list of schedulers (drm_gpu_schedulers).
+ * Jobs from this entity can be scheduled on any scheduler
+ * on this list.
+ * @num_sched_list: number of drm_gpu_schedulers in the sched_list.
* @rq_lock: lock to modify the runqueue to which this entity belongs.
* @job_queue: the list of jobs of this entity.
* @fence_seq: a linearly increasing seqno incremented with each
@@ -71,6 +73,7 @@ enum drm_sched_priority {
* @last_scheduled: points to the finished fence of the last scheduled job.
* @last_user: last group leader pushing a job into the entity.
* @stopped: Marks the enity as removed from rq and destined for termination.
+ * @entity_idle: Signals when the entity is not in use
*
* Entities will emit jobs in order to their corresponding hardware
* ring, and the scheduler will alternate between entities based on
@@ -79,8 +82,9 @@ enum drm_sched_priority {
struct drm_sched_entity {
struct list_head list;
struct drm_sched_rq *rq;
- struct drm_sched_rq **rq_list;
- unsigned int num_rq_list;
+ struct drm_gpu_scheduler **sched_list;
+ unsigned int num_sched_list;
+ enum drm_sched_priority priority;
spinlock_t rq_lock;
struct spsc_queue job_queue;
@@ -94,6 +98,7 @@ struct drm_sched_entity {
struct dma_fence *last_scheduled;
struct task_struct *last_user;
bool stopped;
+ struct completion entity_idle;
};
/**
@@ -257,7 +262,7 @@ struct drm_sched_backend_ops {
* @job_list_lock: lock to protect the ring_mirror_list.
* @hang_limit: once the hangs by a job crosses this limit then it is marked
* guilty and it will be considered for scheduling further.
- * @num_jobs: the number of jobs in queue in the scheduler
+ * @score: score to help the load balancer pick an idle scheduler
* @ready: marks if the underlying HW is ready to work
* @free_guilty: A hit to time out handler to free the guilty job.
*
@@ -278,8 +283,8 @@ struct drm_gpu_scheduler {
struct list_head ring_mirror_list;
spinlock_t job_list_lock;
int hang_limit;
- atomic_t num_jobs;
- bool ready;
+ atomic_t score;
+ bool ready;
bool free_guilty;
};
@@ -309,8 +314,9 @@ void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
struct drm_sched_entity *entity);
int drm_sched_entity_init(struct drm_sched_entity *entity,
- struct drm_sched_rq **rq_list,
- unsigned int num_rq_list,
+ enum drm_sched_priority priority,
+ struct drm_gpu_scheduler **sched_list,
+ unsigned int num_sched_list,
atomic_t *guilty);
long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout);
void drm_sched_entity_fini(struct drm_sched_entity *entity);
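
A hedged sketch of the new entity initialization; my_entity_setup is hypothetical, DRM_SCHED_PRIORITY_NORMAL is assumed to be an available priority level, and the scheduler array would normally live as long as the entity:

    #include <linux/kernel.h>
    #include <drm/gpu_scheduler.h>

    static int my_entity_setup(struct drm_sched_entity *entity,
                               struct drm_gpu_scheduler *sched)
    {
        /* With more than one scheduler the list must outlive the entity;
         * a single-entry list is shown here for brevity. */
        struct drm_gpu_scheduler *sched_list[] = { sched };

        return drm_sched_entity_init(entity, DRM_SCHED_PRIORITY_NORMAL,
                                     sched_list, ARRAY_SIZE(sched_list),
                                     NULL /* no shared guilty counter */);
    }
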
diff --git a/include/drm/i915_drm.h b/include/drm/i915_drm.h
index 23274cf92712..6722005884db 100644
--- a/include/drm/i915_drm.h
+++ b/include/drm/i915_drm.h
@@ -100,22 +100,4 @@ extern struct resource intel_graphics_stolen_res;
#define INTEL_GEN11_BSM_DW1 0xc4
#define INTEL_BSM_MASK (-(1u << 20))
-enum port {
- PORT_NONE = -1,
-
- PORT_A = 0,
- PORT_B,
- PORT_C,
- PORT_D,
- PORT_E,
- PORT_F,
- PORT_G,
- PORT_H,
- PORT_I,
-
- I915_MAX_PORTS
-};
-
-#define port_name(p) ((p) + 'A')
-
#endif /* _I915_DRM_H_ */
diff --git a/include/drm/i915_mei_hdcp_interface.h b/include/drm/i915_mei_hdcp_interface.h
index 8c344255146a..4d48de8890ca 100644
--- a/include/drm/i915_mei_hdcp_interface.h
+++ b/include/drm/i915_mei_hdcp_interface.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: (GPL-2.0+) */
/*
- * Copyright © 2017-2018 Intel Corporation
+ * Copyright © 2017-2019 Intel Corporation
*
* Authors:
* Ramalingam C <ramalingam.c@intel.com>
@@ -42,9 +42,44 @@ enum hdcp_wired_protocol {
HDCP_PROTOCOL_DP
};
+enum mei_fw_ddi {
+ MEI_DDI_INVALID_PORT = 0x0,
+
+ MEI_DDI_B = 1,
+ MEI_DDI_C,
+ MEI_DDI_D,
+ MEI_DDI_E,
+ MEI_DDI_F,
+ MEI_DDI_A = 7,
+ MEI_DDI_RANGE_END = MEI_DDI_A,
+};
+
+/**
+ * enum mei_fw_tc - ME Firmware defined index for transcoders
+ * @MEI_INVALID_TRANSCODER: Index for Invalid transcoder
+ * @MEI_TRANSCODER_EDP: Index for EDP Transcoder
+ * @MEI_TRANSCODER_DSI0: Index for DSI0 Transcoder
+ * @MEI_TRANSCODER_DSI1: Index for DSI1 Transcoder
+ * @MEI_TRANSCODER_A: Index for Transcoder A
+ * @MEI_TRANSCODER_B: Index for Transcoder B
+ * @MEI_TRANSCODER_C: Index for Transcoder C
+ * @MEI_TRANSCODER_D: Index for Transcoder D
+ */
+enum mei_fw_tc {
+ MEI_INVALID_TRANSCODER = 0x00,
+ MEI_TRANSCODER_EDP,
+ MEI_TRANSCODER_DSI0,
+ MEI_TRANSCODER_DSI1,
+ MEI_TRANSCODER_A = 0x10,
+ MEI_TRANSCODER_B,
+ MEI_TRANSCODER_C,
+ MEI_TRANSCODER_D
+};
+
/**
* struct hdcp_port_data - intel specific HDCP port data
- * @port: port index as per I915
+ * @fw_ddi: ddi index as per ME FW
+ * @fw_tc: transcoder index as per ME FW
* @port_type: HDCP port type as per ME FW classification
* @protocol: HDCP adaptation as per ME FW
* @k: No of streams transmitted on a port. Only on DP MST this is != 1
@@ -56,7 +91,8 @@ enum hdcp_wired_protocol {
* streams
*/
struct hdcp_port_data {
- enum port port;
+ enum mei_fw_ddi fw_ddi;
+ enum mei_fw_tc fw_tc;
u8 port_type;
u8 protocol;
u16 k;
diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h
index b1f66b117c74..1d2c12219f44 100644
--- a/include/drm/i915_pciids.h
+++ b/include/drm/i915_pciids.h
@@ -446,23 +446,18 @@
/* CML GT1 */
#define INTEL_CML_GT1_IDS(info) \
- INTEL_VGA_DEVICE(0x9B21, info), \
- INTEL_VGA_DEVICE(0x9BAA, info), \
- INTEL_VGA_DEVICE(0x9BAB, info), \
- INTEL_VGA_DEVICE(0x9BAC, info), \
- INTEL_VGA_DEVICE(0x9BA0, info), \
INTEL_VGA_DEVICE(0x9BA5, info), \
INTEL_VGA_DEVICE(0x9BA8, info), \
INTEL_VGA_DEVICE(0x9BA4, info), \
INTEL_VGA_DEVICE(0x9BA2, info)
+#define INTEL_CML_U_GT1_IDS(info) \
+ INTEL_VGA_DEVICE(0x9B21, info), \
+ INTEL_VGA_DEVICE(0x9BAA, info), \
+ INTEL_VGA_DEVICE(0x9BAC, info)
+
/* CML GT2 */
#define INTEL_CML_GT2_IDS(info) \
- INTEL_VGA_DEVICE(0x9B41, info), \
- INTEL_VGA_DEVICE(0x9BCA, info), \
- INTEL_VGA_DEVICE(0x9BCB, info), \
- INTEL_VGA_DEVICE(0x9BCC, info), \
- INTEL_VGA_DEVICE(0x9BC0, info), \
INTEL_VGA_DEVICE(0x9BC5, info), \
INTEL_VGA_DEVICE(0x9BC8, info), \
INTEL_VGA_DEVICE(0x9BC4, info), \
@@ -471,6 +466,11 @@
INTEL_VGA_DEVICE(0x9BE6, info), \
INTEL_VGA_DEVICE(0x9BF6, info)
+#define INTEL_CML_U_GT2_IDS(info) \
+ INTEL_VGA_DEVICE(0x9B41, info), \
+ INTEL_VGA_DEVICE(0x9BCA, info), \
+ INTEL_VGA_DEVICE(0x9BCC, info)
+
#define INTEL_KBL_IDS(info) \
INTEL_KBL_GT1_IDS(info), \
INTEL_KBL_GT2_IDS(info), \
@@ -536,7 +536,9 @@
INTEL_WHL_U_GT3_IDS(info), \
INTEL_AML_CFL_GT2_IDS(info), \
INTEL_CML_GT1_IDS(info), \
- INTEL_CML_GT2_IDS(info)
+ INTEL_CML_GT2_IDS(info), \
+ INTEL_CML_U_GT1_IDS(info), \
+ INTEL_CML_U_GT2_IDS(info)
/* CNL */
#define INTEL_CNL_PORT_F_IDS(info) \
@@ -579,12 +581,15 @@
INTEL_VGA_DEVICE(0x8A51, info), \
INTEL_VGA_DEVICE(0x8A5D, info)
-/* EHL */
+/* EHL/JSL */
#define INTEL_EHL_IDS(info) \
INTEL_VGA_DEVICE(0x4500, info), \
INTEL_VGA_DEVICE(0x4571, info), \
INTEL_VGA_DEVICE(0x4551, info), \
- INTEL_VGA_DEVICE(0x4541, info)
+ INTEL_VGA_DEVICE(0x4541, info), \
+ INTEL_VGA_DEVICE(0x4E71, info), \
+ INTEL_VGA_DEVICE(0x4E61, info), \
+ INTEL_VGA_DEVICE(0x4E51, info)
/* TGL */
#define INTEL_TGL_12_IDS(info) \
diff --git a/include/drm/task_barrier.h b/include/drm/task_barrier.h
new file mode 100644
index 000000000000..087e3f649c52
--- /dev/null
+++ b/include/drm/task_barrier.h
@@ -0,0 +1,107 @@
+/*
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#include <linux/semaphore.h>
+#include <linux/atomic.h>
+
+/*
+ * Reusable two-phase task barrier (rendezvous point) implementation for N tasks.
+ * Based on The Little Book of Semaphores - https://greenteapress.com/wp/semaphores/
+ */
+
+
+
+#ifndef DRM_TASK_BARRIER_H_
+#define DRM_TASK_BARRIER_H_
+
+/*
+ * Represents an instance of a task barrier.
+ */
+struct task_barrier {
+ unsigned int n;
+ atomic_t count;
+ struct semaphore enter_turnstile;
+ struct semaphore exit_turnstile;
+};
+
+static inline void task_barrier_signal_turnstile(struct semaphore *turnstile,
+ unsigned int n)
+{
+ int i;
+
+ for (i = 0 ; i < n; i++)
+ up(turnstile);
+}
+
+static inline void task_barrier_init(struct task_barrier *tb)
+{
+ tb->n = 0;
+ atomic_set(&tb->count, 0);
+ sema_init(&tb->enter_turnstile, 0);
+ sema_init(&tb->exit_turnstile, 0);
+}
+
+static inline void task_barrier_add_task(struct task_barrier *tb)
+{
+ tb->n++;
+}
+
+static inline void task_barrier_rem_task(struct task_barrier *tb)
+{
+ tb->n--;
+}
+
+/*
+ * Lines up all the threads BEFORE the critical point.
+ *
+ * When all threads have passed this code, the entry barrier is back to its locked state.
+ */
+static inline void task_barrier_enter(struct task_barrier *tb)
+{
+ if (atomic_inc_return(&tb->count) == tb->n)
+ task_barrier_signal_turnstile(&tb->enter_turnstile, tb->n);
+
+ down(&tb->enter_turnstile);
+}
+
+/*
+ * Lines up all the threads AFTER the critical point.
+ *
+ * This function is used to avoid any one thread running ahead if the barrier is
+ * used repeatedly.
+ */
+static inline void task_barrier_exit(struct task_barrier *tb)
+{
+ if (atomic_dec_return(&tb->count) == 0)
+ task_barrier_signal_turnstile(&tb->exit_turnstile, tb->n);
+
+ down(&tb->exit_turnstile);
+}
+
+/* Convenience function when nothing needs to be done between entry and exit */
+static inline void task_barrier_full(struct task_barrier *tb)
+{
+ task_barrier_enter(tb);
+ task_barrier_exit(tb);
+}
+
+#endif
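
A hedged sketch of the intended usage pattern for the barrier above; my_worker_step is hypothetical:

    #include <drm/task_barrier.h>

    /* N worker threads rendezvous around a critical section.
     * task_barrier_add_task() must have been called once per participating
     * thread before any of them enters. */
    static void my_worker_step(struct task_barrier *tb)
    {
        task_barrier_enter(tb);   /* blocks until all tb->n tasks arrive */

        /* ... work that all tasks must start together ... */

        task_barrier_exit(tb);    /* blocks again so nobody runs ahead */
    }
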
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index 43c4929a2171..66ca49db9633 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -147,7 +147,6 @@ struct ttm_tt;
* holds a pointer to a persistent shmem object.
* @ttm: TTM structure holding system pages.
* @evicted: Whether the object was evicted without user-space knowing.
- * @cpu_writes: For synchronization. Number of cpu writers.
* @lru: List head for the lru list.
* @ddestroy: List head for the delayed destroy list.
* @swap: List head for swap LRU list.
@@ -155,7 +154,6 @@ struct ttm_tt;
* @offset: The current GPU offset, which can have different meanings
* depending on the memory type. For SYSTEM type memory, it should be 0.
* @cur_placement: Hint of current placement.
- * @wu_mutex: Wait unreserved mutex.
*
* Base class for TTM buffer object, that deals with data placement and CPU
* mappings. GPU mappings are really up to the driver, but for simpler GPUs
@@ -199,12 +197,6 @@ struct ttm_buffer_object {
bool evicted;
/**
- * Members protected by the bo::reserved lock only when written to.
- */
-
- atomic_t cpu_writers;
-
- /**
* Members protected by the bdev::lru_lock.
*/
@@ -229,8 +221,6 @@ struct ttm_buffer_object {
uint64_t offset; /* GPU address space is independent of CPU word size */
struct sg_table *sg;
-
- struct mutex wu_mutex;
};
/**
@@ -368,30 +358,6 @@ int ttm_bo_validate(struct ttm_buffer_object *bo,
void ttm_bo_put(struct ttm_buffer_object *bo);
/**
- * ttm_bo_add_to_lru
- *
- * @bo: The buffer object.
- *
- * Add this bo to the relevant mem type lru and, if it's backed by
- * system pages (ttms) to the swap list.
- * This function must be called with struct ttm_bo_global::lru_lock held, and
- * is typically called immediately prior to unreserving a bo.
- */
-void ttm_bo_add_to_lru(struct ttm_buffer_object *bo);
-
-/**
- * ttm_bo_del_from_lru
- *
- * @bo: The buffer object.
- *
- * Remove this bo from all lru lists used to lookup and reserve an object.
- * This function must be called with struct ttm_bo_global::lru_lock held,
- * and is usually called just immediately after the bo has been reserved to
- * avoid recursive reservation from lru lists.
- */
-void ttm_bo_del_from_lru(struct ttm_buffer_object *bo);
-
-/**
* ttm_bo_move_to_lru_tail
*
* @bo: The buffer object.
@@ -442,31 +408,6 @@ bool ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
const struct ttm_place *place);
/**
- * ttm_bo_synccpu_write_grab
- *
- * @bo: The buffer object:
- * @no_wait: Return immediately if buffer is busy.
- *
- * Synchronizes a buffer object for CPU RW access. This means
- * command submission that affects the buffer will return -EBUSY
- * until ttm_bo_synccpu_write_release is called.
- *
- * Returns
- * -EBUSY if the buffer is busy and no_wait is true.
- * -ERESTARTSYS if interrupted by a signal.
- */
-int ttm_bo_synccpu_write_grab(struct ttm_buffer_object *bo, bool no_wait);
-
-/**
- * ttm_bo_synccpu_write_release:
- *
- * @bo : The buffer object.
- *
- * Releases a synccpu lock.
- */
-void ttm_bo_synccpu_write_release(struct ttm_buffer_object *bo);
-
-/**
* ttm_bo_acc_size
*
* @bdev: Pointer to a ttm_bo_device struct.
@@ -710,16 +651,14 @@ int ttm_bo_kmap(struct ttm_buffer_object *bo, unsigned long start_page,
void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map);
/**
- * ttm_fbdev_mmap - mmap fbdev memory backed by a ttm buffer object.
+ * ttm_bo_mmap_obj - mmap memory backed by a ttm buffer object.
*
* @vma: vma as input from the fbdev mmap method.
- * @bo: The bo backing the address space. The address space will
- * have the same size as the bo, and start at offset 0.
+ * @bo: The bo backing the address space.
*
- * This function is intended to be called by the fbdev mmap method
- * if the fbdev address space is to be backed by a bo.
+ * Maps a buffer object.
*/
-int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo);
+int ttm_bo_mmap_obj(struct vm_area_struct *vma, struct ttm_buffer_object *bo);
/**
* ttm_bo_mmap - mmap out of the ttm device address space.
@@ -765,7 +704,6 @@ ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp,
int ttm_bo_swapout(struct ttm_bo_global *glob,
struct ttm_operation_ctx *ctx);
void ttm_bo_swapout_all(struct ttm_bo_device *bdev);
-int ttm_bo_wait_unreserved(struct ttm_buffer_object *bo);
/**
* ttm_bo_uses_embedded_gem_object - check if the given bo uses the
@@ -785,4 +723,24 @@ static inline bool ttm_bo_uses_embedded_gem_object(struct ttm_buffer_object *bo)
{
return bo->base.dev != NULL;
}
+
+/* Default number of pre-faulted pages in the TTM fault handler */
+#define TTM_BO_VM_NUM_PREFAULT 16
+
+vm_fault_t ttm_bo_vm_reserve(struct ttm_buffer_object *bo,
+ struct vm_fault *vmf);
+
+vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
+ pgprot_t prot,
+ pgoff_t num_prefault);
+
+vm_fault_t ttm_bo_vm_fault(struct vm_fault *vmf);
+
+void ttm_bo_vm_open(struct vm_area_struct *vma);
+
+void ttm_bo_vm_close(struct vm_area_struct *vma);
+
+int ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr,
+ void *buf, int len, int write);
+
#endif
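
The ttm_bo_vm_* helpers exported in the hunk above let a driver compose its own fault handler instead of relying on TTM's private one, and ttm_bo_mmap_obj() replaces the old fbdev-only mmap entry point. Below is a minimal sketch, not part of the patch, that mirrors what the generic ttm_bo_vm_fault() does while leaving room for driver-specific work; the my_bo_fault / my_ttm_vm_ops names and the placement comment are made up for illustration, and only the declarations added in this hunk are assumed.

#include <linux/mm.h>
#include <linux/dma-resv.h>
#include <drm/ttm/ttm_bo_api.h>

static vm_fault_t my_bo_fault(struct vm_fault *vmf)
{
	struct ttm_buffer_object *bo = vmf->vma->vm_private_data;
	vm_fault_t ret;

	/* Lock bo->base.resv, or ask the core to retry the fault. */
	ret = ttm_bo_vm_reserve(bo, vmf);
	if (ret)
		return ret;

	/* Driver-specific work (placement checks, migration) would go here. */

	ret = ttm_bo_vm_fault_reserved(vmf, vm_get_page_prot(vmf->vma->vm_flags),
				       TTM_BO_VM_NUM_PREFAULT);
	if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
		return ret;	/* as in ttm_bo_vm_fault(): lock already dropped */

	dma_resv_unlock(bo->base.resv);
	return ret;
}

static const struct vm_operations_struct my_ttm_vm_ops = {
	.fault	= my_bo_fault,
	.open	= ttm_bo_vm_open,
	.close	= ttm_bo_vm_close,
	.access	= ttm_bo_vm_access,
};

On success ttm_bo_vm_reserve() leaves the reservation held, which is why the handler unlocks it everywhere except the retry path.
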
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index 6f536caea368..cac7a8a0825a 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -423,7 +423,6 @@ extern struct ttm_bo_global {
*/
struct kobject kobj;
- struct ttm_mem_global *mem_glob;
struct page *dummy_read_page;
spinlock_t lru_lock;
@@ -451,7 +450,7 @@ extern struct ttm_bo_global {
*
* @driver: Pointer to a struct ttm_bo_driver struct setup by the driver.
* @man: An array of mem_type_managers.
- * @vma_manager: Address space manager
+ * @vma_manager: Address space manager (pointer)
* lru_lock: Spinlock that protects the buffer+device lru lists and
* ddestroy lists.
* @dev_mapping: A pointer to the struct address_space representing the
@@ -467,14 +466,13 @@ struct ttm_bo_device {
* Constant after bo device init / atomic.
*/
struct list_head device_list;
- struct ttm_bo_global *glob;
struct ttm_bo_driver *driver;
struct ttm_mem_type_manager man[TTM_NUM_MEM_TYPES];
/*
* Protected by internal locks.
*/
- struct drm_vma_offset_manager vma_manager;
+ struct drm_vma_offset_manager *vma_manager;
/*
* Protected by the global:lru lock.
@@ -595,6 +593,7 @@ int ttm_bo_device_release(struct ttm_bo_device *bdev);
* @glob: A pointer to an initialized struct ttm_bo_global.
* @driver: A pointer to a struct ttm_bo_driver set up by the caller.
* @mapping: The address space to use for this bo.
+ * @vma_manager: A pointer to a vma manager.
* @file_page_offset: Offset into the device address space that is available
* for buffer data. This ensures compatibility with other users of the
* address space.
@@ -606,6 +605,7 @@ int ttm_bo_device_release(struct ttm_bo_device *bdev);
int ttm_bo_device_init(struct ttm_bo_device *bdev,
struct ttm_bo_driver *driver,
struct address_space *mapping,
+ struct drm_vma_offset_manager *vma_manager,
bool need_dma32);
/**
@@ -629,9 +629,6 @@ void ttm_mem_io_free_vm(struct ttm_buffer_object *bo);
int ttm_mem_io_lock(struct ttm_mem_type_manager *man, bool interruptible);
void ttm_mem_io_unlock(struct ttm_mem_type_manager *man);
-void ttm_bo_del_sub_from_lru(struct ttm_buffer_object *bo);
-void ttm_bo_add_to_lru(struct ttm_buffer_object *bo);
-
/**
* __ttm_bo_reserve:
*
@@ -725,15 +722,9 @@ static inline int ttm_bo_reserve(struct ttm_buffer_object *bo,
bool interruptible, bool no_wait,
struct ww_acquire_ctx *ticket)
{
- int ret;
-
WARN_ON(!kref_read(&bo->kref));
- ret = __ttm_bo_reserve(bo, interruptible, no_wait, ticket);
- if (likely(ret == 0))
- ttm_bo_del_sub_from_lru(bo);
-
- return ret;
+ return __ttm_bo_reserve(bo, interruptible, no_wait, ticket);
}
/**
@@ -760,9 +751,7 @@ static inline int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo,
else
dma_resv_lock_slow(bo->base.resv, ticket);
- if (likely(ret == 0))
- ttm_bo_del_sub_from_lru(bo);
- else if (ret == -EINTR)
+ if (ret == -EINTR)
ret = -ERESTARTSYS;
return ret;
@@ -777,12 +766,9 @@ static inline int ttm_bo_reserve_slowpath(struct ttm_buffer_object *bo,
*/
static inline void ttm_bo_unreserve(struct ttm_buffer_object *bo)
{
- spin_lock(&bo->bdev->glob->lru_lock);
- if (list_empty(&bo->lru))
- ttm_bo_add_to_lru(bo);
- else
- ttm_bo_move_to_lru_tail(bo, NULL);
- spin_unlock(&bo->bdev->glob->lru_lock);
+ spin_lock(&ttm_bo_glob.lru_lock);
+ ttm_bo_move_to_lru_tail(bo, NULL);
+ spin_unlock(&ttm_bo_glob.lru_lock);
dma_resv_unlock(bo->base.resv);
}
diff --git a/include/drm/ttm/ttm_execbuf_util.h b/include/drm/ttm/ttm_execbuf_util.h
index 7e46cc678e7e..5a19843bb80d 100644
--- a/include/drm/ttm/ttm_execbuf_util.h
+++ b/include/drm/ttm/ttm_execbuf_util.h
@@ -99,7 +99,7 @@ extern void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
extern int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
struct list_head *list, bool intr,
- struct list_head *dups, bool del_lru);
+ struct list_head *dups);
/**
* function ttm_eu_fence_buffer_objects.
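
Since reserved buffers no longer leave the LRU, ttm_eu_reserve_buffers() drops its del_lru argument. A hedged sketch of the usual reserve/submit/fence sequence with the new four-argument form; validation_list (a driver-built list of struct ttm_validate_buffer), my_submit_job and fence are placeholders, not part of the patch.

#include <linux/dma-fence.h>
#include <drm/ttm/ttm_execbuf_util.h>

static int my_submit_job(struct list_head *validation_list,
			 struct dma_fence *fence)
{
	struct ww_acquire_ctx ticket;
	LIST_HEAD(duplicates);
	int ret;

	ret = ttm_eu_reserve_buffers(&ticket, validation_list,
				     true /* interruptible */, &duplicates);
	if (ret)
		return ret;

	/* ... validate placements and emit commands here ... */

	/* Attaches the fence and unreserves every buffer on the list. */
	ttm_eu_fence_buffer_objects(&ticket, validation_list, fence);
	return 0;
}
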
diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h
index 3ff48a0a2d7b..c78ea99c42cf 100644
--- a/include/drm/ttm/ttm_memory.h
+++ b/include/drm/ttm/ttm_memory.h
@@ -65,7 +65,6 @@
struct ttm_mem_zone;
extern struct ttm_mem_global {
struct kobject kobj;
- struct ttm_bo_global *bo_glob;
struct workqueue_struct *swap_queue;
struct work_struct work;
spinlock_t lock;
diff --git a/include/drm/ttm/ttm_page_alloc.h b/include/drm/ttm/ttm_page_alloc.h
index 4d9b019d253c..a6b6ef5f9bf4 100644
--- a/include/drm/ttm/ttm_page_alloc.h
+++ b/include/drm/ttm/ttm_page_alloc.h
@@ -74,7 +74,7 @@ void ttm_unmap_and_unpopulate_pages(struct device *dev, struct ttm_dma_tt *tt);
*/
int ttm_page_alloc_debugfs(struct seq_file *m, void *data);
-#if defined(CONFIG_SWIOTLB) || defined(CONFIG_INTEL_IOMMU)
+#if defined(CONFIG_DRM_TTM_DMA_PAGE_POOL)
/**
* Initialize pool allocator.
*/
diff --git a/include/dt-bindings/clock/aspeed-clock.h b/include/dt-bindings/clock/aspeed-clock.h
index f43738607d77..9ff4f6e4558c 100644
--- a/include/dt-bindings/clock/aspeed-clock.h
+++ b/include/dt-bindings/clock/aspeed-clock.h
@@ -39,6 +39,8 @@
#define ASPEED_CLK_BCLK 33
#define ASPEED_CLK_MPLL 34
#define ASPEED_CLK_24M 35
+#define ASPEED_CLK_MAC1RCLK 36
+#define ASPEED_CLK_MAC2RCLK 37
#define ASPEED_RESET_XDMA 0
#define ASPEED_RESET_MCTP 1
diff --git a/include/dt-bindings/clock/ast2600-clock.h b/include/dt-bindings/clock/ast2600-clock.h
index 38074a5f7296..62b9520a00fd 100644
--- a/include/dt-bindings/clock/ast2600-clock.h
+++ b/include/dt-bindings/clock/ast2600-clock.h
@@ -83,6 +83,10 @@
#define ASPEED_CLK_MAC12 64
#define ASPEED_CLK_MAC34 65
#define ASPEED_CLK_USBPHY_40M 66
+#define ASPEED_CLK_MAC1RCLK 67
+#define ASPEED_CLK_MAC2RCLK 68
+#define ASPEED_CLK_MAC3RCLK 69
+#define ASPEED_CLK_MAC4RCLK 70
/* Only list resets here that are not part of a gate */
#define ASPEED_RESET_ADC 55
diff --git a/include/dt-bindings/clock/axg-audio-clkc.h b/include/dt-bindings/clock/axg-audio-clkc.h
index 75901c636893..f561f5c5ef8f 100644
--- a/include/dt-bindings/clock/axg-audio-clkc.h
+++ b/include/dt-bindings/clock/axg-audio-clkc.h
@@ -80,5 +80,15 @@
#define AUD_CLKID_TDM_SCLK_PAD0 160
#define AUD_CLKID_TDM_SCLK_PAD1 161
#define AUD_CLKID_TDM_SCLK_PAD2 162
+#define AUD_CLKID_TOP 163
+#define AUD_CLKID_TORAM 164
+#define AUD_CLKID_EQDRC 165
+#define AUD_CLKID_RESAMPLE_B 166
+#define AUD_CLKID_TOVAD 167
+#define AUD_CLKID_LOCKER 168
+#define AUD_CLKID_SPDIFIN_LB 169
+#define AUD_CLKID_FRDDR_D 170
+#define AUD_CLKID_TODDR_D 171
+#define AUD_CLKID_LOOPBACK_B 172
#endif /* __AXG_AUDIO_CLKC_BINDINGS_H */
diff --git a/include/dt-bindings/clock/bm1880-clock.h b/include/dt-bindings/clock/bm1880-clock.h
new file mode 100644
index 000000000000..b46732361b25
--- /dev/null
+++ b/include/dt-bindings/clock/bm1880-clock.h
@@ -0,0 +1,82 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Device Tree binding constants for Bitmain BM1880 SoC
+ *
+ * Copyright (c) 2019 Linaro Ltd.
+ */
+
+#ifndef __DT_BINDINGS_CLOCK_BM1880_H
+#define __DT_BINDINGS_CLOCK_BM1880_H
+
+#define BM1880_CLK_OSC 0
+#define BM1880_CLK_MPLL 1
+#define BM1880_CLK_SPLL 2
+#define BM1880_CLK_FPLL 3
+#define BM1880_CLK_DDRPLL 4
+#define BM1880_CLK_A53 5
+#define BM1880_CLK_50M_A53 6
+#define BM1880_CLK_AHB_ROM 7
+#define BM1880_CLK_AXI_SRAM 8
+#define BM1880_CLK_DDR_AXI 9
+#define BM1880_CLK_EFUSE 10
+#define BM1880_CLK_APB_EFUSE 11
+#define BM1880_CLK_AXI5_EMMC 12
+#define BM1880_CLK_EMMC 13
+#define BM1880_CLK_100K_EMMC 14
+#define BM1880_CLK_AXI5_SD 15
+#define BM1880_CLK_SD 16
+#define BM1880_CLK_100K_SD 17
+#define BM1880_CLK_500M_ETH0 18
+#define BM1880_CLK_AXI4_ETH0 19
+#define BM1880_CLK_500M_ETH1 20
+#define BM1880_CLK_AXI4_ETH1 21
+#define BM1880_CLK_AXI1_GDMA 22
+#define BM1880_CLK_APB_GPIO 23
+#define BM1880_CLK_APB_GPIO_INTR 24
+#define BM1880_CLK_GPIO_DB 25
+#define BM1880_CLK_AXI1_MINER 26
+#define BM1880_CLK_AHB_SF 27
+#define BM1880_CLK_SDMA_AXI 28
+#define BM1880_CLK_SDMA_AUD 29
+#define BM1880_CLK_APB_I2C 30
+#define BM1880_CLK_APB_WDT 31
+#define BM1880_CLK_APB_JPEG 32
+#define BM1880_CLK_JPEG_AXI 33
+#define BM1880_CLK_AXI5_NF 34
+#define BM1880_CLK_APB_NF 35
+#define BM1880_CLK_NF 36
+#define BM1880_CLK_APB_PWM 37
+#define BM1880_CLK_DIV_0_RV 38
+#define BM1880_CLK_DIV_1_RV 39
+#define BM1880_CLK_MUX_RV 40
+#define BM1880_CLK_RV 41
+#define BM1880_CLK_APB_SPI 42
+#define BM1880_CLK_TPU_AXI 43
+#define BM1880_CLK_DIV_UART_500M 44
+#define BM1880_CLK_UART_500M 45
+#define BM1880_CLK_APB_UART 46
+#define BM1880_CLK_APB_I2S 47
+#define BM1880_CLK_AXI4_USB 48
+#define BM1880_CLK_APB_USB 49
+#define BM1880_CLK_125M_USB 50
+#define BM1880_CLK_33K_USB 51
+#define BM1880_CLK_DIV_12M_USB 52
+#define BM1880_CLK_12M_USB 53
+#define BM1880_CLK_APB_VIDEO 54
+#define BM1880_CLK_VIDEO_AXI 55
+#define BM1880_CLK_VPP_AXI 56
+#define BM1880_CLK_APB_VPP 57
+#define BM1880_CLK_DIV_0_AXI1 58
+#define BM1880_CLK_DIV_1_AXI1 59
+#define BM1880_CLK_AXI1 60
+#define BM1880_CLK_AXI2 61
+#define BM1880_CLK_AXI3 62
+#define BM1880_CLK_AXI4 63
+#define BM1880_CLK_AXI5 64
+#define BM1880_CLK_DIV_0_AXI6 65
+#define BM1880_CLK_DIV_1_AXI6 66
+#define BM1880_CLK_MUX_AXI6 67
+#define BM1880_CLK_AXI6 68
+#define BM1880_NR_CLKS 69
+
+#endif /* __DT_BINDINGS_CLOCK_BM1880_H */
diff --git a/include/dt-bindings/clock/dra7.h b/include/dt-bindings/clock/dra7.h
index 72f2e8411523..8cec5a1e1806 100644
--- a/include/dt-bindings/clock/dra7.h
+++ b/include/dt-bindings/clock/dra7.h
@@ -29,6 +29,16 @@
#define DRA7_RTC_CLKCTRL_INDEX(offset) ((offset) - DRA7_RTC_CLKCTRL_OFFSET)
#define DRA7_RTCSS_CLKCTRL DRA7_RTC_CLKCTRL_INDEX(0x44)
+/* vip clocks */
+#define DRA7_VIP1_CLKCTRL DRA7_CLKCTRL_INDEX(0x20)
+#define DRA7_VIP2_CLKCTRL DRA7_CLKCTRL_INDEX(0x28)
+#define DRA7_VIP3_CLKCTRL DRA7_CLKCTRL_INDEX(0x30)
+
+/* vpe clocks */
+#define DRA7_VPE_CLKCTRL_OFFSET 0x60
+#define DRA7_VPE_CLKCTRL_INDEX(offset) ((offset) - DRA7_VPE_CLKCTRL_OFFSET)
+#define DRA7_VPE_CLKCTRL DRA7_VPE_CLKCTRL_INDEX(0x64)
+
/* coreaon clocks */
#define DRA7_SMARTREFLEX_MPU_CLKCTRL DRA7_CLKCTRL_INDEX(0x28)
#define DRA7_SMARTREFLEX_CORE_CLKCTRL DRA7_CLKCTRL_INDEX(0x38)
@@ -78,6 +88,9 @@
#define DRA7_DSS_CORE_CLKCTRL DRA7_CLKCTRL_INDEX(0x20)
#define DRA7_BB2D_CLKCTRL DRA7_CLKCTRL_INDEX(0x30)
+/* gpu clocks */
+#define DRA7_GPU_CLKCTRL DRA7_CLKCTRL_INDEX(0x20)
+
/* l3init clocks */
#define DRA7_MMC1_CLKCTRL DRA7_CLKCTRL_INDEX(0x28)
#define DRA7_MMC2_CLKCTRL DRA7_CLKCTRL_INDEX(0x30)
@@ -192,6 +205,16 @@
/* rtc clocks */
#define DRA7_RTC_RTCSS_CLKCTRL DRA7_CLKCTRL_INDEX(0x44)
+/* vip clocks */
+#define DRA7_CAM_VIP1_CLKCTRL DRA7_CLKCTRL_INDEX(0x20)
+#define DRA7_CAM_VIP2_CLKCTRL DRA7_CLKCTRL_INDEX(0x28)
+#define DRA7_CAM_VIP3_CLKCTRL DRA7_CLKCTRL_INDEX(0x30)
+
+/* vpe clocks */
+#define DRA7_VPE_CLKCTRL_OFFSET 0x60
+#define DRA7_VPE_CLKCTRL_INDEX(offset) ((offset) - DRA7_VPE_CLKCTRL_OFFSET)
+#define DRA7_VPE_VPE_CLKCTRL DRA7_VPE_CLKCTRL_INDEX(0x64)
+
/* coreaon clocks */
#define DRA7_COREAON_SMARTREFLEX_MPU_CLKCTRL DRA7_CLKCTRL_INDEX(0x28)
#define DRA7_COREAON_SMARTREFLEX_CORE_CLKCTRL DRA7_CLKCTRL_INDEX(0x38)
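
For reference, the new VPE macros above evaluate to the clkctrl register's offset within its own domain: DRA7_VPE_VPE_CLKCTRL is DRA7_VPE_CLKCTRL_INDEX(0x64), i.e. 0x64 - 0x60 = 0x4, which is the offset a device tree clock specifier would typically pass to the VPE clkctrl provider.
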
diff --git a/include/dt-bindings/clock/imx7ulp-clock.h b/include/dt-bindings/clock/imx7ulp-clock.h
index 6f66f9005c81..38145bdcd975 100644
--- a/include/dt-bindings/clock/imx7ulp-clock.h
+++ b/include/dt-bindings/clock/imx7ulp-clock.h
@@ -49,6 +49,7 @@
#define IMX7ULP_CLK_NIC1_DIV 36
#define IMX7ULP_CLK_NIC1_BUS_DIV 37
#define IMX7ULP_CLK_NIC1_EXT_DIV 38
+/* IMX7ULP_CLK_MIPI_PLL is unsupported and shouldn't be used in DT */
#define IMX7ULP_CLK_MIPI_PLL 39
#define IMX7ULP_CLK_SIRC 40
#define IMX7ULP_CLK_SOSC_BUS_CLK 41
diff --git a/include/dt-bindings/clock/imx8mm-clock.h b/include/dt-bindings/clock/imx8mm-clock.h
index 07e6c686f3ef..edeece2289f0 100644
--- a/include/dt-bindings/clock/imx8mm-clock.h
+++ b/include/dt-bindings/clock/imx8mm-clock.h
@@ -248,6 +248,23 @@
#define IMX8MM_CLK_SNVS_ROOT 228
#define IMX8MM_CLK_GIC 229
-#define IMX8MM_CLK_END 230
+#define IMX8MM_SYS_PLL1_40M_CG 230
+#define IMX8MM_SYS_PLL1_80M_CG 231
+#define IMX8MM_SYS_PLL1_100M_CG 232
+#define IMX8MM_SYS_PLL1_133M_CG 233
+#define IMX8MM_SYS_PLL1_160M_CG 234
+#define IMX8MM_SYS_PLL1_200M_CG 235
+#define IMX8MM_SYS_PLL1_266M_CG 236
+#define IMX8MM_SYS_PLL1_400M_CG 237
+#define IMX8MM_SYS_PLL2_50M_CG 238
+#define IMX8MM_SYS_PLL2_100M_CG 239
+#define IMX8MM_SYS_PLL2_125M_CG 240
+#define IMX8MM_SYS_PLL2_166M_CG 241
+#define IMX8MM_SYS_PLL2_200M_CG 242
+#define IMX8MM_SYS_PLL2_250M_CG 243
+#define IMX8MM_SYS_PLL2_333M_CG 244
+#define IMX8MM_SYS_PLL2_500M_CG 245
+
+#define IMX8MM_CLK_END 246
#endif
diff --git a/include/dt-bindings/clock/imx8mn-clock.h b/include/dt-bindings/clock/imx8mn-clock.h
index d7b201652f4c..65ac6eb6c733 100644
--- a/include/dt-bindings/clock/imx8mn-clock.h
+++ b/include/dt-bindings/clock/imx8mn-clock.h
@@ -122,8 +122,8 @@
#define IMX8MN_CLK_I2C1 105
#define IMX8MN_CLK_I2C2 106
#define IMX8MN_CLK_I2C3 107
-#define IMX8MN_CLK_I2C4 118
-#define IMX8MN_CLK_UART1 119
+#define IMX8MN_CLK_I2C4 108
+#define IMX8MN_CLK_UART1 109
#define IMX8MN_CLK_UART2 110
#define IMX8MN_CLK_UART3 111
#define IMX8MN_CLK_UART4 112
@@ -211,6 +211,23 @@
#define IMX8MN_CLK_GPU_CORE_ROOT 193
#define IMX8MN_CLK_GIC 194
-#define IMX8MN_CLK_END 195
+#define IMX8MN_SYS_PLL1_40M_CG 195
+#define IMX8MN_SYS_PLL1_80M_CG 196
+#define IMX8MN_SYS_PLL1_100M_CG 197
+#define IMX8MN_SYS_PLL1_133M_CG 198
+#define IMX8MN_SYS_PLL1_160M_CG 199
+#define IMX8MN_SYS_PLL1_200M_CG 200
+#define IMX8MN_SYS_PLL1_266M_CG 201
+#define IMX8MN_SYS_PLL1_400M_CG 202
+#define IMX8MN_SYS_PLL2_50M_CG 203
+#define IMX8MN_SYS_PLL2_100M_CG 204
+#define IMX8MN_SYS_PLL2_125M_CG 205
+#define IMX8MN_SYS_PLL2_166M_CG 206
+#define IMX8MN_SYS_PLL2_200M_CG 207
+#define IMX8MN_SYS_PLL2_250M_CG 208
+#define IMX8MN_SYS_PLL2_333M_CG 209
+#define IMX8MN_SYS_PLL2_500M_CG 210
+
+#define IMX8MN_CLK_END 211
#endif
diff --git a/include/dt-bindings/clock/imx8mp-clock.h b/include/dt-bindings/clock/imx8mp-clock.h
new file mode 100644
index 000000000000..2fab63186bca
--- /dev/null
+++ b/include/dt-bindings/clock/imx8mp-clock.h
@@ -0,0 +1,300 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2019 NXP
+ */
+
+#ifndef __DT_BINDINGS_CLOCK_IMX8MP_H
+#define __DT_BINDINGS_CLOCK_IMX8MP_H
+
+#define IMX8MP_CLK_DUMMY 0
+#define IMX8MP_CLK_32K 1
+#define IMX8MP_CLK_24M 2
+#define IMX8MP_OSC_HDMI_CLK 3
+#define IMX8MP_CLK_EXT1 4
+#define IMX8MP_CLK_EXT2 5
+#define IMX8MP_CLK_EXT3 6
+#define IMX8MP_CLK_EXT4 7
+#define IMX8MP_AUDIO_PLL1_REF_SEL 8
+#define IMX8MP_AUDIO_PLL2_REF_SEL 9
+#define IMX8MP_VIDEO_PLL1_REF_SEL 10
+#define IMX8MP_DRAM_PLL_REF_SEL 11
+#define IMX8MP_GPU_PLL_REF_SEL 12
+#define IMX8MP_VPU_PLL_REF_SEL 13
+#define IMX8MP_ARM_PLL_REF_SEL 14
+#define IMX8MP_SYS_PLL1_REF_SEL 15
+#define IMX8MP_SYS_PLL2_REF_SEL 16
+#define IMX8MP_SYS_PLL3_REF_SEL 17
+#define IMX8MP_AUDIO_PLL1 18
+#define IMX8MP_AUDIO_PLL2 19
+#define IMX8MP_VIDEO_PLL1 20
+#define IMX8MP_DRAM_PLL 21
+#define IMX8MP_GPU_PLL 22
+#define IMX8MP_VPU_PLL 23
+#define IMX8MP_ARM_PLL 24
+#define IMX8MP_SYS_PLL1 25
+#define IMX8MP_SYS_PLL2 26
+#define IMX8MP_SYS_PLL3 27
+#define IMX8MP_AUDIO_PLL1_BYPASS 28
+#define IMX8MP_AUDIO_PLL2_BYPASS 29
+#define IMX8MP_VIDEO_PLL1_BYPASS 30
+#define IMX8MP_DRAM_PLL_BYPASS 31
+#define IMX8MP_GPU_PLL_BYPASS 32
+#define IMX8MP_VPU_PLL_BYPASS 33
+#define IMX8MP_ARM_PLL_BYPASS 34
+#define IMX8MP_SYS_PLL1_BYPASS 35
+#define IMX8MP_SYS_PLL2_BYPASS 36
+#define IMX8MP_SYS_PLL3_BYPASS 37
+#define IMX8MP_AUDIO_PLL1_OUT 38
+#define IMX8MP_AUDIO_PLL2_OUT 39
+#define IMX8MP_VIDEO_PLL1_OUT 40
+#define IMX8MP_DRAM_PLL_OUT 41
+#define IMX8MP_GPU_PLL_OUT 42
+#define IMX8MP_VPU_PLL_OUT 43
+#define IMX8MP_ARM_PLL_OUT 44
+#define IMX8MP_SYS_PLL1_OUT 45
+#define IMX8MP_SYS_PLL2_OUT 46
+#define IMX8MP_SYS_PLL3_OUT 47
+#define IMX8MP_SYS_PLL1_40M 48
+#define IMX8MP_SYS_PLL1_80M 49
+#define IMX8MP_SYS_PLL1_100M 50
+#define IMX8MP_SYS_PLL1_133M 51
+#define IMX8MP_SYS_PLL1_160M 52
+#define IMX8MP_SYS_PLL1_200M 53
+#define IMX8MP_SYS_PLL1_266M 54
+#define IMX8MP_SYS_PLL1_400M 55
+#define IMX8MP_SYS_PLL1_800M 56
+#define IMX8MP_SYS_PLL2_50M 57
+#define IMX8MP_SYS_PLL2_100M 58
+#define IMX8MP_SYS_PLL2_125M 59
+#define IMX8MP_SYS_PLL2_166M 60
+#define IMX8MP_SYS_PLL2_200M 61
+#define IMX8MP_SYS_PLL2_250M 62
+#define IMX8MP_SYS_PLL2_333M 63
+#define IMX8MP_SYS_PLL2_500M 64
+#define IMX8MP_SYS_PLL2_1000M 65
+#define IMX8MP_CLK_A53_SRC 66
+#define IMX8MP_CLK_M7_SRC 67
+#define IMX8MP_CLK_ML_SRC 68
+#define IMX8MP_CLK_GPU3D_CORE_SRC 69
+#define IMX8MP_CLK_GPU3D_SHADER_SRC 70
+#define IMX8MP_CLK_GPU2D_SRC 71
+#define IMX8MP_CLK_AUDIO_AXI_SRC 72
+#define IMX8MP_CLK_HSIO_AXI_SRC 73
+#define IMX8MP_CLK_MEDIA_ISP_SRC 74
+#define IMX8MP_CLK_A53_CG 75
+#define IMX8MP_CLK_M4_CG 76
+#define IMX8MP_CLK_ML_CG 77
+#define IMX8MP_CLK_GPU3D_CORE_CG 78
+#define IMX8MP_CLK_GPU3D_SHADER_CG 79
+#define IMX8MP_CLK_GPU2D_CG 80
+#define IMX8MP_CLK_AUDIO_AXI_CG 81
+#define IMX8MP_CLK_HSIO_AXI_CG 82
+#define IMX8MP_CLK_MEDIA_ISP_CG 83
+#define IMX8MP_CLK_A53_DIV 84
+#define IMX8MP_CLK_M7_DIV 85
+#define IMX8MP_CLK_ML_DIV 86
+#define IMX8MP_CLK_GPU3D_CORE_DIV 87
+#define IMX8MP_CLK_GPU3D_SHADER_DIV 88
+#define IMX8MP_CLK_GPU2D_DIV 89
+#define IMX8MP_CLK_AUDIO_AXI_DIV 90
+#define IMX8MP_CLK_HSIO_AXI_DIV 91
+#define IMX8MP_CLK_MEDIA_ISP_DIV 92
+#define IMX8MP_CLK_MAIN_AXI 93
+#define IMX8MP_CLK_ENET_AXI 94
+#define IMX8MP_CLK_NAND_USDHC_BUS 95
+#define IMX8MP_CLK_VPU_BUS 96
+#define IMX8MP_CLK_MEDIA_AXI 97
+#define IMX8MP_CLK_MEDIA_APB 98
+#define IMX8MP_CLK_HDMI_APB 99
+#define IMX8MP_CLK_HDMI_AXI 100
+#define IMX8MP_CLK_GPU_AXI 101
+#define IMX8MP_CLK_GPU_AHB 102
+#define IMX8MP_CLK_NOC 103
+#define IMX8MP_CLK_NOC_IO 104
+#define IMX8MP_CLK_ML_AXI 105
+#define IMX8MP_CLK_ML_AHB 106
+#define IMX8MP_CLK_AHB 107
+#define IMX8MP_CLK_AUDIO_AHB 108
+#define IMX8MP_CLK_MIPI_DSI_ESC_RX 109
+#define IMX8MP_CLK_IPG_ROOT 110
+#define IMX8MP_CLK_IPG_AUDIO_ROOT 111
+#define IMX8MP_CLK_DRAM_ALT 112
+#define IMX8MP_CLK_DRAM_APB 113
+#define IMX8MP_CLK_VPU_G1 114
+#define IMX8MP_CLK_VPU_G2 115
+#define IMX8MP_CLK_CAN1 116
+#define IMX8MP_CLK_CAN2 117
+#define IMX8MP_CLK_MEMREPAIR 118
+#define IMX8MP_CLK_PCIE_PHY 119
+#define IMX8MP_CLK_PCIE_AUX 120
+#define IMX8MP_CLK_I2C5 121
+#define IMX8MP_CLK_I2C6 122
+#define IMX8MP_CLK_SAI1 123
+#define IMX8MP_CLK_SAI2 124
+#define IMX8MP_CLK_SAI3 125
+#define IMX8MP_CLK_SAI4 126
+#define IMX8MP_CLK_SAI5 127
+#define IMX8MP_CLK_SAI6 128
+#define IMX8MP_CLK_ENET_QOS 129
+#define IMX8MP_CLK_ENET_QOS_TIMER 130
+#define IMX8MP_CLK_ENET_REF 131
+#define IMX8MP_CLK_ENET_TIMER 132
+#define IMX8MP_CLK_ENET_PHY_REF 133
+#define IMX8MP_CLK_NAND 134
+#define IMX8MP_CLK_QSPI 135
+#define IMX8MP_CLK_USDHC1 136
+#define IMX8MP_CLK_USDHC2 137
+#define IMX8MP_CLK_I2C1 138
+#define IMX8MP_CLK_I2C2 139
+#define IMX8MP_CLK_I2C3 140
+#define IMX8MP_CLK_I2C4 141
+#define IMX8MP_CLK_UART1 142
+#define IMX8MP_CLK_UART2 143
+#define IMX8MP_CLK_UART3 144
+#define IMX8MP_CLK_UART4 145
+#define IMX8MP_CLK_USB_CORE_REF 146
+#define IMX8MP_CLK_USB_PHY_REF 147
+#define IMX8MP_CLK_GIC 148
+#define IMX8MP_CLK_ECSPI1 149
+#define IMX8MP_CLK_ECSPI2 150
+#define IMX8MP_CLK_PWM1 151
+#define IMX8MP_CLK_PWM2 152
+#define IMX8MP_CLK_PWM3 153
+#define IMX8MP_CLK_PWM4 154
+#define IMX8MP_CLK_GPT1 155
+#define IMX8MP_CLK_GPT2 156
+#define IMX8MP_CLK_GPT3 157
+#define IMX8MP_CLK_GPT4 158
+#define IMX8MP_CLK_GPT5 159
+#define IMX8MP_CLK_GPT6 160
+#define IMX8MP_CLK_TRACE 161
+#define IMX8MP_CLK_WDOG 162
+#define IMX8MP_CLK_WRCLK 163
+#define IMX8MP_CLK_IPP_DO_CLKO1 164
+#define IMX8MP_CLK_IPP_DO_CLKO2 165
+#define IMX8MP_CLK_HDMI_FDCC_TST 166
+#define IMX8MP_CLK_HDMI_27M 167
+#define IMX8MP_CLK_HDMI_REF_266M 168
+#define IMX8MP_CLK_USDHC3 169
+#define IMX8MP_CLK_MEDIA_CAM1_PIX 170
+#define IMX8MP_CLK_MEDIA_MIPI_PHY1_REF 171
+#define IMX8MP_CLK_MEDIA_DISP1_PIX 172
+#define IMX8MP_CLK_MEDIA_CAM2_PIX 173
+#define IMX8MP_CLK_MEDIA_MIPI_PHY2_REF 174
+#define IMX8MP_CLK_MEDIA_MIPI_CSI2_ESC 175
+#define IMX8MP_CLK_PCIE2_CTRL 176
+#define IMX8MP_CLK_PCIE2_PHY 177
+#define IMX8MP_CLK_MEDIA_MIPI_TEST_BYTE 178
+#define IMX8MP_CLK_ECSPI3 179
+#define IMX8MP_CLK_PDM 180
+#define IMX8MP_CLK_VPU_VC8000E 181
+#define IMX8MP_CLK_SAI7 182
+#define IMX8MP_CLK_GPC_ROOT 183
+#define IMX8MP_CLK_ANAMIX_ROOT 184
+#define IMX8MP_CLK_CPU_ROOT 185
+#define IMX8MP_CLK_CSU_ROOT 186
+#define IMX8MP_CLK_DEBUG_ROOT 187
+#define IMX8MP_CLK_DRAM1_ROOT 188
+#define IMX8MP_CLK_ECSPI1_ROOT 189
+#define IMX8MP_CLK_ECSPI2_ROOT 190
+#define IMX8MP_CLK_ECSPI3_ROOT 191
+#define IMX8MP_CLK_ENET1_ROOT 192
+#define IMX8MP_CLK_GPIO1_ROOT 193
+#define IMX8MP_CLK_GPIO2_ROOT 194
+#define IMX8MP_CLK_GPIO3_ROOT 195
+#define IMX8MP_CLK_GPIO4_ROOT 196
+#define IMX8MP_CLK_GPIO5_ROOT 197
+#define IMX8MP_CLK_GPT1_ROOT 198
+#define IMX8MP_CLK_GPT2_ROOT 199
+#define IMX8MP_CLK_GPT3_ROOT 200
+#define IMX8MP_CLK_GPT4_ROOT 201
+#define IMX8MP_CLK_GPT5_ROOT 202
+#define IMX8MP_CLK_GPT6_ROOT 203
+#define IMX8MP_CLK_HS_ROOT 204
+#define IMX8MP_CLK_I2C1_ROOT 205
+#define IMX8MP_CLK_I2C2_ROOT 206
+#define IMX8MP_CLK_I2C3_ROOT 207
+#define IMX8MP_CLK_I2C4_ROOT 208
+#define IMX8MP_CLK_IOMUX_ROOT 209
+#define IMX8MP_CLK_IPMUX1_ROOT 210
+#define IMX8MP_CLK_IPMUX2_ROOT 211
+#define IMX8MP_CLK_IPMUX3_ROOT 212
+#define IMX8MP_CLK_MU_ROOT 213
+#define IMX8MP_CLK_OCOTP_ROOT 214
+#define IMX8MP_CLK_OCRAM_ROOT 215
+#define IMX8MP_CLK_OCRAM_S_ROOT 216
+#define IMX8MP_CLK_PCIE_ROOT 217
+#define IMX8MP_CLK_PERFMON1_ROOT 218
+#define IMX8MP_CLK_PERFMON2_ROOT 219
+#define IMX8MP_CLK_PWM1_ROOT 220
+#define IMX8MP_CLK_PWM2_ROOT 221
+#define IMX8MP_CLK_PWM3_ROOT 222
+#define IMX8MP_CLK_PWM4_ROOT 223
+#define IMX8MP_CLK_QOS_ROOT 224
+#define IMX8MP_CLK_QOS_ENET_ROOT 225
+#define IMX8MP_CLK_QSPI_ROOT 226
+#define IMX8MP_CLK_NAND_ROOT 227
+#define IMX8MP_CLK_NAND_USDHC_BUS_RAWNAND_CLK 228
+#define IMX8MP_CLK_RDC_ROOT 229
+#define IMX8MP_CLK_ROM_ROOT 230
+#define IMX8MP_CLK_I2C5_ROOT 231
+#define IMX8MP_CLK_I2C6_ROOT 232
+#define IMX8MP_CLK_CAN1_ROOT 233
+#define IMX8MP_CLK_CAN2_ROOT 234
+#define IMX8MP_CLK_SCTR_ROOT 235
+#define IMX8MP_CLK_SDMA1_ROOT 236
+#define IMX8MP_CLK_ENET_QOS_ROOT 237
+#define IMX8MP_CLK_SEC_DEBUG_ROOT 238
+#define IMX8MP_CLK_SEMA1_ROOT 239
+#define IMX8MP_CLK_SEMA2_ROOT 240
+#define IMX8MP_CLK_IRQ_STEER_ROOT 241
+#define IMX8MP_CLK_SIM_ENET_ROOT 242
+#define IMX8MP_CLK_SIM_M_ROOT 243
+#define IMX8MP_CLK_SIM_MAIN_ROOT 244
+#define IMX8MP_CLK_SIM_S_ROOT 245
+#define IMX8MP_CLK_SIM_WAKEUP_ROOT 246
+#define IMX8MP_CLK_GPU2D_ROOT 247
+#define IMX8MP_CLK_GPU3D_ROOT 248
+#define IMX8MP_CLK_SNVS_ROOT 249
+#define IMX8MP_CLK_TRACE_ROOT 250
+#define IMX8MP_CLK_UART1_ROOT 251
+#define IMX8MP_CLK_UART2_ROOT 252
+#define IMX8MP_CLK_UART3_ROOT 253
+#define IMX8MP_CLK_UART4_ROOT 254
+#define IMX8MP_CLK_USB_ROOT 255
+#define IMX8MP_CLK_USB_PHY_ROOT 256
+#define IMX8MP_CLK_USDHC1_ROOT 257
+#define IMX8MP_CLK_USDHC2_ROOT 258
+#define IMX8MP_CLK_WDOG1_ROOT 259
+#define IMX8MP_CLK_WDOG2_ROOT 260
+#define IMX8MP_CLK_WDOG3_ROOT 261
+#define IMX8MP_CLK_VPU_G1_ROOT 262
+#define IMX8MP_CLK_GPU_ROOT 263
+#define IMX8MP_CLK_NOC_WRAPPER_ROOT 264
+#define IMX8MP_CLK_VPU_VC8KE_ROOT 265
+#define IMX8MP_CLK_VPU_G2_ROOT 266
+#define IMX8MP_CLK_NPU_ROOT 267
+#define IMX8MP_CLK_HSIO_ROOT 268
+#define IMX8MP_CLK_MEDIA_APB_ROOT 269
+#define IMX8MP_CLK_MEDIA_AXI_ROOT 270
+#define IMX8MP_CLK_MEDIA_CAM1_PIX_ROOT 271
+#define IMX8MP_CLK_MEDIA_CAM2_PIX_ROOT 272
+#define IMX8MP_CLK_MEDIA_DISP1_PIX_ROOT 273
+#define IMX8MP_CLK_MEDIA_DISP2_PIX_ROOT 274
+#define IMX8MP_CLK_MEDIA_MIPI_PHY1_REF_ROOT 275
+#define IMX8MP_CLK_MEDIA_ISP_ROOT 276
+#define IMX8MP_CLK_USDHC3_ROOT 277
+#define IMX8MP_CLK_HDMI_ROOT 278
+#define IMX8MP_CLK_XTAL_ROOT 279
+#define IMX8MP_CLK_PLL_ROOT 280
+#define IMX8MP_CLK_TSENSOR_ROOT 281
+#define IMX8MP_CLK_VPU_ROOT 282
+#define IMX8MP_CLK_MRPR_ROOT 283
+#define IMX8MP_CLK_AUDIO_ROOT 284
+#define IMX8MP_CLK_DRAM_ALT_ROOT 285
+#define IMX8MP_CLK_DRAM_CORE 286
+#define IMX8MP_CLK_ARM 287
+
+#define IMX8MP_CLK_END 288
+
+#endif
diff --git a/include/dt-bindings/clock/imx8mq-clock.h b/include/dt-bindings/clock/imx8mq-clock.h
index 65463673d25e..3bab9b21c8d7 100644
--- a/include/dt-bindings/clock/imx8mq-clock.h
+++ b/include/dt-bindings/clock/imx8mq-clock.h
@@ -403,5 +403,27 @@
#define IMX8MQ_CLK_SNVS_ROOT 264
#define IMX8MQ_CLK_GIC 265
-#define IMX8MQ_CLK_END 266
+#define IMX8MQ_VIDEO2_PLL1_REF_SEL 266
+
+#define IMX8MQ_SYS1_PLL_40M_CG 267
+#define IMX8MQ_SYS1_PLL_80M_CG 268
+#define IMX8MQ_SYS1_PLL_100M_CG 269
+#define IMX8MQ_SYS1_PLL_133M_CG 270
+#define IMX8MQ_SYS1_PLL_160M_CG 271
+#define IMX8MQ_SYS1_PLL_200M_CG 272
+#define IMX8MQ_SYS1_PLL_266M_CG 273
+#define IMX8MQ_SYS1_PLL_400M_CG 274
+#define IMX8MQ_SYS1_PLL_800M_CG 275
+#define IMX8MQ_SYS2_PLL_50M_CG 276
+#define IMX8MQ_SYS2_PLL_100M_CG 277
+#define IMX8MQ_SYS2_PLL_125M_CG 278
+#define IMX8MQ_SYS2_PLL_166M_CG 279
+#define IMX8MQ_SYS2_PLL_200M_CG 280
+#define IMX8MQ_SYS2_PLL_250M_CG 281
+#define IMX8MQ_SYS2_PLL_333M_CG 282
+#define IMX8MQ_SYS2_PLL_500M_CG 283
+#define IMX8MQ_SYS2_PLL_1000M_CG 284
+
+#define IMX8MQ_CLK_END 285
+
#endif /* __DT_BINDINGS_CLOCK_IMX8MQ_H */
diff --git a/include/dt-bindings/clock/marvell,mmp2.h b/include/dt-bindings/clock/marvell,mmp2.h
index e785c6eb3561..4b1a7724f20d 100644
--- a/include/dt-bindings/clock/marvell,mmp2.h
+++ b/include/dt-bindings/clock/marvell,mmp2.h
@@ -72,6 +72,8 @@
#define MMP2_CLK_CCIC1_PHY 118
#define MMP2_CLK_CCIC1_SPHY 119
#define MMP2_CLK_DISP0_LCDC 120
+#define MMP2_CLK_USBHSIC0 121
+#define MMP2_CLK_USBHSIC1 122
#define MMP2_NR_CLKS 200
#endif
diff --git a/include/dt-bindings/clock/meson8-ddr-clkc.h b/include/dt-bindings/clock/meson8-ddr-clkc.h
new file mode 100644
index 000000000000..a8e0fa2987ab
--- /dev/null
+++ b/include/dt-bindings/clock/meson8-ddr-clkc.h
@@ -0,0 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#define DDR_CLKID_DDR_PLL_DCO 0
+#define DDR_CLKID_DDR_PLL 1
diff --git a/include/dt-bindings/clock/omap4.h b/include/dt-bindings/clock/omap4.h
index 5167b2d93ac3..88d73be84b94 100644
--- a/include/dt-bindings/clock/omap4.h
+++ b/include/dt-bindings/clock/omap4.h
@@ -124,6 +124,17 @@
#define OMAP4_UART4_CLKCTRL OMAP4_CLKCTRL_INDEX(0x158)
#define OMAP4_MMC5_CLKCTRL OMAP4_CLKCTRL_INDEX(0x160)
+/* l4_secure clocks */
+#define OMAP4_L4_SECURE_CLKCTRL_OFFSET 0x1a0
+#define OMAP4_L4_SECURE_CLKCTRL_INDEX(offset) ((offset) - OMAP4_L4_SECURE_CLKCTRL_OFFSET)
+#define OMAP4_AES1_CLKCTRL OMAP4_L4_SECURE_CLKCTRL_INDEX(0x1a0)
+#define OMAP4_AES2_CLKCTRL OMAP4_L4_SECURE_CLKCTRL_INDEX(0x1a8)
+#define OMAP4_DES3DES_CLKCTRL OMAP4_L4_SECURE_CLKCTRL_INDEX(0x1b0)
+#define OMAP4_PKA_CLKCTRL OMAP4_L4_SECURE_CLKCTRL_INDEX(0x1b8)
+#define OMAP4_RNG_CLKCTRL OMAP4_L4_SECURE_CLKCTRL_INDEX(0x1c0)
+#define OMAP4_SHA2MD5_CLKCTRL OMAP4_L4_SECURE_CLKCTRL_INDEX(0x1c8)
+#define OMAP4_CRYPTODMA_CLKCTRL OMAP4_L4_SECURE_CLKCTRL_INDEX(0x1d8)
+
/* l4_wkup clocks */
#define OMAP4_L4_WKUP_CLKCTRL OMAP4_CLKCTRL_INDEX(0x20)
#define OMAP4_WD_TIMER2_CLKCTRL OMAP4_CLKCTRL_INDEX(0x30)
diff --git a/include/dt-bindings/clock/omap5.h b/include/dt-bindings/clock/omap5.h
index e5411938983c..41775272fd27 100644
--- a/include/dt-bindings/clock/omap5.h
+++ b/include/dt-bindings/clock/omap5.h
@@ -16,6 +16,7 @@
/* abe clocks */
#define OMAP5_L4_ABE_CLKCTRL OMAP5_CLKCTRL_INDEX(0x20)
+#define OMAP5_AESS_CLKCTRL OMAP5_CLKCTRL_INDEX(0x28)
#define OMAP5_MCPDM_CLKCTRL OMAP5_CLKCTRL_INDEX(0x30)
#define OMAP5_DMIC_CLKCTRL OMAP5_CLKCTRL_INDEX(0x38)
#define OMAP5_MCBSP1_CLKCTRL OMAP5_CLKCTRL_INDEX(0x48)
@@ -86,6 +87,21 @@
#define OMAP5_UART5_CLKCTRL OMAP5_CLKCTRL_INDEX(0x170)
#define OMAP5_UART6_CLKCTRL OMAP5_CLKCTRL_INDEX(0x178)
+/* l4_secure clocks */
+#define OMAP5_L4_SECURE_CLKCTRL_OFFSET 0x1a0
+#define OMAP5_L4_SECURE_CLKCTRL_INDEX(offset) ((offset) - OMAP5_L4_SECURE_CLKCTRL_OFFSET)
+#define OMAP5_AES1_CLKCTRL OMAP5_L4_SECURE_CLKCTRL_INDEX(0x1a0)
+#define OMAP5_AES2_CLKCTRL OMAP5_L4_SECURE_CLKCTRL_INDEX(0x1a8)
+#define OMAP5_DES3DES_CLKCTRL OMAP5_L4_SECURE_CLKCTRL_INDEX(0x1b0)
+#define OMAP5_FPKA_CLKCTRL OMAP5_L4_SECURE_CLKCTRL_INDEX(0x1b8)
+#define OMAP5_RNG_CLKCTRL OMAP5_L4_SECURE_CLKCTRL_INDEX(0x1c0)
+#define OMAP5_SHA2MD5_CLKCTRL OMAP5_L4_SECURE_CLKCTRL_INDEX(0x1c8)
+#define OMAP5_DMA_CRYPTO_CLKCTRL OMAP5_L4_SECURE_CLKCTRL_INDEX(0x1d8)
+
+/* iva clocks */
+#define OMAP5_IVA_CLKCTRL OMAP5_CLKCTRL_INDEX(0x20)
+#define OMAP5_SL2IF_CLKCTRL OMAP5_CLKCTRL_INDEX(0x28)
+
/* dss clocks */
#define OMAP5_DSS_CORE_CLKCTRL OMAP5_CLKCTRL_INDEX(0x20)
diff --git a/include/dt-bindings/clock/px30-cru.h b/include/dt-bindings/clock/px30-cru.h
index 00101479f7c4..5b1416fcde6f 100644
--- a/include/dt-bindings/clock/px30-cru.h
+++ b/include/dt-bindings/clock/px30-cru.h
@@ -85,6 +85,8 @@
#define SCLK_EMMC_DIV50 83
#define SCLK_DDRCLK 84
#define SCLK_UART1_SRC 85
+#define SCLK_SDMMC_DIV 86
+#define SCLK_SDMMC_DIV50 87
/* dclk gates */
#define DCLK_VOPB 150
diff --git a/include/dt-bindings/clock/qcom,dispcc-sc7180.h b/include/dt-bindings/clock/qcom,dispcc-sc7180.h
new file mode 100644
index 000000000000..b9b51617a335
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,dispcc-sc7180.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_DISP_CC_SC7180_H
+#define _DT_BINDINGS_CLK_QCOM_DISP_CC_SC7180_H
+
+#define DISP_CC_PLL0 0
+#define DISP_CC_PLL0_OUT_EVEN 1
+#define DISP_CC_MDSS_AHB_CLK 2
+#define DISP_CC_MDSS_AHB_CLK_SRC 3
+#define DISP_CC_MDSS_BYTE0_CLK 4
+#define DISP_CC_MDSS_BYTE0_CLK_SRC 5
+#define DISP_CC_MDSS_BYTE0_DIV_CLK_SRC 6
+#define DISP_CC_MDSS_BYTE0_INTF_CLK 7
+#define DISP_CC_MDSS_DP_AUX_CLK 8
+#define DISP_CC_MDSS_DP_AUX_CLK_SRC 9
+#define DISP_CC_MDSS_DP_CRYPTO_CLK 10
+#define DISP_CC_MDSS_DP_CRYPTO_CLK_SRC 11
+#define DISP_CC_MDSS_DP_LINK_CLK 12
+#define DISP_CC_MDSS_DP_LINK_CLK_SRC 13
+#define DISP_CC_MDSS_DP_LINK_DIV_CLK_SRC 14
+#define DISP_CC_MDSS_DP_LINK_INTF_CLK 15
+#define DISP_CC_MDSS_DP_PIXEL_CLK 16
+#define DISP_CC_MDSS_DP_PIXEL_CLK_SRC 17
+#define DISP_CC_MDSS_ESC0_CLK 18
+#define DISP_CC_MDSS_ESC0_CLK_SRC 19
+#define DISP_CC_MDSS_MDP_CLK 20
+#define DISP_CC_MDSS_MDP_CLK_SRC 21
+#define DISP_CC_MDSS_MDP_LUT_CLK 22
+#define DISP_CC_MDSS_NON_GDSC_AHB_CLK 23
+#define DISP_CC_MDSS_PCLK0_CLK 24
+#define DISP_CC_MDSS_PCLK0_CLK_SRC 25
+#define DISP_CC_MDSS_ROT_CLK 26
+#define DISP_CC_MDSS_ROT_CLK_SRC 27
+#define DISP_CC_MDSS_RSCC_AHB_CLK 28
+#define DISP_CC_MDSS_RSCC_VSYNC_CLK 29
+#define DISP_CC_MDSS_VSYNC_CLK 30
+#define DISP_CC_MDSS_VSYNC_CLK_SRC 31
+#define DISP_CC_XO_CLK 32
+
+/* DISP_CC GDSCR */
+#define MDSS_GDSC 0
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,dispcc-sdm845.h b/include/dt-bindings/clock/qcom,dispcc-sdm845.h
index 11eed4bc9646..4016fd1d5b46 100644
--- a/include/dt-bindings/clock/qcom,dispcc-sdm845.h
+++ b/include/dt-bindings/clock/qcom,dispcc-sdm845.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
/*
- * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
*/
#ifndef _DT_BINDINGS_CLK_SDM_DISP_CC_SDM845_H
@@ -35,6 +35,17 @@
#define DISP_CC_PLL0 25
#define DISP_CC_MDSS_BYTE0_DIV_CLK_SRC 26
#define DISP_CC_MDSS_BYTE1_DIV_CLK_SRC 27
+#define DISP_CC_MDSS_DP_AUX_CLK 28
+#define DISP_CC_MDSS_DP_AUX_CLK_SRC 29
+#define DISP_CC_MDSS_DP_CRYPTO_CLK 30
+#define DISP_CC_MDSS_DP_CRYPTO_CLK_SRC 31
+#define DISP_CC_MDSS_DP_LINK_CLK 32
+#define DISP_CC_MDSS_DP_LINK_CLK_SRC 33
+#define DISP_CC_MDSS_DP_LINK_INTF_CLK 34
+#define DISP_CC_MDSS_DP_PIXEL1_CLK 35
+#define DISP_CC_MDSS_DP_PIXEL1_CLK_SRC 36
+#define DISP_CC_MDSS_DP_PIXEL_CLK 37
+#define DISP_CC_MDSS_DP_PIXEL_CLK_SRC 38
/* DISP_CC Reset */
#define DISP_CC_MDSS_RSCC_BCR 0
diff --git a/include/dt-bindings/clock/qcom,gcc-ipq6018.h b/include/dt-bindings/clock/qcom,gcc-ipq6018.h
new file mode 100644
index 000000000000..6f4be3aa0acf
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,gcc-ipq6018.h
@@ -0,0 +1,262 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLOCK_IPQ_GCC_6018_H
+#define _DT_BINDINGS_CLOCK_IPQ_GCC_6018_H
+
+#define GPLL0 0
+#define UBI32_PLL 1
+#define GPLL6 2
+#define GPLL4 3
+#define PCNOC_BFDCD_CLK_SRC 4
+#define GPLL2 5
+#define NSS_CRYPTO_PLL 6
+#define NSS_PPE_CLK_SRC 7
+#define GCC_XO_CLK_SRC 8
+#define NSS_CE_CLK_SRC 9
+#define GCC_SLEEP_CLK_SRC 10
+#define APSS_AHB_CLK_SRC 11
+#define NSS_PORT5_RX_CLK_SRC 12
+#define NSS_PORT5_TX_CLK_SRC 13
+#define PCIE0_AXI_CLK_SRC 14
+#define USB0_MASTER_CLK_SRC 15
+#define APSS_AHB_POSTDIV_CLK_SRC 16
+#define NSS_PORT1_RX_CLK_SRC 17
+#define NSS_PORT1_TX_CLK_SRC 18
+#define NSS_PORT2_RX_CLK_SRC 19
+#define NSS_PORT2_TX_CLK_SRC 20
+#define NSS_PORT3_RX_CLK_SRC 21
+#define NSS_PORT3_TX_CLK_SRC 22
+#define NSS_PORT4_RX_CLK_SRC 23
+#define NSS_PORT4_TX_CLK_SRC 24
+#define NSS_PORT5_RX_DIV_CLK_SRC 25
+#define NSS_PORT5_TX_DIV_CLK_SRC 26
+#define APSS_AXI_CLK_SRC 27
+#define NSS_CRYPTO_CLK_SRC 28
+#define NSS_PORT1_RX_DIV_CLK_SRC 29
+#define NSS_PORT1_TX_DIV_CLK_SRC 30
+#define NSS_PORT2_RX_DIV_CLK_SRC 31
+#define NSS_PORT2_TX_DIV_CLK_SRC 32
+#define NSS_PORT3_RX_DIV_CLK_SRC 33
+#define NSS_PORT3_TX_DIV_CLK_SRC 34
+#define NSS_PORT4_RX_DIV_CLK_SRC 35
+#define NSS_PORT4_TX_DIV_CLK_SRC 36
+#define NSS_UBI0_CLK_SRC 37
+#define BLSP1_QUP1_I2C_APPS_CLK_SRC 38
+#define BLSP1_QUP1_SPI_APPS_CLK_SRC 39
+#define BLSP1_QUP2_I2C_APPS_CLK_SRC 40
+#define BLSP1_QUP2_SPI_APPS_CLK_SRC 41
+#define BLSP1_QUP3_I2C_APPS_CLK_SRC 42
+#define BLSP1_QUP3_SPI_APPS_CLK_SRC 43
+#define BLSP1_QUP4_I2C_APPS_CLK_SRC 44
+#define BLSP1_QUP4_SPI_APPS_CLK_SRC 45
+#define BLSP1_QUP5_I2C_APPS_CLK_SRC 46
+#define BLSP1_QUP5_SPI_APPS_CLK_SRC 47
+#define BLSP1_QUP6_I2C_APPS_CLK_SRC 48
+#define BLSP1_QUP6_SPI_APPS_CLK_SRC 49
+#define BLSP1_UART1_APPS_CLK_SRC 50
+#define BLSP1_UART2_APPS_CLK_SRC 51
+#define BLSP1_UART3_APPS_CLK_SRC 52
+#define BLSP1_UART4_APPS_CLK_SRC 53
+#define BLSP1_UART5_APPS_CLK_SRC 54
+#define BLSP1_UART6_APPS_CLK_SRC 55
+#define CRYPTO_CLK_SRC 56
+#define NSS_UBI0_DIV_CLK_SRC 57
+#define PCIE0_AUX_CLK_SRC 58
+#define PCIE0_PIPE_CLK_SRC 59
+#define SDCC1_APPS_CLK_SRC 60
+#define USB0_AUX_CLK_SRC 61
+#define USB0_MOCK_UTMI_CLK_SRC 62
+#define USB0_PIPE_CLK_SRC 63
+#define USB1_MOCK_UTMI_CLK_SRC 64
+#define GCC_APSS_AHB_CLK 65
+#define GCC_APSS_AXI_CLK 66
+#define GCC_BLSP1_AHB_CLK 67
+#define GCC_BLSP1_QUP1_I2C_APPS_CLK 68
+#define GCC_BLSP1_QUP1_SPI_APPS_CLK 69
+#define GCC_BLSP1_QUP2_I2C_APPS_CLK 70
+#define GCC_BLSP1_QUP2_SPI_APPS_CLK 71
+#define GCC_BLSP1_QUP3_I2C_APPS_CLK 72
+#define GCC_BLSP1_QUP3_SPI_APPS_CLK 73
+#define GCC_BLSP1_QUP4_I2C_APPS_CLK 74
+#define GCC_BLSP1_QUP4_SPI_APPS_CLK 75
+#define GCC_BLSP1_QUP5_I2C_APPS_CLK 76
+#define GCC_BLSP1_QUP5_SPI_APPS_CLK 77
+#define GCC_BLSP1_QUP6_I2C_APPS_CLK 78
+#define GCC_BLSP1_QUP6_SPI_APPS_CLK 79
+#define GCC_BLSP1_UART1_APPS_CLK 80
+#define GCC_BLSP1_UART2_APPS_CLK 81
+#define GCC_BLSP1_UART3_APPS_CLK 82
+#define GCC_BLSP1_UART4_APPS_CLK 83
+#define GCC_BLSP1_UART5_APPS_CLK 84
+#define GCC_BLSP1_UART6_APPS_CLK 85
+#define GCC_CRYPTO_AHB_CLK 86
+#define GCC_CRYPTO_AXI_CLK 87
+#define GCC_CRYPTO_CLK 88
+#define GCC_XO_CLK 89
+#define GCC_XO_DIV4_CLK 90
+#define GCC_MDIO_AHB_CLK 91
+#define GCC_CRYPTO_PPE_CLK 92
+#define GCC_NSS_CE_APB_CLK 93
+#define GCC_NSS_CE_AXI_CLK 94
+#define GCC_NSS_CFG_CLK 95
+#define GCC_NSS_CRYPTO_CLK 96
+#define GCC_NSS_CSR_CLK 97
+#define GCC_NSS_EDMA_CFG_CLK 98
+#define GCC_NSS_EDMA_CLK 99
+#define GCC_NSS_NOC_CLK 100
+#define GCC_NSS_PORT1_RX_CLK 101
+#define GCC_NSS_PORT1_TX_CLK 102
+#define GCC_NSS_PORT2_RX_CLK 103
+#define GCC_NSS_PORT2_TX_CLK 104
+#define GCC_NSS_PORT3_RX_CLK 105
+#define GCC_NSS_PORT3_TX_CLK 106
+#define GCC_NSS_PORT4_RX_CLK 107
+#define GCC_NSS_PORT4_TX_CLK 108
+#define GCC_NSS_PORT5_RX_CLK 109
+#define GCC_NSS_PORT5_TX_CLK 110
+#define GCC_NSS_PPE_CFG_CLK 111
+#define GCC_NSS_PPE_CLK 112
+#define GCC_NSS_PPE_IPE_CLK 113
+#define GCC_NSS_PTP_REF_CLK 114
+#define GCC_NSSNOC_CE_APB_CLK 115
+#define GCC_NSSNOC_CE_AXI_CLK 116
+#define GCC_NSSNOC_CRYPTO_CLK 117
+#define GCC_NSSNOC_PPE_CFG_CLK 118
+#define GCC_NSSNOC_PPE_CLK 119
+#define GCC_NSSNOC_QOSGEN_REF_CLK 120
+#define GCC_NSSNOC_TIMEOUT_REF_CLK 121
+#define GCC_NSSNOC_UBI0_AHB_CLK 122
+#define GCC_PORT1_MAC_CLK 123
+#define GCC_PORT2_MAC_CLK 124
+#define GCC_PORT3_MAC_CLK 125
+#define GCC_PORT4_MAC_CLK 126
+#define GCC_PORT5_MAC_CLK 127
+#define GCC_UBI0_AHB_CLK 128
+#define GCC_UBI0_AXI_CLK 129
+#define GCC_UBI0_CORE_CLK 130
+#define GCC_PCIE0_AHB_CLK 131
+#define GCC_PCIE0_AUX_CLK 132
+#define GCC_PCIE0_AXI_M_CLK 133
+#define GCC_PCIE0_AXI_S_CLK 134
+#define GCC_PCIE0_PIPE_CLK 135
+#define GCC_PRNG_AHB_CLK 136
+#define GCC_QPIC_AHB_CLK 137
+#define GCC_QPIC_CLK 138
+#define GCC_SDCC1_AHB_CLK 139
+#define GCC_SDCC1_APPS_CLK 140
+#define GCC_UNIPHY0_AHB_CLK 141
+#define GCC_UNIPHY0_PORT1_RX_CLK 142
+#define GCC_UNIPHY0_PORT1_TX_CLK 143
+#define GCC_UNIPHY0_PORT2_RX_CLK 144
+#define GCC_UNIPHY0_PORT2_TX_CLK 145
+#define GCC_UNIPHY0_PORT3_RX_CLK 146
+#define GCC_UNIPHY0_PORT3_TX_CLK 147
+#define GCC_UNIPHY0_PORT4_RX_CLK 148
+#define GCC_UNIPHY0_PORT4_TX_CLK 149
+#define GCC_UNIPHY0_PORT5_RX_CLK 150
+#define GCC_UNIPHY0_PORT5_TX_CLK 151
+#define GCC_UNIPHY0_SYS_CLK 152
+#define GCC_UNIPHY1_AHB_CLK 153
+#define GCC_UNIPHY1_PORT5_RX_CLK 154
+#define GCC_UNIPHY1_PORT5_TX_CLK 155
+#define GCC_UNIPHY1_SYS_CLK 156
+#define GCC_USB0_AUX_CLK 157
+#define GCC_USB0_MASTER_CLK 158
+#define GCC_USB0_MOCK_UTMI_CLK 159
+#define GCC_USB0_PHY_CFG_AHB_CLK 160
+#define GCC_USB0_PIPE_CLK 161
+#define GCC_USB0_SLEEP_CLK 162
+#define GCC_USB1_MASTER_CLK 163
+#define GCC_USB1_MOCK_UTMI_CLK 164
+#define GCC_USB1_PHY_CFG_AHB_CLK 165
+#define GCC_USB1_SLEEP_CLK 166
+#define GP1_CLK_SRC 167
+#define GP2_CLK_SRC 168
+#define GP3_CLK_SRC 169
+#define GCC_GP1_CLK 170
+#define GCC_GP2_CLK 171
+#define GCC_GP3_CLK 172
+#define SYSTEM_NOC_BFDCD_CLK_SRC 173
+#define GCC_NSSNOC_SNOC_CLK 174
+#define GCC_UBI0_NC_AXI_CLK 175
+#define GCC_UBI1_NC_AXI_CLK 176
+#define GPLL0_MAIN 177
+#define UBI32_PLL_MAIN 178
+#define GPLL6_MAIN 179
+#define GPLL4_MAIN 180
+#define GPLL2_MAIN 181
+#define NSS_CRYPTO_PLL_MAIN 182
+#define GCC_CMN_12GPLL_AHB_CLK 183
+#define GCC_CMN_12GPLL_SYS_CLK 184
+#define GCC_SNOC_BUS_TIMEOUT2_AHB_CLK 185
+#define GCC_SYS_NOC_USB0_AXI_CLK 186
+#define GCC_SYS_NOC_PCIE0_AXI_CLK 187
+#define QDSS_TSCTR_CLK_SRC 188
+#define QDSS_AT_CLK_SRC 189
+#define GCC_QDSS_AT_CLK 190
+#define GCC_QDSS_DAP_CLK 191
+#define ADSS_PWM_CLK_SRC 192
+#define GCC_ADSS_PWM_CLK 193
+#define SDCC1_ICE_CORE_CLK_SRC 194
+#define GCC_SDCC1_ICE_CORE_CLK 195
+#define GCC_DCC_CLK 196
+#define PCIE0_RCHNG_CLK_SRC 197
+#define GCC_PCIE0_AXI_S_BRIDGE_CLK 198
+#define PCIE0_RCHNG_CLK 199
+#define UBI32_MEM_NOC_BFDCD_CLK_SRC 200
+#define WCSS_AHB_CLK_SRC 201
+#define Q6_AXI_CLK_SRC 202
+#define GCC_Q6SS_PCLKDBG_CLK 203
+#define GCC_Q6_TSCTR_1TO2_CLK 204
+#define GCC_WCSS_CORE_TBU_CLK 205
+#define GCC_WCSS_AXI_M_CLK 206
+#define GCC_SYS_NOC_WCSS_AHB_CLK 207
+#define GCC_Q6_AXIM_CLK 208
+#define GCC_Q6SS_ATBM_CLK 209
+#define GCC_WCSS_Q6_TBU_CLK 210
+#define GCC_Q6_AXIM2_CLK 211
+#define GCC_Q6_AHB_CLK 212
+#define GCC_Q6_AHB_S_CLK 213
+#define GCC_WCSS_DBG_IFC_APB_CLK 214
+#define GCC_WCSS_DBG_IFC_ATB_CLK 215
+#define GCC_WCSS_DBG_IFC_NTS_CLK 216
+#define GCC_WCSS_DBG_IFC_DAPBUS_CLK 217
+#define GCC_WCSS_DBG_IFC_APB_BDG_CLK 218
+#define GCC_WCSS_DBG_IFC_ATB_BDG_CLK 219
+#define GCC_WCSS_DBG_IFC_NTS_BDG_CLK 220
+#define GCC_WCSS_DBG_IFC_DAPBUS_BDG_CLK 221
+#define GCC_WCSS_ECAHB_CLK 222
+#define GCC_WCSS_ACMT_CLK 223
+#define GCC_WCSS_AHB_S_CLK 224
+#define GCC_RBCPR_WCSS_CLK 225
+#define RBCPR_WCSS_CLK_SRC 226
+#define GCC_RBCPR_WCSS_AHB_CLK 227
+#define GCC_LPASS_CORE_AXIM_CLK 228
+#define GCC_LPASS_SNOC_CFG_CLK 229
+#define GCC_LPASS_Q6_AXIM_CLK 230
+#define GCC_LPASS_Q6_ATBM_AT_CLK 231
+#define GCC_LPASS_Q6_PCLKDBG_CLK 232
+#define GCC_LPASS_Q6SS_TSCTR_1TO2_CLK 233
+#define GCC_LPASS_Q6SS_TRIG_CLK 234
+#define GCC_LPASS_TBU_CLK 235
+#define LPASS_CORE_AXIM_CLK_SRC 236
+#define LPASS_SNOC_CFG_CLK_SRC 237
+#define LPASS_Q6_AXIM_CLK_SRC 238
+#define GCC_PCNOC_LPASS_CLK 239
+#define GCC_UBI0_UTCM_CLK 240
+#define SNOC_NSSNOC_BFDCD_CLK_SRC 241
+#define GCC_SNOC_NSSNOC_CLK 242
+#define GCC_MEM_NOC_Q6_AXI_CLK 243
+#define GCC_MEM_NOC_UBI32_CLK 244
+#define GCC_MEM_NOC_LPASS_CLK 245
+#define GCC_SNOC_LPASS_CFG_CLK 246
+#define GCC_SYS_NOC_QDSS_STM_AXI_CLK 247
+#define GCC_QDSS_STM_CLK 248
+#define GCC_QDSS_TRACECLKIN_CLK 249
+#define QDSS_STM_CLK_SRC 250
+#define QDSS_TRACECLKIN_CLK_SRC 251
+#define GCC_NSSNOC_ATB_CLK 252
+#endif
diff --git a/include/dt-bindings/clock/qcom,gcc-msm8998.h b/include/dt-bindings/clock/qcom,gcc-msm8998.h
index ab376262fcea..63e02dc32a0b 100644
--- a/include/dt-bindings/clock/qcom,gcc-msm8998.h
+++ b/include/dt-bindings/clock/qcom,gcc-msm8998.h
@@ -177,6 +177,12 @@
#define GCC_UFS_CLKREF_CLK 168
#define GCC_PCIE_CLKREF_CLK 169
#define GCC_RX1_USB2_CLKREF_CLK 170
+#define GCC_MSS_CFG_AHB_CLK 171
+#define GCC_BOOT_ROM_AHB_CLK 172
+#define GCC_MSS_GPLL0_DIV_CLK_SRC 173
+#define GCC_MSS_SNOC_AXI_CLK 174
+#define GCC_MSS_MNOC_BIMC_AXI_CLK 175
+#define GCC_BIMC_GFX_CLK 176
#define PCIE_0_GDSC 0
#define UFS_GDSC 1
@@ -290,5 +296,6 @@
#define GCC_MSMPU_BCR 105
#define GCC_QUSB2PHY_PRIM_BCR 106
#define GCC_QUSB2PHY_SEC_BCR 107
+#define GCC_MSS_RESTART 108
#endif
diff --git a/include/dt-bindings/clock/qcom,gcc-sc7180.h b/include/dt-bindings/clock/qcom,gcc-sc7180.h
new file mode 100644
index 000000000000..e8029b2e92d7
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,gcc-sc7180.h
@@ -0,0 +1,155 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_GCC_SC7180_H
+#define _DT_BINDINGS_CLK_QCOM_GCC_SC7180_H
+
+/* GCC clocks */
+#define GCC_GPLL0_MAIN_DIV_CDIV 0
+#define GPLL0 1
+#define GPLL0_OUT_EVEN 2
+#define GPLL1 3
+#define GPLL4 4
+#define GPLL6 5
+#define GPLL7 6
+#define GCC_AGGRE_UFS_PHY_AXI_CLK 7
+#define GCC_AGGRE_USB3_PRIM_AXI_CLK 8
+#define GCC_BOOT_ROM_AHB_CLK 9
+#define GCC_CAMERA_AHB_CLK 10
+#define GCC_CAMERA_HF_AXI_CLK 11
+#define GCC_CAMERA_THROTTLE_HF_AXI_CLK 12
+#define GCC_CAMERA_XO_CLK 13
+#define GCC_CE1_AHB_CLK 14
+#define GCC_CE1_AXI_CLK 15
+#define GCC_CE1_CLK 16
+#define GCC_CFG_NOC_USB3_PRIM_AXI_CLK 17
+#define GCC_CPUSS_AHB_CLK 18
+#define GCC_CPUSS_AHB_CLK_SRC 19
+#define GCC_CPUSS_GNOC_CLK 20
+#define GCC_CPUSS_RBCPR_CLK 21
+#define GCC_DDRSS_GPU_AXI_CLK 22
+#define GCC_DISP_AHB_CLK 23
+#define GCC_DISP_GPLL0_CLK_SRC 24
+#define GCC_DISP_GPLL0_DIV_CLK_SRC 25
+#define GCC_DISP_HF_AXI_CLK 26
+#define GCC_DISP_THROTTLE_HF_AXI_CLK 27
+#define GCC_DISP_XO_CLK 28
+#define GCC_GP1_CLK 29
+#define GCC_GP1_CLK_SRC 30
+#define GCC_GP2_CLK 31
+#define GCC_GP2_CLK_SRC 32
+#define GCC_GP3_CLK 33
+#define GCC_GP3_CLK_SRC 34
+#define GCC_GPU_CFG_AHB_CLK 35
+#define GCC_GPU_GPLL0_CLK_SRC 36
+#define GCC_GPU_GPLL0_DIV_CLK_SRC 37
+#define GCC_GPU_MEMNOC_GFX_CLK 38
+#define GCC_GPU_SNOC_DVM_GFX_CLK 39
+#define GCC_NPU_AXI_CLK 40
+#define GCC_NPU_BWMON_AXI_CLK 41
+#define GCC_NPU_BWMON_DMA_CFG_AHB_CLK 42
+#define GCC_NPU_BWMON_DSP_CFG_AHB_CLK 43
+#define GCC_NPU_CFG_AHB_CLK 44
+#define GCC_NPU_DMA_CLK 45
+#define GCC_NPU_GPLL0_CLK_SRC 46
+#define GCC_NPU_GPLL0_DIV_CLK_SRC 47
+#define GCC_PDM2_CLK 48
+#define GCC_PDM2_CLK_SRC 49
+#define GCC_PDM_AHB_CLK 50
+#define GCC_PDM_XO4_CLK 51
+#define GCC_PRNG_AHB_CLK 52
+#define GCC_QSPI_CNOC_PERIPH_AHB_CLK 53
+#define GCC_QSPI_CORE_CLK 54
+#define GCC_QSPI_CORE_CLK_SRC 55
+#define GCC_QUPV3_WRAP0_CORE_2X_CLK 56
+#define GCC_QUPV3_WRAP0_CORE_CLK 57
+#define GCC_QUPV3_WRAP0_S0_CLK 58
+#define GCC_QUPV3_WRAP0_S0_CLK_SRC 59
+#define GCC_QUPV3_WRAP0_S1_CLK 60
+#define GCC_QUPV3_WRAP0_S1_CLK_SRC 61
+#define GCC_QUPV3_WRAP0_S2_CLK 62
+#define GCC_QUPV3_WRAP0_S2_CLK_SRC 63
+#define GCC_QUPV3_WRAP0_S3_CLK 64
+#define GCC_QUPV3_WRAP0_S3_CLK_SRC 65
+#define GCC_QUPV3_WRAP0_S4_CLK 66
+#define GCC_QUPV3_WRAP0_S4_CLK_SRC 67
+#define GCC_QUPV3_WRAP0_S5_CLK 68
+#define GCC_QUPV3_WRAP0_S5_CLK_SRC 69
+#define GCC_QUPV3_WRAP1_CORE_2X_CLK 70
+#define GCC_QUPV3_WRAP1_CORE_CLK 71
+#define GCC_QUPV3_WRAP1_S0_CLK 72
+#define GCC_QUPV3_WRAP1_S0_CLK_SRC 73
+#define GCC_QUPV3_WRAP1_S1_CLK 74
+#define GCC_QUPV3_WRAP1_S1_CLK_SRC 75
+#define GCC_QUPV3_WRAP1_S2_CLK 76
+#define GCC_QUPV3_WRAP1_S2_CLK_SRC 77
+#define GCC_QUPV3_WRAP1_S3_CLK 78
+#define GCC_QUPV3_WRAP1_S3_CLK_SRC 79
+#define GCC_QUPV3_WRAP1_S4_CLK 80
+#define GCC_QUPV3_WRAP1_S4_CLK_SRC 81
+#define GCC_QUPV3_WRAP1_S5_CLK 82
+#define GCC_QUPV3_WRAP1_S5_CLK_SRC 83
+#define GCC_QUPV3_WRAP_0_M_AHB_CLK 84
+#define GCC_QUPV3_WRAP_0_S_AHB_CLK 85
+#define GCC_QUPV3_WRAP_1_M_AHB_CLK 86
+#define GCC_QUPV3_WRAP_1_S_AHB_CLK 87
+#define GCC_SDCC1_AHB_CLK 88
+#define GCC_SDCC1_APPS_CLK 89
+#define GCC_SDCC1_APPS_CLK_SRC 90
+#define GCC_SDCC1_ICE_CORE_CLK 91
+#define GCC_SDCC1_ICE_CORE_CLK_SRC 92
+#define GCC_SDCC2_AHB_CLK 93
+#define GCC_SDCC2_APPS_CLK 94
+#define GCC_SDCC2_APPS_CLK_SRC 95
+#define GCC_SYS_NOC_CPUSS_AHB_CLK 96
+#define GCC_UFS_MEM_CLKREF_CLK 97
+#define GCC_UFS_PHY_AHB_CLK 98
+#define GCC_UFS_PHY_AXI_CLK 99
+#define GCC_UFS_PHY_AXI_CLK_SRC 100
+#define GCC_UFS_PHY_ICE_CORE_CLK 101
+#define GCC_UFS_PHY_ICE_CORE_CLK_SRC 102
+#define GCC_UFS_PHY_PHY_AUX_CLK 103
+#define GCC_UFS_PHY_PHY_AUX_CLK_SRC 104
+#define GCC_UFS_PHY_RX_SYMBOL_0_CLK 105
+#define GCC_UFS_PHY_TX_SYMBOL_0_CLK 106
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK 107
+#define GCC_UFS_PHY_UNIPRO_CORE_CLK_SRC 108
+#define GCC_USB30_PRIM_MASTER_CLK 109
+#define GCC_USB30_PRIM_MASTER_CLK_SRC 110
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK 111
+#define GCC_USB30_PRIM_MOCK_UTMI_CLK_SRC 112
+#define GCC_USB30_PRIM_SLEEP_CLK 113
+#define GCC_USB3_PRIM_CLKREF_CLK 114
+#define GCC_USB3_PRIM_PHY_AUX_CLK 115
+#define GCC_USB3_PRIM_PHY_AUX_CLK_SRC 116
+#define GCC_USB3_PRIM_PHY_COM_AUX_CLK 117
+#define GCC_USB3_PRIM_PHY_PIPE_CLK 118
+#define GCC_USB_PHY_CFG_AHB2PHY_CLK 119
+#define GCC_VIDEO_AHB_CLK 120
+#define GCC_VIDEO_AXI_CLK 121
+#define GCC_VIDEO_GPLL0_DIV_CLK_SRC 122
+#define GCC_VIDEO_THROTTLE_AXI_CLK 123
+#define GCC_VIDEO_XO_CLK 124
+
+/* GCC resets */
+#define GCC_QUSB2PHY_PRIM_BCR 0
+#define GCC_QUSB2PHY_SEC_BCR 1
+#define GCC_UFS_PHY_BCR 2
+#define GCC_USB30_PRIM_BCR 3
+#define GCC_USB3_DP_PHY_PRIM_BCR 4
+#define GCC_USB3_DP_PHY_SEC_BCR 5
+#define GCC_USB3_PHY_PRIM_BCR 6
+#define GCC_USB3_PHY_SEC_BCR 7
+#define GCC_USB3PHY_PHY_PRIM_BCR 8
+#define GCC_USB3PHY_PHY_SEC_BCR 9
+#define GCC_USB_PHY_CFG_AHB2PHY_BCR 10
+
+/* GCC GDSCRs */
+#define UFS_PHY_GDSC 0
+#define USB30_PRIM_GDSC 1
+#define HLOS1_VOTE_MMNOC_MMU_TBU_HF0_GDSC 2
+#define HLOS1_VOTE_MMNOC_MMU_TBU_SF_GDSC 3
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,gpucc-sc7180.h b/include/dt-bindings/clock/qcom,gpucc-sc7180.h
new file mode 100644
index 000000000000..0e4643b08b49
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,gpucc-sc7180.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_GPU_CC_SC7180_H
+#define _DT_BINDINGS_CLK_QCOM_GPU_CC_SC7180_H
+
+#define GPU_CC_PLL1 0
+#define GPU_CC_AHB_CLK 1
+#define GPU_CC_CRC_AHB_CLK 2
+#define GPU_CC_CX_GMU_CLK 3
+#define GPU_CC_CX_SNOC_DVM_CLK 4
+#define GPU_CC_CXO_AON_CLK 5
+#define GPU_CC_CXO_CLK 6
+#define GPU_CC_GMU_CLK_SRC 7
+
+/* CAM_CC GDSCRs */
+#define CX_GDSC 0
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,mmcc-msm8998.h b/include/dt-bindings/clock/qcom,mmcc-msm8998.h
new file mode 100644
index 000000000000..ecbafdb930aa
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,mmcc-msm8998.h
@@ -0,0 +1,210 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_MSM_MMCC_8998_H
+#define _DT_BINDINGS_CLK_MSM_MMCC_8998_H
+
+#define MMPLL0 0
+#define MMPLL0_OUT_EVEN 1
+#define MMPLL1 2
+#define MMPLL1_OUT_EVEN 3
+#define MMPLL3 4
+#define MMPLL3_OUT_EVEN 5
+#define MMPLL4 6
+#define MMPLL4_OUT_EVEN 7
+#define MMPLL5 8
+#define MMPLL5_OUT_EVEN 9
+#define MMPLL6 10
+#define MMPLL6_OUT_EVEN 11
+#define MMPLL7 12
+#define MMPLL7_OUT_EVEN 13
+#define MMPLL10 14
+#define MMPLL10_OUT_EVEN 15
+#define BYTE0_CLK_SRC 16
+#define BYTE1_CLK_SRC 17
+#define CCI_CLK_SRC 18
+#define CPP_CLK_SRC 19
+#define CSI0_CLK_SRC 20
+#define CSI1_CLK_SRC 21
+#define CSI2_CLK_SRC 22
+#define CSI3_CLK_SRC 23
+#define CSIPHY_CLK_SRC 24
+#define CSI0PHYTIMER_CLK_SRC 25
+#define CSI1PHYTIMER_CLK_SRC 26
+#define CSI2PHYTIMER_CLK_SRC 27
+#define DP_AUX_CLK_SRC 28
+#define DP_CRYPTO_CLK_SRC 29
+#define DP_LINK_CLK_SRC 30
+#define DP_PIXEL_CLK_SRC 31
+#define ESC0_CLK_SRC 32
+#define ESC1_CLK_SRC 33
+#define EXTPCLK_CLK_SRC 34
+#define FD_CORE_CLK_SRC 35
+#define HDMI_CLK_SRC 36
+#define JPEG0_CLK_SRC 37
+#define MAXI_CLK_SRC 38
+#define MCLK0_CLK_SRC 39
+#define MCLK1_CLK_SRC 40
+#define MCLK2_CLK_SRC 41
+#define MCLK3_CLK_SRC 42
+#define MDP_CLK_SRC 43
+#define VSYNC_CLK_SRC 44
+#define AHB_CLK_SRC 45
+#define AXI_CLK_SRC 46
+#define PCLK0_CLK_SRC 47
+#define PCLK1_CLK_SRC 48
+#define ROT_CLK_SRC 49
+#define VIDEO_CORE_CLK_SRC 50
+#define VIDEO_SUBCORE0_CLK_SRC 51
+#define VIDEO_SUBCORE1_CLK_SRC 52
+#define VFE0_CLK_SRC 53
+#define VFE1_CLK_SRC 54
+#define MISC_AHB_CLK 55
+#define VIDEO_CORE_CLK 56
+#define VIDEO_AHB_CLK 57
+#define VIDEO_AXI_CLK 58
+#define VIDEO_MAXI_CLK 59
+#define VIDEO_SUBCORE0_CLK 60
+#define VIDEO_SUBCORE1_CLK 61
+#define MDSS_AHB_CLK 62
+#define MDSS_HDMI_DP_AHB_CLK 63
+#define MDSS_AXI_CLK 64
+#define MDSS_PCLK0_CLK 65
+#define MDSS_PCLK1_CLK 66
+#define MDSS_MDP_CLK 67
+#define MDSS_MDP_LUT_CLK 68
+#define MDSS_EXTPCLK_CLK 69
+#define MDSS_VSYNC_CLK 70
+#define MDSS_HDMI_CLK 71
+#define MDSS_BYTE0_CLK 72
+#define MDSS_BYTE1_CLK 73
+#define MDSS_ESC0_CLK 74
+#define MDSS_ESC1_CLK 75
+#define MDSS_ROT_CLK 76
+#define MDSS_DP_LINK_CLK 77
+#define MDSS_DP_LINK_INTF_CLK 78
+#define MDSS_DP_CRYPTO_CLK 79
+#define MDSS_DP_PIXEL_CLK 80
+#define MDSS_DP_AUX_CLK 81
+#define MDSS_BYTE0_INTF_CLK 82
+#define MDSS_BYTE1_INTF_CLK 83
+#define CAMSS_CSI0PHYTIMER_CLK 84
+#define CAMSS_CSI1PHYTIMER_CLK 85
+#define CAMSS_CSI2PHYTIMER_CLK 86
+#define CAMSS_CSI0_CLK 87
+#define CAMSS_CSI0_AHB_CLK 88
+#define CAMSS_CSI0RDI_CLK 89
+#define CAMSS_CSI0PIX_CLK 90
+#define CAMSS_CSI1_CLK 91
+#define CAMSS_CSI1_AHB_CLK 92
+#define CAMSS_CSI1RDI_CLK 93
+#define CAMSS_CSI1PIX_CLK 94
+#define CAMSS_CSI2_CLK 95
+#define CAMSS_CSI2_AHB_CLK 96
+#define CAMSS_CSI2RDI_CLK 97
+#define CAMSS_CSI2PIX_CLK 98
+#define CAMSS_CSI3_CLK 99
+#define CAMSS_CSI3_AHB_CLK 100
+#define CAMSS_CSI3RDI_CLK 101
+#define CAMSS_CSI3PIX_CLK 102
+#define CAMSS_ISPIF_AHB_CLK 103
+#define CAMSS_CCI_CLK 104
+#define CAMSS_CCI_AHB_CLK 105
+#define CAMSS_MCLK0_CLK 106
+#define CAMSS_MCLK1_CLK 107
+#define CAMSS_MCLK2_CLK 108
+#define CAMSS_MCLK3_CLK 109
+#define CAMSS_TOP_AHB_CLK 110
+#define CAMSS_AHB_CLK 111
+#define CAMSS_MICRO_AHB_CLK 112
+#define CAMSS_JPEG0_CLK 113
+#define CAMSS_JPEG_AHB_CLK 114
+#define CAMSS_JPEG_AXI_CLK 115
+#define CAMSS_VFE0_AHB_CLK 116
+#define CAMSS_VFE1_AHB_CLK 117
+#define CAMSS_VFE0_CLK 118
+#define CAMSS_VFE1_CLK 119
+#define CAMSS_CPP_CLK 120
+#define CAMSS_CPP_AHB_CLK 121
+#define CAMSS_VFE_VBIF_AHB_CLK 122
+#define CAMSS_VFE_VBIF_AXI_CLK 123
+#define CAMSS_CPP_AXI_CLK 124
+#define CAMSS_CPP_VBIF_AHB_CLK 125
+#define CAMSS_CSI_VFE0_CLK 126
+#define CAMSS_CSI_VFE1_CLK 127
+#define CAMSS_VFE0_STREAM_CLK 128
+#define CAMSS_VFE1_STREAM_CLK 129
+#define CAMSS_CPHY_CSID0_CLK 130
+#define CAMSS_CPHY_CSID1_CLK 131
+#define CAMSS_CPHY_CSID2_CLK 132
+#define CAMSS_CPHY_CSID3_CLK 133
+#define CAMSS_CSIPHY0_CLK 134
+#define CAMSS_CSIPHY1_CLK 135
+#define CAMSS_CSIPHY2_CLK 136
+#define FD_CORE_CLK 137
+#define FD_CORE_UAR_CLK 138
+#define FD_AHB_CLK 139
+#define MNOC_AHB_CLK 140
+#define BIMC_SMMU_AHB_CLK 141
+#define BIMC_SMMU_AXI_CLK 142
+#define MNOC_MAXI_CLK 143
+#define VMEM_MAXI_CLK 144
+#define VMEM_AHB_CLK 145
+
+#define SPDM_BCR 0
+#define SPDM_RM_BCR 1
+#define MISC_BCR 2
+#define VIDEO_TOP_BCR 3
+#define THROTTLE_VIDEO_BCR 4
+#define MDSS_BCR 5
+#define THROTTLE_MDSS_BCR 6
+#define CAMSS_PHY0_BCR 7
+#define CAMSS_PHY1_BCR 8
+#define CAMSS_PHY2_BCR 9
+#define CAMSS_CSI0_BCR 10
+#define CAMSS_CSI0RDI_BCR 11
+#define CAMSS_CSI0PIX_BCR 12
+#define CAMSS_CSI1_BCR 13
+#define CAMSS_CSI1RDI_BCR 14
+#define CAMSS_CSI1PIX_BCR 15
+#define CAMSS_CSI2_BCR 16
+#define CAMSS_CSI2RDI_BCR 17
+#define CAMSS_CSI2PIX_BCR 18
+#define CAMSS_CSI3_BCR 19
+#define CAMSS_CSI3RDI_BCR 20
+#define CAMSS_CSI3PIX_BCR 21
+#define CAMSS_ISPIF_BCR 22
+#define CAMSS_CCI_BCR 23
+#define CAMSS_TOP_BCR 24
+#define CAMSS_AHB_BCR 25
+#define CAMSS_MICRO_BCR 26
+#define CAMSS_JPEG_BCR 27
+#define CAMSS_VFE0_BCR 28
+#define CAMSS_VFE1_BCR 29
+#define CAMSS_VFE_VBIF_BCR 30
+#define CAMSS_CPP_TOP_BCR 31
+#define CAMSS_CPP_BCR 32
+#define CAMSS_CSI_VFE0_BCR 33
+#define CAMSS_CSI_VFE1_BCR 34
+#define CAMSS_FD_BCR 35
+#define THROTTLE_CAMSS_BCR 36
+#define MNOCAHB_BCR 37
+#define MNOCAXI_BCR 38
+#define BMIC_SMMU_BCR 39
+#define MNOC_MAXI_BCR 40
+#define VMEM_BCR 41
+#define BTO_BCR 42
+
+#define VIDEO_TOP_GDSC 1
+#define VIDEO_SUBCORE0_GDSC 2
+#define VIDEO_SUBCORE1_GDSC 3
+#define MDSS_GDSC 4
+#define CAMSS_TOP_GDSC 5
+#define CAMSS_VFE0_GDSC 6
+#define CAMSS_VFE1_GDSC 7
+#define CAMSS_CPP_GDSC 8
+#define BIMC_SMMU_GDSC 9
+
+#endif
diff --git a/include/dt-bindings/clock/qcom,q6sstopcc-qcs404.h b/include/dt-bindings/clock/qcom,q6sstopcc-qcs404.h
new file mode 100644
index 000000000000..c6f5290f0914
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,q6sstopcc-qcs404.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_Q6SSTOP_QCS404_H
+#define _DT_BINDINGS_CLK_Q6SSTOP_QCS404_H
+
+#define LCC_AHBFABRIC_CBC_CLK 0
+#define LCC_Q6SS_AHBS_CBC_CLK 1
+#define LCC_Q6SS_TCM_SLAVE_CBC_CLK 2
+#define LCC_Q6SS_AHBM_CBC_CLK 3
+#define LCC_Q6SS_AXIM_CBC_CLK 4
+#define LCC_Q6SS_BCR_SLEEP_CLK 5
+#define TCSR_Q6SS_LCC_CBCR_CLK 6
+
+#define Q6SSTOP_BCR_RESET 1
+#endif
diff --git a/include/dt-bindings/clock/qcom,videocc-sc7180.h b/include/dt-bindings/clock/qcom,videocc-sc7180.h
new file mode 100644
index 000000000000..7acaf1366b13
--- /dev/null
+++ b/include/dt-bindings/clock/qcom,videocc-sc7180.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_CLK_QCOM_VIDEO_CC_SC7180_H
+#define _DT_BINDINGS_CLK_QCOM_VIDEO_CC_SC7180_H
+
+/* VIDEO_CC clocks */
+#define VIDEO_PLL0 0
+#define VIDEO_CC_VCODEC0_AXI_CLK 1
+#define VIDEO_CC_VCODEC0_CORE_CLK 2
+#define VIDEO_CC_VENUS_AHB_CLK 3
+#define VIDEO_CC_VENUS_CLK_SRC 4
+#define VIDEO_CC_VENUS_CTL_AXI_CLK 5
+#define VIDEO_CC_VENUS_CTL_CORE_CLK 6
+#define VIDEO_CC_XO_CLK 7
+
+/* VIDEO_CC GDSCRs */
+#define VENUS_GDSC 0
+#define VCODEC0_GDSC 1
+
+#endif
diff --git a/include/dt-bindings/clock/r8a774b1-cpg-mssr.h b/include/dt-bindings/clock/r8a774b1-cpg-mssr.h
new file mode 100644
index 000000000000..1355451b74b0
--- /dev/null
+++ b/include/dt-bindings/clock/r8a774b1-cpg-mssr.h
@@ -0,0 +1,57 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (C) 2019 Renesas Electronics Corp.
+ */
+#ifndef __DT_BINDINGS_CLOCK_R8A774B1_CPG_MSSR_H__
+#define __DT_BINDINGS_CLOCK_R8A774B1_CPG_MSSR_H__
+
+#include <dt-bindings/clock/renesas-cpg-mssr.h>
+
+/* r8a774b1 CPG Core Clocks */
+#define R8A774B1_CLK_Z 0
+#define R8A774B1_CLK_ZG 1
+#define R8A774B1_CLK_ZTR 2
+#define R8A774B1_CLK_ZTRD2 3
+#define R8A774B1_CLK_ZT 4
+#define R8A774B1_CLK_ZX 5
+#define R8A774B1_CLK_S0D1 6
+#define R8A774B1_CLK_S0D2 7
+#define R8A774B1_CLK_S0D3 8
+#define R8A774B1_CLK_S0D4 9
+#define R8A774B1_CLK_S0D6 10
+#define R8A774B1_CLK_S0D8 11
+#define R8A774B1_CLK_S0D12 12
+#define R8A774B1_CLK_S1D2 13
+#define R8A774B1_CLK_S1D4 14
+#define R8A774B1_CLK_S2D1 15
+#define R8A774B1_CLK_S2D2 16
+#define R8A774B1_CLK_S2D4 17
+#define R8A774B1_CLK_S3D1 18
+#define R8A774B1_CLK_S3D2 19
+#define R8A774B1_CLK_S3D4 20
+#define R8A774B1_CLK_LB 21
+#define R8A774B1_CLK_CL 22
+#define R8A774B1_CLK_ZB3 23
+#define R8A774B1_CLK_ZB3D2 24
+#define R8A774B1_CLK_CR 25
+#define R8A774B1_CLK_DDR 26
+#define R8A774B1_CLK_SD0H 27
+#define R8A774B1_CLK_SD0 28
+#define R8A774B1_CLK_SD1H 29
+#define R8A774B1_CLK_SD1 30
+#define R8A774B1_CLK_SD2H 31
+#define R8A774B1_CLK_SD2 32
+#define R8A774B1_CLK_SD3H 33
+#define R8A774B1_CLK_SD3 34
+#define R8A774B1_CLK_RPC 35
+#define R8A774B1_CLK_RPCD2 36
+#define R8A774B1_CLK_MSO 37
+#define R8A774B1_CLK_HDMI 38
+#define R8A774B1_CLK_CSI0 39
+#define R8A774B1_CLK_CP 40
+#define R8A774B1_CLK_CPEX 41
+#define R8A774B1_CLK_R 42
+#define R8A774B1_CLK_OSC 43
+#define R8A774B1_CLK_CANFD 44
+
+#endif /* __DT_BINDINGS_CLOCK_R8A774B1_CPG_MSSR_H__ */
diff --git a/include/dt-bindings/clock/r8a77961-cpg-mssr.h b/include/dt-bindings/clock/r8a77961-cpg-mssr.h
new file mode 100644
index 000000000000..7921d785546d
--- /dev/null
+++ b/include/dt-bindings/clock/r8a77961-cpg-mssr.h
@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: GPL-2.0+
+ *
+ * Copyright (C) 2019 Renesas Electronics Corp.
+ */
+#ifndef __DT_BINDINGS_CLOCK_R8A77961_CPG_MSSR_H__
+#define __DT_BINDINGS_CLOCK_R8A77961_CPG_MSSR_H__
+
+#include <dt-bindings/clock/renesas-cpg-mssr.h>
+
+/* r8a77961 CPG Core Clocks */
+#define R8A77961_CLK_Z 0
+#define R8A77961_CLK_Z2 1
+#define R8A77961_CLK_ZR 2
+#define R8A77961_CLK_ZG 3
+#define R8A77961_CLK_ZTR 4
+#define R8A77961_CLK_ZTRD2 5
+#define R8A77961_CLK_ZT 6
+#define R8A77961_CLK_ZX 7
+#define R8A77961_CLK_S0D1 8
+#define R8A77961_CLK_S0D2 9
+#define R8A77961_CLK_S0D3 10
+#define R8A77961_CLK_S0D4 11
+#define R8A77961_CLK_S0D6 12
+#define R8A77961_CLK_S0D8 13
+#define R8A77961_CLK_S0D12 14
+#define R8A77961_CLK_S1D1 15
+#define R8A77961_CLK_S1D2 16
+#define R8A77961_CLK_S1D4 17
+#define R8A77961_CLK_S2D1 18
+#define R8A77961_CLK_S2D2 19
+#define R8A77961_CLK_S2D4 20
+#define R8A77961_CLK_S3D1 21
+#define R8A77961_CLK_S3D2 22
+#define R8A77961_CLK_S3D4 23
+#define R8A77961_CLK_LB 24
+#define R8A77961_CLK_CL 25
+#define R8A77961_CLK_ZB3 26
+#define R8A77961_CLK_ZB3D2 27
+#define R8A77961_CLK_ZB3D4 28
+#define R8A77961_CLK_CR 29
+#define R8A77961_CLK_CRD2 30
+#define R8A77961_CLK_SD0H 31
+#define R8A77961_CLK_SD0 32
+#define R8A77961_CLK_SD1H 33
+#define R8A77961_CLK_SD1 34
+#define R8A77961_CLK_SD2H 35
+#define R8A77961_CLK_SD2 36
+#define R8A77961_CLK_SD3H 37
+#define R8A77961_CLK_SD3 38
+#define R8A77961_CLK_SSP2 39
+#define R8A77961_CLK_SSP1 40
+#define R8A77961_CLK_SSPRS 41
+#define R8A77961_CLK_RPC 42
+#define R8A77961_CLK_RPCD2 43
+#define R8A77961_CLK_MSO 44
+#define R8A77961_CLK_CANFD 45
+#define R8A77961_CLK_HDMI 46
+#define R8A77961_CLK_CSI0 47
+/* CLK_CSIREF was removed */
+#define R8A77961_CLK_CP 49
+#define R8A77961_CLK_CPEX 50
+#define R8A77961_CLK_R 51
+#define R8A77961_CLK_OSC 52
+
+#endif /* __DT_BINDINGS_CLOCK_R8A77961_CPG_MSSR_H__ */
diff --git a/include/dt-bindings/clock/sun50i-a64-ccu.h b/include/dt-bindings/clock/sun50i-a64-ccu.h
index a8ac4cfcdcbc..e512a1c9b0fc 100644
--- a/include/dt-bindings/clock/sun50i-a64-ccu.h
+++ b/include/dt-bindings/clock/sun50i-a64-ccu.h
@@ -46,6 +46,7 @@
#define CLK_PLL_VIDEO0 7
#define CLK_PLL_PERIPH0 11
+#define CLK_CPUX 21
#define CLK_BUS_MIPI_DSI 28
#define CLK_BUS_CE 29
#define CLK_BUS_DMA 30
diff --git a/include/dt-bindings/clock/sun6i-a31-ccu.h b/include/dt-bindings/clock/sun6i-a31-ccu.h
index c5d13340184a..39878d9dce9f 100644
--- a/include/dt-bindings/clock/sun6i-a31-ccu.h
+++ b/include/dt-bindings/clock/sun6i-a31-ccu.h
@@ -49,6 +49,8 @@
#define CLK_PLL_VIDEO1_2X 13
+#define CLK_PLL_MIPI 15
+
#define CLK_CPU 18
#define CLK_AHB1_MIPIDSI 23
diff --git a/include/dt-bindings/clock/sun8i-a23-a33-ccu.h b/include/dt-bindings/clock/sun8i-a23-a33-ccu.h
index f8222b6b2cc3..eb524d0bbd01 100644
--- a/include/dt-bindings/clock/sun8i-a23-a33-ccu.h
+++ b/include/dt-bindings/clock/sun8i-a23-a33-ccu.h
@@ -43,6 +43,8 @@
#ifndef _DT_BINDINGS_CLK_SUN8I_A23_A33_H_
#define _DT_BINDINGS_CLK_SUN8I_A23_A33_H_
+#define CLK_PLL_MIPI 13
+
#define CLK_CPUX 18
#define CLK_BUS_MIPI_DSI 23
diff --git a/include/dt-bindings/clock/sun8i-h3-ccu.h b/include/dt-bindings/clock/sun8i-h3-ccu.h
index c5f7e9a70968..30d2d15373a2 100644
--- a/include/dt-bindings/clock/sun8i-h3-ccu.h
+++ b/include/dt-bindings/clock/sun8i-h3-ccu.h
@@ -143,7 +143,7 @@
#define CLK_AVS 110
#define CLK_HDMI 111
#define CLK_HDMI_DDC 112
-
+#define CLK_MBUS 113
#define CLK_GPU 114
/* New clocks imported in H5 */
diff --git a/include/dt-bindings/clock/sun8i-r40-ccu.h b/include/dt-bindings/clock/sun8i-r40-ccu.h
index f9e15a235626..d7337b55a4ef 100644
--- a/include/dt-bindings/clock/sun8i-r40-ccu.h
+++ b/include/dt-bindings/clock/sun8i-r40-ccu.h
@@ -176,7 +176,7 @@
#define CLK_AVS 152
#define CLK_HDMI 153
#define CLK_HDMI_SLOW 154
-
+#define CLK_MBUS 155
#define CLK_DSI_DPHY 156
#define CLK_TVE0 157
#define CLK_TVE1 158
diff --git a/include/dt-bindings/clock/tegra124-car-common.h b/include/dt-bindings/clock/tegra124-car-common.h
index 4331f1df6ebe..0c4f5be0a742 100644
--- a/include/dt-bindings/clock/tegra124-car-common.h
+++ b/include/dt-bindings/clock/tegra124-car-common.h
@@ -337,7 +337,8 @@
#define TEGRA124_CLK_CLK_OUT_3_MUX 308
/* 309 */
/* 310 */
-#define TEGRA124_CLK_SOR0_LVDS 311
+#define TEGRA124_CLK_SOR0_LVDS 311 /* deprecated */
+#define TEGRA124_CLK_SOR0_OUT 311
#define TEGRA124_CLK_XUSB_SS_DIV2 312
#define TEGRA124_CLK_PLL_M_UD 313
diff --git a/include/dt-bindings/clock/tegra210-car.h b/include/dt-bindings/clock/tegra210-car.h
index 6b77e721f6b1..44f60623f99b 100644
--- a/include/dt-bindings/clock/tegra210-car.h
+++ b/include/dt-bindings/clock/tegra210-car.h
@@ -308,8 +308,8 @@
#define TEGRA210_CLK_CLK_OUT_2 278
#define TEGRA210_CLK_CLK_OUT_3 279
#define TEGRA210_CLK_BLINK 280
-/* 281 */
-#define TEGRA210_CLK_SOR1_SRC 282
+#define TEGRA210_CLK_SOR0_LVDS 281 /* deprecated */
+#define TEGRA210_CLK_SOR0_OUT 281
#define TEGRA210_CLK_SOR1_OUT 282
/* 283 */
#define TEGRA210_CLK_XUSB_HOST_SRC 284
@@ -391,7 +391,7 @@
#define TEGRA210_CLK_CLK_OUT_3_MUX 358
#define TEGRA210_CLK_DSIA_MUX 359
#define TEGRA210_CLK_DSIB_MUX 360
-#define TEGRA210_CLK_SOR0_LVDS 361
+/* 361 */
#define TEGRA210_CLK_XUSB_SS_DIV2 362
#define TEGRA210_CLK_PLL_M_UD 363
diff --git a/include/dt-bindings/clk/ti-dra7-atl.h b/include/dt-bindings/clock/ti-dra7-atl.h
index 42dd4164f6f4..42dd4164f6f4 100644
--- a/include/dt-bindings/clk/ti-dra7-atl.h
+++ b/include/dt-bindings/clock/ti-dra7-atl.h
diff --git a/include/dt-bindings/clock/x1000-cgu.h b/include/dt-bindings/clock/x1000-cgu.h
new file mode 100644
index 000000000000..bbaebaf7adb9
--- /dev/null
+++ b/include/dt-bindings/clock/x1000-cgu.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * This header provides clock numbers for the ingenic,x1000-cgu DT binding.
+ *
+ * They are roughly ordered as:
+ * - external clocks
+ * - PLLs
+ * - muxes/dividers in the order they appear in the X1000 programmer's manual
+ * - gates in order of their bit in the CLKGR* registers
+ */
+
+#ifndef __DT_BINDINGS_CLOCK_X1000_CGU_H__
+#define __DT_BINDINGS_CLOCK_X1000_CGU_H__
+
+#define X1000_CLK_EXCLK 0
+#define X1000_CLK_RTCLK 1
+#define X1000_CLK_APLL 2
+#define X1000_CLK_MPLL 3
+#define X1000_CLK_SCLKA 4
+#define X1000_CLK_CPUMUX 5
+#define X1000_CLK_CPU 6
+#define X1000_CLK_L2CACHE 7
+#define X1000_CLK_AHB0 8
+#define X1000_CLK_AHB2PMUX 9
+#define X1000_CLK_AHB2 10
+#define X1000_CLK_PCLK 11
+#define X1000_CLK_DDR 12
+#define X1000_CLK_MAC 13
+#define X1000_CLK_MSCMUX 14
+#define X1000_CLK_MSC0 15
+#define X1000_CLK_MSC1 16
+#define X1000_CLK_SSIPLL 17
+#define X1000_CLK_SSIMUX 18
+#define X1000_CLK_SFC 19
+#define X1000_CLK_I2C0 20
+#define X1000_CLK_I2C1 21
+#define X1000_CLK_I2C2 22
+#define X1000_CLK_UART0 23
+#define X1000_CLK_UART1 24
+#define X1000_CLK_UART2 25
+#define X1000_CLK_SSI 26
+#define X1000_CLK_PDMA 27
+
+#endif /* __DT_BINDINGS_CLOCK_X1000_CGU_H__ */
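As a hedged usage sketch of the indices defined above: a board DTS would include this header and pass the constants in clock specifiers. The cgu label, the node name/address and the clock-names strings below are assumptions for illustration, not part of this patch.

    #include <dt-bindings/clock/x1000-cgu.h>

    uart0: serial@10030000 {
            /* Hypothetical consumer node: both clock indices come from
             * this header; the clock-names strings are assumed and should
             * be checked against the UART binding. */
            clocks = <&cgu X1000_CLK_EXCLK>, <&cgu X1000_CLK_UART0>;
            clock-names = "baud", "module";
    };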
diff --git a/include/dt-bindings/clock/xlnx-versal-clk.h b/include/dt-bindings/clock/xlnx-versal-clk.h
new file mode 100644
index 000000000000..264d634d226e
--- /dev/null
+++ b/include/dt-bindings/clock/xlnx-versal-clk.h
@@ -0,0 +1,123 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2019 Xilinx Inc.
+ *
+ */
+
+#ifndef _DT_BINDINGS_CLK_VERSAL_H
+#define _DT_BINDINGS_CLK_VERSAL_H
+
+#define PMC_PLL 1
+#define APU_PLL 2
+#define RPU_PLL 3
+#define CPM_PLL 4
+#define NOC_PLL 5
+#define PLL_MAX 6
+#define PMC_PRESRC 7
+#define PMC_POSTCLK 8
+#define PMC_PLL_OUT 9
+#define PPLL 10
+#define NOC_PRESRC 11
+#define NOC_POSTCLK 12
+#define NOC_PLL_OUT 13
+#define NPLL 14
+#define APU_PRESRC 15
+#define APU_POSTCLK 16
+#define APU_PLL_OUT 17
+#define APLL 18
+#define RPU_PRESRC 19
+#define RPU_POSTCLK 20
+#define RPU_PLL_OUT 21
+#define RPLL 22
+#define CPM_PRESRC 23
+#define CPM_POSTCLK 24
+#define CPM_PLL_OUT 25
+#define CPLL 26
+#define PPLL_TO_XPD 27
+#define NPLL_TO_XPD 28
+#define APLL_TO_XPD 29
+#define RPLL_TO_XPD 30
+#define EFUSE_REF 31
+#define SYSMON_REF 32
+#define IRO_SUSPEND_REF 33
+#define USB_SUSPEND 34
+#define SWITCH_TIMEOUT 35
+#define RCLK_PMC 36
+#define RCLK_LPD 37
+#define WDT 38
+#define TTC0 39
+#define TTC1 40
+#define TTC2 41
+#define TTC3 42
+#define GEM_TSU 43
+#define GEM_TSU_LB 44
+#define MUXED_IRO_DIV2 45
+#define MUXED_IRO_DIV4 46
+#define PSM_REF 47
+#define GEM0_RX 48
+#define GEM0_TX 49
+#define GEM1_RX 50
+#define GEM1_TX 51
+#define CPM_CORE_REF 52
+#define CPM_LSBUS_REF 53
+#define CPM_DBG_REF 54
+#define CPM_AUX0_REF 55
+#define CPM_AUX1_REF 56
+#define QSPI_REF 57
+#define OSPI_REF 58
+#define SDIO0_REF 59
+#define SDIO1_REF 60
+#define PMC_LSBUS_REF 61
+#define I2C_REF 62
+#define TEST_PATTERN_REF 63
+#define DFT_OSC_REF 64
+#define PMC_PL0_REF 65
+#define PMC_PL1_REF 66
+#define PMC_PL2_REF 67
+#define PMC_PL3_REF 68
+#define CFU_REF 69
+#define SPARE_REF 70
+#define NPI_REF 71
+#define HSM0_REF 72
+#define HSM1_REF 73
+#define SD_DLL_REF 74
+#define FPD_TOP_SWITCH 75
+#define FPD_LSBUS 76
+#define ACPU 77
+#define DBG_TRACE 78
+#define DBG_FPD 79
+#define LPD_TOP_SWITCH 80
+#define ADMA 81
+#define LPD_LSBUS 82
+#define CPU_R5 83
+#define CPU_R5_CORE 84
+#define CPU_R5_OCM 85
+#define CPU_R5_OCM2 86
+#define IOU_SWITCH 87
+#define GEM0_REF 88
+#define GEM1_REF 89
+#define GEM_TSU_REF 90
+#define USB0_BUS_REF 91
+#define UART0_REF 92
+#define UART1_REF 93
+#define SPI0_REF 94
+#define SPI1_REF 95
+#define CAN0_REF 96
+#define CAN1_REF 97
+#define I2C0_REF 98
+#define I2C1_REF 99
+#define DBG_LPD 100
+#define TIMESTAMP_REF 101
+#define DBG_TSTMP 102
+#define CPM_TOPSW_REF 103
+#define USB3_DUAL_REF 104
+#define OUTCLK_MAX 105
+#define REF_CLK 106
+#define PL_ALT_REF_CLK 107
+#define MUXED_IRO 108
+#define PL_EXT 109
+#define PL_LB 110
+#define MIO_50_OR_51 111
+#define MIO_24_OR_25 112
+
+#endif
diff --git a/include/dt-bindings/display/sdtv-standards.h b/include/dt-bindings/display/sdtv-standards.h
new file mode 100644
index 000000000000..fbc1a3db2ea7
--- /dev/null
+++ b/include/dt-bindings/display/sdtv-standards.h
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: GPL-2.0-only or X11 */
+/*
+ * Copyright 2019 Pengutronix, Marco Felsch <kernel@pengutronix.de>
+ */
+
+#ifndef _DT_BINDINGS_DISPLAY_SDTV_STDS_H
+#define _DT_BINDINGS_DISPLAY_SDTV_STDS_H
+
+/*
+ * Attention: Keep the SDTV_STD_* bit definitions in sync with
+ * include/uapi/linux/videodev2.h V4L2_STD_* bit definitions.
+ */
+/* One bit for each standard */
+#define SDTV_STD_PAL_B 0x00000001
+#define SDTV_STD_PAL_B1 0x00000002
+#define SDTV_STD_PAL_G 0x00000004
+#define SDTV_STD_PAL_H 0x00000008
+#define SDTV_STD_PAL_I 0x00000010
+#define SDTV_STD_PAL_D 0x00000020
+#define SDTV_STD_PAL_D1 0x00000040
+#define SDTV_STD_PAL_K 0x00000080
+
+#define SDTV_STD_PAL (SDTV_STD_PAL_B | \
+ SDTV_STD_PAL_B1 | \
+ SDTV_STD_PAL_G | \
+ SDTV_STD_PAL_H | \
+ SDTV_STD_PAL_I | \
+ SDTV_STD_PAL_D | \
+ SDTV_STD_PAL_D1 | \
+ SDTV_STD_PAL_K)
+
+#define SDTV_STD_PAL_M 0x00000100
+#define SDTV_STD_PAL_N 0x00000200
+#define SDTV_STD_PAL_Nc 0x00000400
+#define SDTV_STD_PAL_60 0x00000800
+
+#define SDTV_STD_NTSC_M 0x00001000 /* BTSC */
+#define SDTV_STD_NTSC_M_JP 0x00002000 /* EIA-J */
+#define SDTV_STD_NTSC_443 0x00004000
+#define SDTV_STD_NTSC_M_KR 0x00008000 /* FM A2 */
+
+#define SDTV_STD_NTSC (SDTV_STD_NTSC_M | \
+ SDTV_STD_NTSC_M_JP | \
+ SDTV_STD_NTSC_M_KR)
+
+#define SDTV_STD_SECAM_B 0x00010000
+#define SDTV_STD_SECAM_D 0x00020000
+#define SDTV_STD_SECAM_G 0x00040000
+#define SDTV_STD_SECAM_H 0x00080000
+#define SDTV_STD_SECAM_K 0x00100000
+#define SDTV_STD_SECAM_K1 0x00200000
+#define SDTV_STD_SECAM_L 0x00400000
+#define SDTV_STD_SECAM_LC 0x00800000
+
+#define SDTV_STD_SECAM (SDTV_STD_SECAM_B | \
+ SDTV_STD_SECAM_D | \
+ SDTV_STD_SECAM_G | \
+ SDTV_STD_SECAM_H | \
+ SDTV_STD_SECAM_K | \
+ SDTV_STD_SECAM_K1 | \
+ SDTV_STD_SECAM_L | \
+ SDTV_STD_SECAM_LC)
+
+/* Standards for Countries with 60Hz Line frequency */
+#define SDTV_STD_525_60 (SDTV_STD_PAL_M | \
+ SDTV_STD_PAL_60 | \
+ SDTV_STD_NTSC | \
+ SDTV_STD_NTSC_443)
+
+/* Standards for Countries with 50Hz Line frequency */
+#define SDTV_STD_625_50 (SDTV_STD_PAL | \
+ SDTV_STD_PAL_N | \
+ SDTV_STD_PAL_Nc | \
+ SDTV_STD_SECAM)
+
+#endif /* _DT_BINDINGS_DISPLAY_SDTV_STDS_H */
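For illustration, a hedged sketch of how these bit masks might be combined in a board DTS; the connector compatible and the sdtv-standards property come from the related connector bindings and are assumptions here, not defined by this header.

    #include <dt-bindings/display/sdtv-standards.h>

    connector {
            compatible = "composite-video-connector";   /* assumed consumer */
            /* Limit the supported standards to PAL-B/G plus NTSC-M. */
            sdtv-standards = <(SDTV_STD_PAL_B | SDTV_STD_PAL_G | SDTV_STD_NTSC_M)>;
    };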
diff --git a/include/dt-bindings/dma/x1000-dma.h b/include/dt-bindings/dma/x1000-dma.h
new file mode 100644
index 000000000000..401e1656e696
--- /dev/null
+++ b/include/dt-bindings/dma/x1000-dma.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * This header provides macros for X1000 DMA bindings.
+ *
+ * Copyright (c) 2019 Zhou Yanjie <zhouyanjie@zoho.com>
+ */
+
+#ifndef __DT_BINDINGS_DMA_X1000_DMA_H__
+#define __DT_BINDINGS_DMA_X1000_DMA_H__
+
+/*
+ * Request type numbers for the X1000 DMA controller (written to the DRTn
+ * register for the channel).
+ */
+#define X1000_DMA_DMIC_RX 0x5
+#define X1000_DMA_I2S0_TX 0x6
+#define X1000_DMA_I2S0_RX 0x7
+#define X1000_DMA_AUTO 0x8
+#define X1000_DMA_UART2_TX 0x10
+#define X1000_DMA_UART2_RX 0x11
+#define X1000_DMA_UART1_TX 0x12
+#define X1000_DMA_UART1_RX 0x13
+#define X1000_DMA_UART0_TX 0x14
+#define X1000_DMA_UART0_RX 0x15
+#define X1000_DMA_SSI0_TX 0x16
+#define X1000_DMA_SSI0_RX 0x17
+#define X1000_DMA_MSC0_TX 0x1a
+#define X1000_DMA_MSC0_RX 0x1b
+#define X1000_DMA_MSC1_TX 0x1c
+#define X1000_DMA_MSC1_RX 0x1d
+#define X1000_DMA_PCM0_TX 0x20
+#define X1000_DMA_PCM0_RX 0x21
+#define X1000_DMA_SMB0_TX 0x24
+#define X1000_DMA_SMB0_RX 0x25
+#define X1000_DMA_SMB1_TX 0x26
+#define X1000_DMA_SMB1_RX 0x27
+#define X1000_DMA_SMB2_TX 0x28
+#define X1000_DMA_SMB2_RX 0x29
+
+#endif /* __DT_BINDINGS_DMA_X1000_DMA_H__ */
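A minimal, hedged sketch of a peripheral node using these request types (the X1830 header below follows the same pattern). The pdma label, node address and the 0xffffffff channel-mask cell follow the Ingenic DMA binding convention and are assumptions for illustration.

    #include <dt-bindings/dma/x1000-dma.h>

    serial@10032000 {
            /* Hypothetical consumer: the first cell of each entry is the
             * request type written to DRTn, the second a channel mask. */
            dmas = <&pdma X1000_DMA_UART1_TX 0xffffffff>,
                   <&pdma X1000_DMA_UART1_RX 0xffffffff>;
            dma-names = "tx", "rx";
    };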
diff --git a/include/dt-bindings/dma/x1830-dma.h b/include/dt-bindings/dma/x1830-dma.h
new file mode 100644
index 000000000000..35bcb8966ea4
--- /dev/null
+++ b/include/dt-bindings/dma/x1830-dma.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * This header provides macros for X1830 DMA bindings.
+ *
+ * Copyright (c) 2019 周琰杰 (Zhou Yanjie) <zhouyanjie@wanyeetech.com>
+ */
+
+#ifndef __DT_BINDINGS_DMA_X1830_DMA_H__
+#define __DT_BINDINGS_DMA_X1830_DMA_H__
+
+/*
+ * Request type numbers for the X1830 DMA controller (written to the DRTn
+ * register for the channel).
+ */
+#define X1830_DMA_I2S0_TX 0x6
+#define X1830_DMA_I2S0_RX 0x7
+#define X1830_DMA_AUTO 0x8
+#define X1830_DMA_SADC_RX 0x9
+#define X1830_DMA_UART1_TX 0x12
+#define X1830_DMA_UART1_RX 0x13
+#define X1830_DMA_UART0_TX 0x14
+#define X1830_DMA_UART0_RX 0x15
+#define X1830_DMA_SSI0_TX 0x16
+#define X1830_DMA_SSI0_RX 0x17
+#define X1830_DMA_SSI1_TX 0x18
+#define X1830_DMA_SSI1_RX 0x19
+#define X1830_DMA_MSC0_TX 0x1a
+#define X1830_DMA_MSC0_RX 0x1b
+#define X1830_DMA_MSC1_TX 0x1c
+#define X1830_DMA_MSC1_RX 0x1d
+#define X1830_DMA_DMIC_RX 0x21
+#define X1830_DMA_SMB0_TX 0x24
+#define X1830_DMA_SMB0_RX 0x25
+#define X1830_DMA_SMB1_TX 0x26
+#define X1830_DMA_SMB1_RX 0x27
+#define X1830_DMA_DES_TX 0x2e
+#define X1830_DMA_DES_RX 0x2f
+
+#endif /* __DT_BINDINGS_DMA_X1830_DMA_H__ */
diff --git a/include/dt-bindings/gpio/meson-a1-gpio.h b/include/dt-bindings/gpio/meson-a1-gpio.h
new file mode 100644
index 000000000000..40e57a5ff1db
--- /dev/null
+++ b/include/dt-bindings/gpio/meson-a1-gpio.h
@@ -0,0 +1,73 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT) */
+/*
+ * Copyright (c) 2019 Amlogic, Inc. All rights reserved.
+ * Author: Qianggui Song <qianggui.song@amlogic.com>
+ */
+
+#ifndef _DT_BINDINGS_MESON_A1_GPIO_H
+#define _DT_BINDINGS_MESON_A1_GPIO_H
+
+#define GPIOP_0 0
+#define GPIOP_1 1
+#define GPIOP_2 2
+#define GPIOP_3 3
+#define GPIOP_4 4
+#define GPIOP_5 5
+#define GPIOP_6 6
+#define GPIOP_7 7
+#define GPIOP_8 8
+#define GPIOP_9 9
+#define GPIOP_10 10
+#define GPIOP_11 11
+#define GPIOP_12 12
+#define GPIOB_0 13
+#define GPIOB_1 14
+#define GPIOB_2 15
+#define GPIOB_3 16
+#define GPIOB_4 17
+#define GPIOB_5 18
+#define GPIOB_6 19
+#define GPIOX_0 20
+#define GPIOX_1 21
+#define GPIOX_2 22
+#define GPIOX_3 23
+#define GPIOX_4 24
+#define GPIOX_5 25
+#define GPIOX_6 26
+#define GPIOX_7 27
+#define GPIOX_8 28
+#define GPIOX_9 29
+#define GPIOX_10 30
+#define GPIOX_11 31
+#define GPIOX_12 32
+#define GPIOX_13 33
+#define GPIOX_14 34
+#define GPIOX_15 35
+#define GPIOX_16 36
+#define GPIOF_0 37
+#define GPIOF_1 38
+#define GPIOF_2 39
+#define GPIOF_3 40
+#define GPIOF_4 41
+#define GPIOF_5 42
+#define GPIOF_6 43
+#define GPIOF_7 44
+#define GPIOF_8 45
+#define GPIOF_9 46
+#define GPIOF_10 47
+#define GPIOF_11 48
+#define GPIOF_12 49
+#define GPIOA_0 50
+#define GPIOA_1 51
+#define GPIOA_2 52
+#define GPIOA_3 53
+#define GPIOA_4 54
+#define GPIOA_5 55
+#define GPIOA_6 56
+#define GPIOA_7 57
+#define GPIOA_8 58
+#define GPIOA_9 59
+#define GPIOA_10 60
+#define GPIOA_11 61
+
+#endif /* _DT_BINDINGS_MESON_A1_GPIO_H */
diff --git a/include/dt-bindings/iio/adc/ingenic,adc.h b/include/dt-bindings/iio/adc/ingenic,adc.h
index 82706b2706ac..42f871ab3272 100644
--- a/include/dt-bindings/iio/adc/ingenic,adc.h
+++ b/include/dt-bindings/iio/adc/ingenic,adc.h
@@ -6,5 +6,6 @@
/* ADC channel idx. */
#define INGENIC_ADC_AUX 0
#define INGENIC_ADC_BATTERY 1
+#define INGENIC_ADC_AUX2 2
#endif
diff --git a/include/dt-bindings/interconnect/qcom,msm8916.h b/include/dt-bindings/interconnect/qcom,msm8916.h
new file mode 100644
index 000000000000..359a75feb198
--- /dev/null
+++ b/include/dt-bindings/interconnect/qcom,msm8916.h
@@ -0,0 +1,100 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Qualcomm interconnect IDs
+ *
+ * Copyright (c) 2019, Linaro Ltd.
+ * Author: Georgi Djakov <georgi.djakov@linaro.org>
+ */
+
+#ifndef __DT_BINDINGS_INTERCONNECT_QCOM_MSM8916_H
+#define __DT_BINDINGS_INTERCONNECT_QCOM_MSM8916_H
+
+#define BIMC_SNOC_SLV 0
+#define MASTER_JPEG 1
+#define MASTER_MDP_PORT0 2
+#define MASTER_QDSS_BAM 3
+#define MASTER_QDSS_ETR 4
+#define MASTER_SNOC_CFG 5
+#define MASTER_VFE 6
+#define MASTER_VIDEO_P0 7
+#define SNOC_MM_INT_0 8
+#define SNOC_MM_INT_1 9
+#define SNOC_MM_INT_2 10
+#define SNOC_MM_INT_BIMC 11
+#define PCNOC_SNOC_SLV 12
+#define SLAVE_APSS 13
+#define SLAVE_CATS_128 14
+#define SLAVE_OCMEM_64 15
+#define SLAVE_IMEM 16
+#define SLAVE_QDSS_STM 17
+#define SLAVE_SRVC_SNOC 18
+#define SNOC_BIMC_0_MAS 19
+#define SNOC_BIMC_1_MAS 20
+#define SNOC_INT_0 21
+#define SNOC_INT_1 22
+#define SNOC_INT_BIMC 23
+#define SNOC_PCNOC_MAS 24
+#define SNOC_QDSS_INT 25
+
+#define BIMC_SNOC_MAS 0
+#define MASTER_AMPSS_M0 1
+#define MASTER_GRAPHICS_3D 2
+#define MASTER_TCU0 3
+#define MASTER_TCU1 4
+#define SLAVE_AMPSS_L2 5
+#define SLAVE_EBI_CH0 6
+#define SNOC_BIMC_0_SLV 7
+#define SNOC_BIMC_1_SLV 8
+
+#define MASTER_BLSP_1 0
+#define MASTER_DEHR 1
+#define MASTER_LPASS 2
+#define MASTER_CRYPTO_CORE0 3
+#define MASTER_SDCC_1 4
+#define MASTER_SDCC_2 5
+#define MASTER_SPDM 6
+#define MASTER_USB_HS 7
+#define PCNOC_INT_0 8
+#define PCNOC_INT_1 9
+#define PCNOC_MAS_0 10
+#define PCNOC_MAS_1 11
+#define PCNOC_SLV_0 12
+#define PCNOC_SLV_1 13
+#define PCNOC_SLV_2 14
+#define PCNOC_SLV_3 15
+#define PCNOC_SLV_4 16
+#define PCNOC_SLV_8 17
+#define PCNOC_SLV_9 18
+#define PCNOC_SNOC_MAS 19
+#define SLAVE_BIMC_CFG 20
+#define SLAVE_BLSP_1 21
+#define SLAVE_BOOT_ROM 22
+#define SLAVE_CAMERA_CFG 23
+#define SLAVE_CLK_CTL 24
+#define SLAVE_CRYPTO_0_CFG 25
+#define SLAVE_DEHR_CFG 26
+#define SLAVE_DISPLAY_CFG 27
+#define SLAVE_GRAPHICS_3D_CFG 28
+#define SLAVE_IMEM_CFG 29
+#define SLAVE_LPASS 30
+#define SLAVE_MPM 31
+#define SLAVE_MSG_RAM 32
+#define SLAVE_MSS 33
+#define SLAVE_PDM 34
+#define SLAVE_PMIC_ARB 35
+#define SLAVE_PCNOC_CFG 36
+#define SLAVE_PRNG 37
+#define SLAVE_QDSS_CFG 38
+#define SLAVE_RBCPR_CFG 39
+#define SLAVE_SDCC_1 40
+#define SLAVE_SDCC_2 41
+#define SLAVE_SECURITY 42
+#define SLAVE_SNOC_CFG 43
+#define SLAVE_SPDM 44
+#define SLAVE_TCSR 45
+#define SLAVE_TLMM 46
+#define SLAVE_USB_HS 47
+#define SLAVE_VENUS_CFG 48
+#define SNOC_PCNOC_SLV 49
+
+#endif
diff --git a/include/dt-bindings/interconnect/qcom,msm8974.h b/include/dt-bindings/interconnect/qcom,msm8974.h
new file mode 100644
index 000000000000..e65ae27ffff2
--- /dev/null
+++ b/include/dt-bindings/interconnect/qcom,msm8974.h
@@ -0,0 +1,146 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) */
+/*
+ * Qualcomm msm8974 interconnect IDs
+ *
+ * Copyright (c) 2019 Brian Masney <masneyb@onstation.org>
+ */
+
+#ifndef __DT_BINDINGS_INTERCONNECT_QCOM_MSM8974_H
+#define __DT_BINDINGS_INTERCONNECT_QCOM_MSM8974_H
+
+#define BIMC_MAS_AMPSS_M0 0
+#define BIMC_MAS_AMPSS_M1 1
+#define BIMC_MAS_MSS_PROC 2
+#define BIMC_TO_MNOC 3
+#define BIMC_TO_SNOC 4
+#define BIMC_SLV_EBI_CH0 5
+#define BIMC_SLV_AMPSS_L2 6
+
+#define CNOC_MAS_RPM_INST 0
+#define CNOC_MAS_RPM_DATA 1
+#define CNOC_MAS_RPM_SYS 2
+#define CNOC_MAS_DEHR 3
+#define CNOC_MAS_QDSS_DAP 4
+#define CNOC_MAS_SPDM 5
+#define CNOC_MAS_TIC 6
+#define CNOC_SLV_CLK_CTL 7
+#define CNOC_SLV_CNOC_MSS 8
+#define CNOC_SLV_SECURITY 9
+#define CNOC_SLV_TCSR 10
+#define CNOC_SLV_TLMM 11
+#define CNOC_SLV_CRYPTO_0_CFG 12
+#define CNOC_SLV_CRYPTO_1_CFG 13
+#define CNOC_SLV_IMEM_CFG 14
+#define CNOC_SLV_MESSAGE_RAM 15
+#define CNOC_SLV_BIMC_CFG 16
+#define CNOC_SLV_BOOT_ROM 17
+#define CNOC_SLV_PMIC_ARB 18
+#define CNOC_SLV_SPDM_WRAPPER 19
+#define CNOC_SLV_DEHR_CFG 20
+#define CNOC_SLV_MPM 21
+#define CNOC_SLV_QDSS_CFG 22
+#define CNOC_SLV_RBCPR_CFG 23
+#define CNOC_SLV_RBCPR_QDSS_APU_CFG 24
+#define CNOC_TO_SNOC 25
+#define CNOC_SLV_CNOC_ONOC_CFG 26
+#define CNOC_SLV_CNOC_MNOC_MMSS_CFG 27
+#define CNOC_SLV_CNOC_MNOC_CFG 28
+#define CNOC_SLV_PNOC_CFG 29
+#define CNOC_SLV_SNOC_MPU_CFG 30
+#define CNOC_SLV_SNOC_CFG 31
+#define CNOC_SLV_EBI1_DLL_CFG 32
+#define CNOC_SLV_PHY_APU_CFG 33
+#define CNOC_SLV_EBI1_PHY_CFG 34
+#define CNOC_SLV_RPM 35
+#define CNOC_SLV_SERVICE_CNOC 36
+
+#define MNOC_MAS_GRAPHICS_3D 0
+#define MNOC_MAS_JPEG 1
+#define MNOC_MAS_MDP_PORT0 2
+#define MNOC_MAS_VIDEO_P0 3
+#define MNOC_MAS_VIDEO_P1 4
+#define MNOC_MAS_VFE 5
+#define MNOC_TO_CNOC 6
+#define MNOC_TO_BIMC 7
+#define MNOC_SLV_CAMERA_CFG 8
+#define MNOC_SLV_DISPLAY_CFG 9
+#define MNOC_SLV_OCMEM_CFG 10
+#define MNOC_SLV_CPR_CFG 11
+#define MNOC_SLV_CPR_XPU_CFG 12
+#define MNOC_SLV_MISC_CFG 13
+#define MNOC_SLV_MISC_XPU_CFG 14
+#define MNOC_SLV_VENUS_CFG 15
+#define MNOC_SLV_GRAPHICS_3D_CFG 16
+#define MNOC_SLV_MMSS_CLK_CFG 17
+#define MNOC_SLV_MMSS_CLK_XPU_CFG 18
+#define MNOC_SLV_MNOC_MPU_CFG 19
+#define MNOC_SLV_ONOC_MPU_CFG 20
+#define MNOC_SLV_SERVICE_MNOC 21
+
+#define OCMEM_NOC_TO_OCMEM_VNOC 0
+#define OCMEM_MAS_JPEG_OCMEM 1
+#define OCMEM_MAS_MDP_OCMEM 2
+#define OCMEM_MAS_VIDEO_P0_OCMEM 3
+#define OCMEM_MAS_VIDEO_P1_OCMEM 4
+#define OCMEM_MAS_VFE_OCMEM 5
+#define OCMEM_MAS_CNOC_ONOC_CFG 6
+#define OCMEM_SLV_SERVICE_ONOC 7
+#define OCMEM_VNOC_TO_SNOC 8
+#define OCMEM_VNOC_TO_OCMEM_NOC 9
+#define OCMEM_VNOC_MAS_GFX3D 10
+#define OCMEM_SLV_OCMEM 11
+
+#define PNOC_MAS_PNOC_CFG 0
+#define PNOC_MAS_SDCC_1 1
+#define PNOC_MAS_SDCC_3 2
+#define PNOC_MAS_SDCC_4 3
+#define PNOC_MAS_SDCC_2 4
+#define PNOC_MAS_TSIF 5
+#define PNOC_MAS_BAM_DMA 6
+#define PNOC_MAS_BLSP_2 7
+#define PNOC_MAS_USB_HSIC 8
+#define PNOC_MAS_BLSP_1 9
+#define PNOC_MAS_USB_HS 10
+#define PNOC_TO_SNOC 11
+#define PNOC_SLV_SDCC_1 12
+#define PNOC_SLV_SDCC_3 13
+#define PNOC_SLV_SDCC_2 14
+#define PNOC_SLV_SDCC_4 15
+#define PNOC_SLV_TSIF 16
+#define PNOC_SLV_BAM_DMA 17
+#define PNOC_SLV_BLSP_2 18
+#define PNOC_SLV_USB_HSIC 19
+#define PNOC_SLV_BLSP_1 20
+#define PNOC_SLV_USB_HS 21
+#define PNOC_SLV_PDM 22
+#define PNOC_SLV_PERIPH_APU_CFG 23
+#define PNOC_SLV_PNOC_MPU_CFG 24
+#define PNOC_SLV_PRNG 25
+#define PNOC_SLV_SERVICE_PNOC 26
+
+#define SNOC_MAS_LPASS_AHB 0
+#define SNOC_MAS_QDSS_BAM 1
+#define SNOC_MAS_SNOC_CFG 2
+#define SNOC_TO_BIMC 3
+#define SNOC_TO_CNOC 4
+#define SNOC_TO_PNOC 5
+#define SNOC_TO_OCMEM_VNOC 6
+#define SNOC_MAS_CRYPTO_CORE0 7
+#define SNOC_MAS_CRYPTO_CORE1 8
+#define SNOC_MAS_LPASS_PROC 9
+#define SNOC_MAS_MSS 10
+#define SNOC_MAS_MSS_NAV 11
+#define SNOC_MAS_OCMEM_DMA 12
+#define SNOC_MAS_WCSS 13
+#define SNOC_MAS_QDSS_ETR 14
+#define SNOC_MAS_USB3 15
+#define SNOC_SLV_AMPSS 16
+#define SNOC_SLV_LPASS 17
+#define SNOC_SLV_USB3 18
+#define SNOC_SLV_WCSS 19
+#define SNOC_SLV_OCIMEM 20
+#define SNOC_SLV_SNOC_OCMEM 21
+#define SNOC_SLV_SERVICE_SNOC 22
+#define SNOC_SLV_QDSS_STM 23
+
+#endif
diff --git a/include/dt-bindings/interrupt-controller/aspeed-scu-ic.h b/include/dt-bindings/interrupt-controller/aspeed-scu-ic.h
new file mode 100644
index 000000000000..f315d5a7f5ee
--- /dev/null
+++ b/include/dt-bindings/interrupt-controller/aspeed-scu-ic.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+
+#ifndef _DT_BINDINGS_INTERRUPT_CONTROLLER_ASPEED_SCU_IC_H_
+#define _DT_BINDINGS_INTERRUPT_CONTROLLER_ASPEED_SCU_IC_H_
+
+#define ASPEED_SCU_IC_VGA_CURSOR_CHANGE 0
+#define ASPEED_SCU_IC_VGA_SCRATCH_REG_CHANGE 1
+
+#define ASPEED_AST2500_SCU_IC_PCIE_RESET_LO_TO_HI 2
+#define ASPEED_AST2500_SCU_IC_PCIE_RESET_HI_TO_LO 3
+#define ASPEED_AST2500_SCU_IC_LPC_RESET_LO_TO_HI 4
+#define ASPEED_AST2500_SCU_IC_LPC_RESET_HI_TO_LO 5
+#define ASPEED_AST2500_SCU_IC_ISSUE_MSI 6
+
+#define ASPEED_AST2600_SCU_IC0_PCIE_PERST_LO_TO_HI 2
+#define ASPEED_AST2600_SCU_IC0_PCIE_PERST_HI_TO_LO 3
+#define ASPEED_AST2600_SCU_IC0_PCIE_RCRST_LO_TO_HI 4
+#define ASPEED_AST2600_SCU_IC0_PCIE_RCRST_HI_TO_LO 5
+
+#define ASPEED_AST2600_SCU_IC1_LPC_RESET_LO_TO_HI 0
+#define ASPEED_AST2600_SCU_IC1_LPC_RESET_HI_TO_LO 1
+
+#endif /* _DT_BINDINGS_INTERRUPT_CONTROLLER_ASPEED_SCU_IC_H_ */
diff --git a/include/dt-bindings/media/tvp5150.h b/include/dt-bindings/media/tvp5150.h
index 01eedf4985b8..dda00c038530 100644
--- a/include/dt-bindings/media/tvp5150.h
+++ b/include/dt-bindings/media/tvp5150.h
@@ -14,8 +14,6 @@
#define TVP5150_COMPOSITE1 1
#define TVP5150_SVIDEO 2
-#define TVP5150_INPUT_NUM 3
-
/* TVP5150 HW outputs */
#define TVP5150_NORMAL 0
#define TVP5150_BLACK_SCREEN 1
diff --git a/include/dt-bindings/memory/tegra186-mc.h b/include/dt-bindings/memory/tegra186-mc.h
index 64813536aec9..82a1e27f7357 100644
--- a/include/dt-bindings/memory/tegra186-mc.h
+++ b/include/dt-bindings/memory/tegra186-mc.h
@@ -108,4 +108,143 @@
#define TEGRA186_SID_SE_VM6 0x4e
#define TEGRA186_SID_SE_VM7 0x4f
+/*
+ * memory client IDs
+ */
+
+/* Misses from System Memory Management Unit (SMMU) Page Table Cache (PTC) */
+#define TEGRA186_MEMORY_CLIENT_PTCR 0x00
+/* PCIE reads */
+#define TEGRA186_MEMORY_CLIENT_AFIR 0x0e
+/* High-definition audio (HDA) reads */
+#define TEGRA186_MEMORY_CLIENT_HDAR 0x15
+/* Host channel data reads */
+#define TEGRA186_MEMORY_CLIENT_HOST1XDMAR 0x16
+#define TEGRA186_MEMORY_CLIENT_NVENCSRD 0x1c
+/* SATA reads */
+#define TEGRA186_MEMORY_CLIENT_SATAR 0x1f
+/* Reads from Cortex-A9 4 CPU cores via the L2 cache */
+#define TEGRA186_MEMORY_CLIENT_MPCORER 0x27
+#define TEGRA186_MEMORY_CLIENT_NVENCSWR 0x2b
+/* PCIE writes */
+#define TEGRA186_MEMORY_CLIENT_AFIW 0x31
+/* High-definition audio (HDA) writes */
+#define TEGRA186_MEMORY_CLIENT_HDAW 0x35
+/* Writes from Cortex-A9 4 CPU cores via the L2 cache */
+#define TEGRA186_MEMORY_CLIENT_MPCOREW 0x39
+/* SATA writes */
+#define TEGRA186_MEMORY_CLIENT_SATAW 0x3d
+/* ISP Read client for Crossbar A */
+#define TEGRA186_MEMORY_CLIENT_ISPRA 0x44
+/* ISP Write client for Crossbar A */
+#define TEGRA186_MEMORY_CLIENT_ISPWA 0x46
+/* ISP Write client Crossbar B */
+#define TEGRA186_MEMORY_CLIENT_ISPWB 0x47
+/* XUSB reads */
+#define TEGRA186_MEMORY_CLIENT_XUSB_HOSTR 0x4a
+/* XUSB_HOST writes */
+#define TEGRA186_MEMORY_CLIENT_XUSB_HOSTW 0x4b
+/* XUSB reads */
+#define TEGRA186_MEMORY_CLIENT_XUSB_DEVR 0x4c
+/* XUSB_DEV writes */
+#define TEGRA186_MEMORY_CLIENT_XUSB_DEVW 0x4d
+/* TSEC Memory Return Data Client Description */
+#define TEGRA186_MEMORY_CLIENT_TSECSRD 0x54
+/* TSEC Memory Write Client Description */
+#define TEGRA186_MEMORY_CLIENT_TSECSWR 0x55
+/* 3D, ltcx reads instance 0 */
+#define TEGRA186_MEMORY_CLIENT_GPUSRD 0x58
+/* 3D, ltcx writes instance 0 */
+#define TEGRA186_MEMORY_CLIENT_GPUSWR 0x59
+/* sdmmca memory read client */
+#define TEGRA186_MEMORY_CLIENT_SDMMCRA 0x60
+/* sdmmcb memory read client */
+#define TEGRA186_MEMORY_CLIENT_SDMMCRAA 0x61
+/* sdmmc memory read client */
+#define TEGRA186_MEMORY_CLIENT_SDMMCR 0x62
+/* sdmmcd memory read client */
+#define TEGRA186_MEMORY_CLIENT_SDMMCRAB 0x63
+/* sdmmca memory write client */
+#define TEGRA186_MEMORY_CLIENT_SDMMCWA 0x64
+/* sdmmcb memory write client */
+#define TEGRA186_MEMORY_CLIENT_SDMMCWAA 0x65
+/* sdmmc memory write client */
+#define TEGRA186_MEMORY_CLIENT_SDMMCW 0x66
+/* sdmmcd memory write client */
+#define TEGRA186_MEMORY_CLIENT_SDMMCWAB 0x67
+#define TEGRA186_MEMORY_CLIENT_VICSRD 0x6c
+#define TEGRA186_MEMORY_CLIENT_VICSWR 0x6d
+/* VI Write client */
+#define TEGRA186_MEMORY_CLIENT_VIW 0x72
+#define TEGRA186_MEMORY_CLIENT_NVDECSRD 0x78
+#define TEGRA186_MEMORY_CLIENT_NVDECSWR 0x79
+/* Audio Processing (APE) engine reads */
+#define TEGRA186_MEMORY_CLIENT_APER 0x7a
+/* Audio Processing (APE) engine writes */
+#define TEGRA186_MEMORY_CLIENT_APEW 0x7b
+#define TEGRA186_MEMORY_CLIENT_NVJPGSRD 0x7e
+#define TEGRA186_MEMORY_CLIENT_NVJPGSWR 0x7f
+/* SE Memory Return Data Client Description */
+#define TEGRA186_MEMORY_CLIENT_SESRD 0x80
+/* SE Memory Write Client Description */
+#define TEGRA186_MEMORY_CLIENT_SESWR 0x81
+/* ETR reads */
+#define TEGRA186_MEMORY_CLIENT_ETRR 0x84
+/* ETR writes */
+#define TEGRA186_MEMORY_CLIENT_ETRW 0x85
+/* TSECB Memory Return Data Client Description */
+#define TEGRA186_MEMORY_CLIENT_TSECSRDB 0x86
+/* TSECB Memory Write Client Description */
+#define TEGRA186_MEMORY_CLIENT_TSECSWRB 0x87
+/* 3D, ltcx reads instance 1 */
+#define TEGRA186_MEMORY_CLIENT_GPUSRD2 0x88
+/* 3D, ltcx writes instance 1 */
+#define TEGRA186_MEMORY_CLIENT_GPUSWR2 0x89
+/* AXI Switch read client */
+#define TEGRA186_MEMORY_CLIENT_AXISR 0x8c
+/* AXI Switch write client */
+#define TEGRA186_MEMORY_CLIENT_AXISW 0x8d
+/* EQOS read client */
+#define TEGRA186_MEMORY_CLIENT_EQOSR 0x8e
+/* EQOS write client */
+#define TEGRA186_MEMORY_CLIENT_EQOSW 0x8f
+/* UFSHC read client */
+#define TEGRA186_MEMORY_CLIENT_UFSHCR 0x90
+/* UFSHC write client */
+#define TEGRA186_MEMORY_CLIENT_UFSHCW 0x91
+/* NVDISPLAY read client */
+#define TEGRA186_MEMORY_CLIENT_NVDISPLAYR 0x92
+/* BPMP read client */
+#define TEGRA186_MEMORY_CLIENT_BPMPR 0x93
+/* BPMP write client */
+#define TEGRA186_MEMORY_CLIENT_BPMPW 0x94
+/* BPMPDMA read client */
+#define TEGRA186_MEMORY_CLIENT_BPMPDMAR 0x95
+/* BPMPDMA write client */
+#define TEGRA186_MEMORY_CLIENT_BPMPDMAW 0x96
+/* AON read client */
+#define TEGRA186_MEMORY_CLIENT_AONR 0x97
+/* AON write client */
+#define TEGRA186_MEMORY_CLIENT_AONW 0x98
+/* AONDMA read client */
+#define TEGRA186_MEMORY_CLIENT_AONDMAR 0x99
+/* AONDMA write client */
+#define TEGRA186_MEMORY_CLIENT_AONDMAW 0x9a
+/* SCE read client */
+#define TEGRA186_MEMORY_CLIENT_SCER 0x9b
+/* SCE write client */
+#define TEGRA186_MEMORY_CLIENT_SCEW 0x9c
+/* SCEDMA read client */
+#define TEGRA186_MEMORY_CLIENT_SCEDMAR 0x9d
+/* SCEDMA write client */
+#define TEGRA186_MEMORY_CLIENT_SCEDMAW 0x9e
+/* APEDMA read client */
+#define TEGRA186_MEMORY_CLIENT_APEDMAR 0x9f
+/* APEDMA write client */
+#define TEGRA186_MEMORY_CLIENT_APEDMAW 0xa0
+/* NVDISPLAY read client instance 2 */
+#define TEGRA186_MEMORY_CLIENT_NVDISPLAYR1 0xa1
+#define TEGRA186_MEMORY_CLIENT_VICSRD1 0xa2
+#define TEGRA186_MEMORY_CLIENT_NVDECSRD1 0xa3
+
#endif
diff --git a/include/dt-bindings/memory/tegra194-mc.h b/include/dt-bindings/memory/tegra194-mc.h
new file mode 100644
index 000000000000..eed48b746bc9
--- /dev/null
+++ b/include/dt-bindings/memory/tegra194-mc.h
@@ -0,0 +1,410 @@
+#ifndef DT_BINDINGS_MEMORY_TEGRA194_MC_H
+#define DT_BINDINGS_MEMORY_TEGRA194_MC_H
+
+/* special clients */
+#define TEGRA194_SID_INVALID 0x00
+#define TEGRA194_SID_PASSTHROUGH 0x7f
+
+/* host1x clients */
+#define TEGRA194_SID_HOST1X 0x01
+#define TEGRA194_SID_CSI 0x02
+#define TEGRA194_SID_VIC 0x03
+#define TEGRA194_SID_VI 0x04
+#define TEGRA194_SID_ISP 0x05
+#define TEGRA194_SID_NVDEC 0x06
+#define TEGRA194_SID_NVENC 0x07
+#define TEGRA194_SID_NVJPG 0x08
+#define TEGRA194_SID_NVDISPLAY 0x09
+#define TEGRA194_SID_TSEC 0x0a
+#define TEGRA194_SID_TSECB 0x0b
+#define TEGRA194_SID_SE 0x0c
+#define TEGRA194_SID_SE1 0x0d
+#define TEGRA194_SID_SE2 0x0e
+#define TEGRA194_SID_SE3 0x0f
+
+/* GPU clients */
+#define TEGRA194_SID_GPU 0x10
+
+/* other SoC clients */
+#define TEGRA194_SID_AFI 0x11
+#define TEGRA194_SID_HDA 0x12
+#define TEGRA194_SID_ETR 0x13
+#define TEGRA194_SID_EQOS 0x14
+#define TEGRA194_SID_UFSHC 0x15
+#define TEGRA194_SID_AON 0x16
+#define TEGRA194_SID_SDMMC4 0x17
+#define TEGRA194_SID_SDMMC3 0x18
+#define TEGRA194_SID_SDMMC2 0x19
+#define TEGRA194_SID_SDMMC1 0x1a
+#define TEGRA194_SID_XUSB_HOST 0x1b
+#define TEGRA194_SID_XUSB_DEV 0x1c
+#define TEGRA194_SID_SATA 0x1d
+#define TEGRA194_SID_APE 0x1e
+#define TEGRA194_SID_SCE 0x1f
+
+/* GPC DMA clients */
+#define TEGRA194_SID_GPCDMA_0 0x20
+#define TEGRA194_SID_GPCDMA_1 0x21
+#define TEGRA194_SID_GPCDMA_2 0x22
+#define TEGRA194_SID_GPCDMA_3 0x23
+#define TEGRA194_SID_GPCDMA_4 0x24
+#define TEGRA194_SID_GPCDMA_5 0x25
+#define TEGRA194_SID_GPCDMA_6 0x26
+#define TEGRA194_SID_GPCDMA_7 0x27
+
+/* APE DMA clients */
+#define TEGRA194_SID_APE_1 0x28
+#define TEGRA194_SID_APE_2 0x29
+
+/* camera RTCPU */
+#define TEGRA194_SID_RCE 0x2a
+
+/* camera RTCPU on host1x address space */
+#define TEGRA194_SID_RCE_1X 0x2b
+
+/* APE DMA clients */
+#define TEGRA194_SID_APE_3 0x2c
+
+/* camera RTCPU running on APE */
+#define TEGRA194_SID_APE_CAM 0x2d
+#define TEGRA194_SID_APE_CAM_1X 0x2e
+
+#define TEGRA194_SID_RCE_RM 0x2f
+#define TEGRA194_SID_VI_FALCON 0x30
+#define TEGRA194_SID_ISP_FALCON 0x31
+
+/*
+ * The BPMP has its SID value hardcoded in the firmware. Changing it requires
+ * considerable effort.
+ */
+#define TEGRA194_SID_BPMP 0x32
+
+/* for SMMU tests */
+#define TEGRA194_SID_SMMU_TEST 0x33
+
+/* host1x virtualization channels */
+#define TEGRA194_SID_HOST1X_CTX0 0x38
+#define TEGRA194_SID_HOST1X_CTX1 0x39
+#define TEGRA194_SID_HOST1X_CTX2 0x3a
+#define TEGRA194_SID_HOST1X_CTX3 0x3b
+#define TEGRA194_SID_HOST1X_CTX4 0x3c
+#define TEGRA194_SID_HOST1X_CTX5 0x3d
+#define TEGRA194_SID_HOST1X_CTX6 0x3e
+#define TEGRA194_SID_HOST1X_CTX7 0x3f
+
+/* host1x command buffers */
+#define TEGRA194_SID_HOST1X_VM0 0x40
+#define TEGRA194_SID_HOST1X_VM1 0x41
+#define TEGRA194_SID_HOST1X_VM2 0x42
+#define TEGRA194_SID_HOST1X_VM3 0x43
+#define TEGRA194_SID_HOST1X_VM4 0x44
+#define TEGRA194_SID_HOST1X_VM5 0x45
+#define TEGRA194_SID_HOST1X_VM6 0x46
+#define TEGRA194_SID_HOST1X_VM7 0x47
+
+/* SE data buffers */
+#define TEGRA194_SID_SE_VM0 0x48
+#define TEGRA194_SID_SE_VM1 0x49
+#define TEGRA194_SID_SE_VM2 0x4a
+#define TEGRA194_SID_SE_VM3 0x4b
+#define TEGRA194_SID_SE_VM4 0x4c
+#define TEGRA194_SID_SE_VM5 0x4d
+#define TEGRA194_SID_SE_VM6 0x4e
+#define TEGRA194_SID_SE_VM7 0x4f
+
+#define TEGRA194_SID_MIU 0x50
+
+#define TEGRA194_SID_NVDLA0 0x51
+#define TEGRA194_SID_NVDLA1 0x52
+
+#define TEGRA194_SID_PVA0 0x53
+#define TEGRA194_SID_PVA1 0x54
+#define TEGRA194_SID_NVENC1 0x55
+#define TEGRA194_SID_PCIE0 0x56
+#define TEGRA194_SID_PCIE1 0x57
+#define TEGRA194_SID_PCIE2 0x58
+#define TEGRA194_SID_PCIE3 0x59
+#define TEGRA194_SID_PCIE4 0x5a
+#define TEGRA194_SID_PCIE5 0x5b
+#define TEGRA194_SID_NVDEC1 0x5c
+
+#define TEGRA194_SID_XUSB_VF0 0x5d
+#define TEGRA194_SID_XUSB_VF1 0x5e
+#define TEGRA194_SID_XUSB_VF2 0x5f
+#define TEGRA194_SID_XUSB_VF3 0x60
+
+#define TEGRA194_SID_RCE_VM3 0x61
+#define TEGRA194_SID_VI_VM2 0x62
+#define TEGRA194_SID_VI_VM3 0x63
+#define TEGRA194_SID_RCE_SERVER 0x64
+
+/*
+ * memory client IDs
+ */
+
+/* Misses from System Memory Management Unit (SMMU) Page Table Cache (PTC) */
+#define TEGRA194_MEMORY_CLIENT_PTCR 0x00
+/* MSS internal memqual MIU7 read clients */
+#define TEGRA194_MEMORY_CLIENT_MIU7R 0x01
+/* MSS internal memqual MIU7 write clients */
+#define TEGRA194_MEMORY_CLIENT_MIU7W 0x02
+/* High-definition audio (HDA) read clients */
+#define TEGRA194_MEMORY_CLIENT_HDAR 0x15
+/* Host channel data read clients */
+#define TEGRA194_MEMORY_CLIENT_HOST1XDMAR 0x16
+#define TEGRA194_MEMORY_CLIENT_NVENCSRD 0x1c
+/* SATA read clients */
+#define TEGRA194_MEMORY_CLIENT_SATAR 0x1f
+/* Reads from Cortex-A9 4 CPU cores via the L2 cache */
+#define TEGRA194_MEMORY_CLIENT_MPCORER 0x27
+#define TEGRA194_MEMORY_CLIENT_NVENCSWR 0x2b
+/* High-definition audio (HDA) write clients */
+#define TEGRA194_MEMORY_CLIENT_HDAW 0x35
+/* Writes from Cortex-A9 4 CPU cores via the L2 cache */
+#define TEGRA194_MEMORY_CLIENT_MPCOREW 0x39
+/* SATA write clients */
+#define TEGRA194_MEMORY_CLIENT_SATAW 0x3d
+/* ISP read client for Crossbar A */
+#define TEGRA194_MEMORY_CLIENT_ISPRA 0x44
+/* ISP read client 1 for Crossbar A */
+#define TEGRA194_MEMORY_CLIENT_ISPFALR 0x45
+/* ISP Write client for Crossbar A */
+#define TEGRA194_MEMORY_CLIENT_ISPWA 0x46
+/* ISP Write client Crossbar B */
+#define TEGRA194_MEMORY_CLIENT_ISPWB 0x47
+/* XUSB_HOST read clients */
+#define TEGRA194_MEMORY_CLIENT_XUSB_HOSTR 0x4a
+/* XUSB_HOST write clients */
+#define TEGRA194_MEMORY_CLIENT_XUSB_HOSTW 0x4b
+/* XUSB read clients */
+#define TEGRA194_MEMORY_CLIENT_XUSB_DEVR 0x4c
+/* XUSB_DEV write clients */
+#define TEGRA194_MEMORY_CLIENT_XUSB_DEVW 0x4d
+/* sdmmca memory read client */
+#define TEGRA194_MEMORY_CLIENT_SDMMCRA 0x60
+/* sdmmc memory read client */
+#define TEGRA194_MEMORY_CLIENT_SDMMCR 0x62
+/* sdmmcd memory read client */
+#define TEGRA194_MEMORY_CLIENT_SDMMCRAB 0x63
+/* sdmmca memory write client */
+#define TEGRA194_MEMORY_CLIENT_SDMMCWA 0x64
+/* sdmmc memory write client */
+#define TEGRA194_MEMORY_CLIENT_SDMMCW 0x66
+/* sdmmcd memory write client */
+#define TEGRA194_MEMORY_CLIENT_SDMMCWAB 0x67
+#define TEGRA194_MEMORY_CLIENT_VICSRD 0x6c
+#define TEGRA194_MEMORY_CLIENT_VICSWR 0x6d
+/* VI Write client */
+#define TEGRA194_MEMORY_CLIENT_VIW 0x72
+#define TEGRA194_MEMORY_CLIENT_NVDECSRD 0x78
+#define TEGRA194_MEMORY_CLIENT_NVDECSWR 0x79
+/* Audio Processing (APE) engine read clients */
+#define TEGRA194_MEMORY_CLIENT_APER 0x7a
+/* Audio Processing (APE) engine write clients */
+#define TEGRA194_MEMORY_CLIENT_APEW 0x7b
+#define TEGRA194_MEMORY_CLIENT_NVJPGSRD 0x7e
+#define TEGRA194_MEMORY_CLIENT_NVJPGSWR 0x7f
+/* AXI AP and DFD-AUX0/1 read clients; both share the same interface on the MSS */
+#define TEGRA194_MEMORY_CLIENT_AXIAPR 0x82
+/* AXI AP and DFD-AUX0/1 write clients; both share the same interface on the MSS */
+#define TEGRA194_MEMORY_CLIENT_AXIAPW 0x83
+/* ETR read clients */
+#define TEGRA194_MEMORY_CLIENT_ETRR 0x84
+/* ETR write clients */
+#define TEGRA194_MEMORY_CLIENT_ETRW 0x85
+/* AXI Switch read client */
+#define TEGRA194_MEMORY_CLIENT_AXISR 0x8c
+/* AXI Switch write client */
+#define TEGRA194_MEMORY_CLIENT_AXISW 0x8d
+/* EQOS read client */
+#define TEGRA194_MEMORY_CLIENT_EQOSR 0x8e
+/* EQOS write client */
+#define TEGRA194_MEMORY_CLIENT_EQOSW 0x8f
+/* UFSHC read client */
+#define TEGRA194_MEMORY_CLIENT_UFSHCR 0x90
+/* UFSHC write client */
+#define TEGRA194_MEMORY_CLIENT_UFSHCW 0x91
+/* NVDISPLAY read client */
+#define TEGRA194_MEMORY_CLIENT_NVDISPLAYR 0x92
+/* BPMP read client */
+#define TEGRA194_MEMORY_CLIENT_BPMPR 0x93
+/* BPMP write client */
+#define TEGRA194_MEMORY_CLIENT_BPMPW 0x94
+/* BPMPDMA read client */
+#define TEGRA194_MEMORY_CLIENT_BPMPDMAR 0x95
+/* BPMPDMA write client */
+#define TEGRA194_MEMORY_CLIENT_BPMPDMAW 0x96
+/* AON read client */
+#define TEGRA194_MEMORY_CLIENT_AONR 0x97
+/* AON write client */
+#define TEGRA194_MEMORY_CLIENT_AONW 0x98
+/* AONDMA read client */
+#define TEGRA194_MEMORY_CLIENT_AONDMAR 0x99
+/* AONDMA write client */
+#define TEGRA194_MEMORY_CLIENT_AONDMAW 0x9a
+/* SCE read client */
+#define TEGRA194_MEMORY_CLIENT_SCER 0x9b
+/* SCE write client */
+#define TEGRA194_MEMORY_CLIENT_SCEW 0x9c
+/* SCEDMA read client */
+#define TEGRA194_MEMORY_CLIENT_SCEDMAR 0x9d
+/* SCEDMA write client */
+#define TEGRA194_MEMORY_CLIENT_SCEDMAW 0x9e
+/* APEDMA read client */
+#define TEGRA194_MEMORY_CLIENT_APEDMAR 0x9f
+/* APEDMA write client */
+#define TEGRA194_MEMORY_CLIENT_APEDMAW 0xa0
+/* NVDISPLAY read client instance 2 */
+#define TEGRA194_MEMORY_CLIENT_NVDISPLAYR1 0xa1
+#define TEGRA194_MEMORY_CLIENT_VICSRD1 0xa2
+#define TEGRA194_MEMORY_CLIENT_NVDECSRD1 0xa3
+/* MSS internal memqual MIU0 read clients */
+#define TEGRA194_MEMORY_CLIENT_MIU0R 0xa6
+/* MSS internal memqual MIU0 write clients */
+#define TEGRA194_MEMORY_CLIENT_MIU0W 0xa7
+/* MSS internal memqual MIU1 read clients */
+#define TEGRA194_MEMORY_CLIENT_MIU1R 0xa8
+/* MSS internal memqual MIU1 write clients */
+#define TEGRA194_MEMORY_CLIENT_MIU1W 0xa9
+/* MSS internal memqual MIU2 read clients */
+#define TEGRA194_MEMORY_CLIENT_MIU2R 0xae
+/* MSS internal memqual MIU2 write clients */
+#define TEGRA194_MEMORY_CLIENT_MIU2W 0xaf
+/* MSS internal memqual MIU3 read clients */
+#define TEGRA194_MEMORY_CLIENT_MIU3R 0xb0
+/* MSS internal memqual MIU3 write clients */
+#define TEGRA194_MEMORY_CLIENT_MIU3W 0xb1
+/* MSS internal memqual MIU4 read clients */
+#define TEGRA194_MEMORY_CLIENT_MIU4R 0xb2
+/* MSS internal memqual MIU4 write clients */
+#define TEGRA194_MEMORY_CLIENT_MIU4W 0xb3
+#define TEGRA194_MEMORY_CLIENT_DPMUR 0xb4
+#define TEGRA194_MEMORY_CLIENT_DPMUW 0xb5
+#define TEGRA194_MEMORY_CLIENT_NVL0R 0xb6
+#define TEGRA194_MEMORY_CLIENT_NVL0W 0xb7
+#define TEGRA194_MEMORY_CLIENT_NVL1R 0xb8
+#define TEGRA194_MEMORY_CLIENT_NVL1W 0xb9
+#define TEGRA194_MEMORY_CLIENT_NVL2R 0xba
+#define TEGRA194_MEMORY_CLIENT_NVL2W 0xbb
+/* VI Falcon read clients */
+#define TEGRA194_MEMORY_CLIENT_VIFALR 0xbc
+/* VIFAL write clients */
+#define TEGRA194_MEMORY_CLIENT_VIFALW 0xbd
+/* DLA0ARDA read clients */
+#define TEGRA194_MEMORY_CLIENT_DLA0RDA 0xbe
+/* DLA0 Falcon read clients */
+#define TEGRA194_MEMORY_CLIENT_DLA0FALRDB 0xbf
+/* DLA0 write clients */
+#define TEGRA194_MEMORY_CLIENT_DLA0WRA 0xc0
+/* DLA0 write clients */
+#define TEGRA194_MEMORY_CLIENT_DLA0FALWRB 0xc1
+/* DLA1ARDA read clients */
+#define TEGRA194_MEMORY_CLIENT_DLA1RDA 0xc2
+/* DLA1 Falcon read clients */
+#define TEGRA194_MEMORY_CLIENT_DLA1FALRDB 0xc3
+/* DLA1 write clients */
+#define TEGRA194_MEMORY_CLIENT_DLA1WRA 0xc4
+/* DLA1 write clients */
+#define TEGRA194_MEMORY_CLIENT_DLA1FALWRB 0xc5
+/* PVA0RDA read clients */
+#define TEGRA194_MEMORY_CLIENT_PVA0RDA 0xc6
+/* PVA0RDB read clients */
+#define TEGRA194_MEMORY_CLIENT_PVA0RDB 0xc7
+/* PVA0RDC read clients */
+#define TEGRA194_MEMORY_CLIENT_PVA0RDC 0xc8
+/* PVA0WRA write clients */
+#define TEGRA194_MEMORY_CLIENT_PVA0WRA 0xc9
+/* PVA0WRB write clients */
+#define TEGRA194_MEMORY_CLIENT_PVA0WRB 0xca
+/* PVA0WRC write clients */
+#define TEGRA194_MEMORY_CLIENT_PVA0WRC 0xcb
+/* PVA1RDA read clients */
+#define TEGRA194_MEMORY_CLIENT_PVA1RDA 0xcc
+/* PVA1RDB read clients */
+#define TEGRA194_MEMORY_CLIENT_PVA1RDB 0xcd
+/* PVA1RDC read clients */
+#define TEGRA194_MEMORY_CLIENT_PVA1RDC 0xce
+/* PVA1WRA write clients */
+#define TEGRA194_MEMORY_CLIENT_PVA1WRA 0xcf
+/* PVA1WRB write clients */
+#define TEGRA194_MEMORY_CLIENT_PVA1WRB 0xd0
+/* PVA1WRC write clients */
+#define TEGRA194_MEMORY_CLIENT_PVA1WRC 0xd1
+/* RCE read client */
+#define TEGRA194_MEMORY_CLIENT_RCER 0xd2
+/* RCE write client */
+#define TEGRA194_MEMORY_CLIENT_RCEW 0xd3
+/* RCEDMA read client */
+#define TEGRA194_MEMORY_CLIENT_RCEDMAR 0xd4
+/* RCEDMA write client */
+#define TEGRA194_MEMORY_CLIENT_RCEDMAW 0xd5
+#define TEGRA194_MEMORY_CLIENT_NVENC1SRD 0xd6
+#define TEGRA194_MEMORY_CLIENT_NVENC1SWR 0xd7
+/* PCIE0 read clients */
+#define TEGRA194_MEMORY_CLIENT_PCIE0R 0xd8
+/* PCIE0 write clients */
+#define TEGRA194_MEMORY_CLIENT_PCIE0W 0xd9
+/* PCIE1 read clients */
+#define TEGRA194_MEMORY_CLIENT_PCIE1R 0xda
+/* PCIE1 write clients */
+#define TEGRA194_MEMORY_CLIENT_PCIE1W 0xdb
+/* PCIE2 read clients */
+#define TEGRA194_MEMORY_CLIENT_PCIE2AR 0xdc
+/* PCIE2 write clients */
+#define TEGRA194_MEMORY_CLIENT_PCIE2AW 0xdd
+/* PCIE3 read clients */
+#define TEGRA194_MEMORY_CLIENT_PCIE3R 0xde
+/* PCIE3 write clients */
+#define TEGRA194_MEMORY_CLIENT_PCIE3W 0xdf
+/* PCIE4 read clients */
+#define TEGRA194_MEMORY_CLIENT_PCIE4R 0xe0
+/* PCIE4 write clients */
+#define TEGRA194_MEMORY_CLIENT_PCIE4W 0xe1
+/* PCIE5 read clients */
+#define TEGRA194_MEMORY_CLIENT_PCIE5R 0xe2
+/* PCIE5 write clients */
+#define TEGRA194_MEMORY_CLIENT_PCIE5W 0xe3
+/* ISP Falcon write client for Crossbar A */
+#define TEGRA194_MEMORY_CLIENT_ISPFALW 0xe4
+#define TEGRA194_MEMORY_CLIENT_NVL3R 0xe5
+#define TEGRA194_MEMORY_CLIENT_NVL3W 0xe6
+#define TEGRA194_MEMORY_CLIENT_NVL4R 0xe7
+#define TEGRA194_MEMORY_CLIENT_NVL4W 0xe8
+/* DLA0ARDA1 read clients */
+#define TEGRA194_MEMORY_CLIENT_DLA0RDA1 0xe9
+/* DLA1ARDA1 read clients */
+#define TEGRA194_MEMORY_CLIENT_DLA1RDA1 0xea
+/* PVA0RDA1 read clients */
+#define TEGRA194_MEMORY_CLIENT_PVA0RDA1 0xeb
+/* PVA0RDB1 read clients */
+#define TEGRA194_MEMORY_CLIENT_PVA0RDB1 0xec
+/* PVA1RDA1 read clients */
+#define TEGRA194_MEMORY_CLIENT_PVA1RDA1 0xed
+/* PVA1RDB1 read clients */
+#define TEGRA194_MEMORY_CLIENT_PVA1RDB1 0xee
+/* PCIE5r1 read clients */
+#define TEGRA194_MEMORY_CLIENT_PCIE5R1 0xef
+#define TEGRA194_MEMORY_CLIENT_NVENCSRD1 0xf0
+#define TEGRA194_MEMORY_CLIENT_NVENC1SRD1 0xf1
+/* ISP read client for Crossbar A */
+#define TEGRA194_MEMORY_CLIENT_ISPRA1 0xf2
+/* PCIE0 read clients */
+#define TEGRA194_MEMORY_CLIENT_PCIE0R1 0xf3
+#define TEGRA194_MEMORY_CLIENT_NVL0RHP 0xf4
+#define TEGRA194_MEMORY_CLIENT_NVL1RHP 0xf5
+#define TEGRA194_MEMORY_CLIENT_NVL2RHP 0xf6
+#define TEGRA194_MEMORY_CLIENT_NVL3RHP 0xf7
+#define TEGRA194_MEMORY_CLIENT_NVL4RHP 0xf8
+#define TEGRA194_MEMORY_CLIENT_NVDEC1SRD 0xf9
+#define TEGRA194_MEMORY_CLIENT_NVDEC1SRD1 0xfa
+#define TEGRA194_MEMORY_CLIENT_NVDEC1SWR 0xfb
+/* MSS internal memqual MIU5 read clients */
+#define TEGRA194_MEMORY_CLIENT_MIU5R 0xfc
+/* MSS internal memqual MIU5 write clients */
+#define TEGRA194_MEMORY_CLIENT_MIU5W 0xfd
+/* MSS internal memqual MIU6 read clients */
+#define TEGRA194_MEMORY_CLIENT_MIU6R 0xfe
+/* MSS internal memqual MIU6 write clients */
+#define TEGRA194_MEMORY_CLIENT_MIU6W 0xff
+
+#endif
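To make the split in this header clearer: the TEGRA194_SID_* values are stream IDs presented to the SMMU, while the TEGRA194_MEMORY_CLIENT_* values identify memory controller clients. A hedged DTS sketch of the SID side only; the node name, unit address and smmu label are assumptions, not taken from this patch.

    #include <dt-bindings/memory/tegra194-mc.h>

    sdhci@3460000 {
            /* Hypothetical node: route this controller's traffic through
             * the SMMU using the SDMMC4 stream ID defined above. */
            iommus = <&smmu TEGRA194_SID_SDMMC4>;
    };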
diff --git a/include/dt-bindings/net/qca-ar803x.h b/include/dt-bindings/net/qca-ar803x.h
new file mode 100644
index 000000000000..9c046c7242ed
--- /dev/null
+++ b/include/dt-bindings/net/qca-ar803x.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Device Tree constants for the Qualcomm Atheros AR803x PHYs
+ */
+
+#ifndef _DT_BINDINGS_QCA_AR803X_H
+#define _DT_BINDINGS_QCA_AR803X_H
+
+#define AR803X_STRENGTH_FULL 0
+#define AR803X_STRENGTH_HALF 1
+#define AR803X_STRENGTH_QUARTER 2
+
+#endif
diff --git a/include/dt-bindings/net/ti-dp83869.h b/include/dt-bindings/net/ti-dp83869.h
new file mode 100644
index 000000000000..218b1a64e975
--- /dev/null
+++ b/include/dt-bindings/net/ti-dp83869.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Device Tree constants for the Texas Instruments DP83869 PHY
+ *
+ * Author: Dan Murphy <dmurphy@ti.com>
+ *
+ * Copyright: (C) 2019 Texas Instruments, Inc.
+ */
+
+#ifndef _DT_BINDINGS_TI_DP83869_H
+#define _DT_BINDINGS_TI_DP83869_H
+
+/* PHY CTRL bits */
+#define DP83869_PHYCR_FIFO_DEPTH_3_B_NIB 0x00
+#define DP83869_PHYCR_FIFO_DEPTH_4_B_NIB 0x01
+#define DP83869_PHYCR_FIFO_DEPTH_6_B_NIB 0x02
+#define DP83869_PHYCR_FIFO_DEPTH_8_B_NIB 0x03
+
+/* IO_MUX_CFG - Clock output selection */
+#define DP83869_CLK_O_SEL_CHN_A_RCLK 0x0
+#define DP83869_CLK_O_SEL_CHN_B_RCLK 0x1
+#define DP83869_CLK_O_SEL_CHN_C_RCLK 0x2
+#define DP83869_CLK_O_SEL_CHN_D_RCLK 0x3
+#define DP83869_CLK_O_SEL_CHN_A_RCLK_DIV5 0x4
+#define DP83869_CLK_O_SEL_CHN_B_RCLK_DIV5 0x5
+#define DP83869_CLK_O_SEL_CHN_C_RCLK_DIV5 0x6
+#define DP83869_CLK_O_SEL_CHN_D_RCLK_DIV5 0x7
+#define DP83869_CLK_O_SEL_CHN_A_TCLK 0x8
+#define DP83869_CLK_O_SEL_CHN_B_TCLK 0x9
+#define DP83869_CLK_O_SEL_CHN_C_TCLK 0xa
+#define DP83869_CLK_O_SEL_CHN_D_TCLK 0xb
+#define DP83869_CLK_O_SEL_REF_CLK 0xc
+
+#define DP83869_RGMII_COPPER_ETHERNET 0x00
+#define DP83869_RGMII_1000_BASE 0x01
+#define DP83869_RGMII_100_BASE 0x02
+#define DP83869_RGMII_SGMII_BRIDGE 0x03
+#define DP83869_1000M_MEDIA_CONVERT 0x04
+#define DP83869_100M_MEDIA_CONVERT 0x05
+#define DP83869_SGMII_COPPER_ETHERNET 0x06
+
+#endif
diff --git a/include/dt-bindings/phy/phy.h b/include/dt-bindings/phy/phy.h
index b6a1eaf1b339..1f3f866fae7b 100644
--- a/include/dt-bindings/phy/phy.h
+++ b/include/dt-bindings/phy/phy.h
@@ -16,5 +16,6 @@
#define PHY_TYPE_USB2 3
#define PHY_TYPE_USB3 4
#define PHY_TYPE_UFS 5
+#define PHY_TYPE_DP 6
#endif /* _DT_BINDINGS_PHY */
diff --git a/include/dt-bindings/pinctrl/at91.h b/include/dt-bindings/pinctrl/at91.h
index 3831f91fb3ba..e8e117306b1b 100644
--- a/include/dt-bindings/pinctrl/at91.h
+++ b/include/dt-bindings/pinctrl/at91.h
@@ -27,8 +27,8 @@
#define AT91_PINCTRL_DRIVE_STRENGTH_MED (0x2 << 5)
#define AT91_PINCTRL_DRIVE_STRENGTH_HI (0x3 << 5)
-#define AT91_PINCTRL_SLEWRATE_DIS (0x0 << 9)
-#define AT91_PINCTRL_SLEWRATE_ENA (0x1 << 9)
+#define AT91_PINCTRL_SLEWRATE_ENA (0x0 << 9)
+#define AT91_PINCTRL_SLEWRATE_DIS (0x1 << 9)
#define AT91_PIOA 0
#define AT91_PIOB 1
diff --git a/include/dt-bindings/pinctrl/rockchip.h b/include/dt-bindings/pinctrl/rockchip.h
index dc5c1c73d030..6d6bac1c26d7 100644
--- a/include/dt-bindings/pinctrl/rockchip.h
+++ b/include/dt-bindings/pinctrl/rockchip.h
@@ -50,9 +50,9 @@
#define RK_PD7 31
#define RK_FUNC_GPIO 0
-#define RK_FUNC_1 1
-#define RK_FUNC_2 2
-#define RK_FUNC_3 3
-#define RK_FUNC_4 4
+#define RK_FUNC_1 1 /* deprecated */
+#define RK_FUNC_2 2 /* deprecated */
+#define RK_FUNC_3 3 /* deprecated */
+#define RK_FUNC_4 4 /* deprecated */
#endif
diff --git a/include/dt-bindings/pmu/exynos_ppmu.h b/include/dt-bindings/pmu/exynos_ppmu.h
new file mode 100644
index 000000000000..8724abe130f3
--- /dev/null
+++ b/include/dt-bindings/pmu/exynos_ppmu.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Samsung Exynos PPMU event types for counting in regs
+ *
+ * Copyright (c) 2019, Samsung Electronics
+ * Author: Lukasz Luba <l.luba@partner.samsung.com>
+ */
+
+#ifndef __DT_BINDINGS_PMU_EXYNOS_PPMU_H
+#define __DT_BINDINGS_PMU_EXYNOS_PPMU_H
+
+#define PPMU_RO_BUSY_CYCLE_CNT 0x0
+#define PPMU_WO_BUSY_CYCLE_CNT 0x1
+#define PPMU_RW_BUSY_CYCLE_CNT 0x2
+#define PPMU_RO_REQUEST_CNT 0x3
+#define PPMU_WO_REQUEST_CNT 0x4
+#define PPMU_RO_DATA_CNT 0x5
+#define PPMU_WO_DATA_CNT 0x6
+#define PPMU_RO_LATENCY 0x12
+#define PPMU_WO_LATENCY 0x16
+#define PPMU_V2_RO_DATA_CNT 0x4
+#define PPMU_V2_WO_DATA_CNT 0x5
+#define PPMU_V2_EVT3_RW_DATA_CNT 0x22
+
+#endif
diff --git a/include/dt-bindings/power/mt6765-power.h b/include/dt-bindings/power/mt6765-power.h
new file mode 100644
index 000000000000..d347b4ee9eed
--- /dev/null
+++ b/include/dt-bindings/power/mt6765-power.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _DT_BINDINGS_POWER_MT6765_POWER_H
+#define _DT_BINDINGS_POWER_MT6765_POWER_H
+
+#define MT6765_POWER_DOMAIN_CONN 0
+#define MT6765_POWER_DOMAIN_MM 1
+#define MT6765_POWER_DOMAIN_MFG_ASYNC 2
+#define MT6765_POWER_DOMAIN_ISP 3
+#define MT6765_POWER_DOMAIN_MFG 4
+#define MT6765_POWER_DOMAIN_MFG_CORE0 5
+#define MT6765_POWER_DOMAIN_CAM 6
+#define MT6765_POWER_DOMAIN_VCODEC 7
+
+#endif /* _DT_BINDINGS_POWER_MT6765_POWER_H */
diff --git a/include/dt-bindings/power/qcom-rpmpd.h b/include/dt-bindings/power/qcom-rpmpd.h
index 93e36d011527..3f74096d5a7c 100644
--- a/include/dt-bindings/power/qcom-rpmpd.h
+++ b/include/dt-bindings/power/qcom-rpmpd.h
@@ -15,18 +15,50 @@
#define SDM845_GFX 7
#define SDM845_MSS 8
+/* SM8150 Power Domain Indexes */
+#define SM8150_MSS 0
+#define SM8150_EBI 1
+#define SM8150_LMX 2
+#define SM8150_LCX 3
+#define SM8150_GFX 4
+#define SM8150_MX 5
+#define SM8150_MX_AO 6
+#define SM8150_CX 7
+#define SM8150_CX_AO 8
+#define SM8150_MMCX 9
+#define SM8150_MMCX_AO 10
+
+/* SC7180 Power Domain Indexes */
+#define SC7180_CX 0
+#define SC7180_CX_AO 1
+#define SC7180_GFX 2
+#define SC7180_MX 3
+#define SC7180_MX_AO 4
+#define SC7180_LMX 5
+#define SC7180_LCX 6
+#define SC7180_MSS 7
+
/* SDM845 Power Domain performance levels */
#define RPMH_REGULATOR_LEVEL_RETENTION 16
#define RPMH_REGULATOR_LEVEL_MIN_SVS 48
#define RPMH_REGULATOR_LEVEL_LOW_SVS 64
#define RPMH_REGULATOR_LEVEL_SVS 128
#define RPMH_REGULATOR_LEVEL_SVS_L1 192
+#define RPMH_REGULATOR_LEVEL_SVS_L2 224
#define RPMH_REGULATOR_LEVEL_NOM 256
#define RPMH_REGULATOR_LEVEL_NOM_L1 320
#define RPMH_REGULATOR_LEVEL_NOM_L2 336
#define RPMH_REGULATOR_LEVEL_TURBO 384
#define RPMH_REGULATOR_LEVEL_TURBO_L1 416
+/* MSM8976 Power Domain Indexes */
+#define MSM8976_VDDCX 0
+#define MSM8976_VDDCX_AO 1
+#define MSM8976_VDDCX_VFL 2
+#define MSM8976_VDDMX 3
+#define MSM8976_VDDMX_AO 4
+#define MSM8976_VDDMX_VFL 5
+
/* MSM8996 Power Domain Indexes */
#define MSM8996_VDDCX 0
#define MSM8996_VDDCX_AO 1
@@ -68,6 +100,7 @@
#define RPM_SMD_LEVEL_NOM_PLUS 320
#define RPM_SMD_LEVEL_TURBO 384
#define RPM_SMD_LEVEL_TURBO_NO_CPR 416
+#define RPM_SMD_LEVEL_TURBO_HIGH 448
#define RPM_SMD_LEVEL_BINNING 512
#endif
diff --git a/include/dt-bindings/power/r8a774b1-sysc.h b/include/dt-bindings/power/r8a774b1-sysc.h
new file mode 100644
index 000000000000..373736402f04
--- /dev/null
+++ b/include/dt-bindings/power/r8a774b1-sysc.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (C) 2019 Renesas Electronics Corp.
+ */
+#ifndef __DT_BINDINGS_POWER_R8A774B1_SYSC_H__
+#define __DT_BINDINGS_POWER_R8A774B1_SYSC_H__
+
+/*
+ * These power domain indices match the numbers of the interrupt bits
+ * representing the power areas in the various Interrupt Registers
+ * (e.g. SYSCISR, Interrupt Status Register)
+ */
+
+#define R8A774B1_PD_CA57_CPU0 0
+#define R8A774B1_PD_CA57_CPU1 1
+#define R8A774B1_PD_A3VP 9
+#define R8A774B1_PD_CA57_SCU 12
+#define R8A774B1_PD_A3VC 14
+#define R8A774B1_PD_3DG_A 17
+#define R8A774B1_PD_3DG_B 18
+#define R8A774B1_PD_A2VC1 26
+
+/* Always-on power area */
+#define R8A774B1_PD_ALWAYS_ON 32
+
+#endif /* __DT_BINDINGS_POWER_R8A774B1_SYSC_H__ */
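Because the indices double as interrupt bit numbers, the same constant serves both the SYSC driver and DTS power-domain references. A hedged consumer sketch; the device node and the sysc label are assumptions for illustration.

    #include <dt-bindings/power/r8a774b1-sysc.h>

    vsp@fe960000 {
            /* Hypothetical node: place the device in the A3VP power area. */
            power-domains = <&sysc R8A774B1_PD_A3VP>;
    };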
diff --git a/include/dt-bindings/power/r8a77961-sysc.h b/include/dt-bindings/power/r8a77961-sysc.h
new file mode 100644
index 000000000000..7a3800996f7c
--- /dev/null
+++ b/include/dt-bindings/power/r8a77961-sysc.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2019 Glider bvba
+ */
+#ifndef __DT_BINDINGS_POWER_R8A77961_SYSC_H__
+#define __DT_BINDINGS_POWER_R8A77961_SYSC_H__
+
+/*
+ * These power domain indices match the numbers of the interrupt bits
+ * representing the power areas in the various Interrupt Registers
+ * (e.g. SYSCISR, Interrupt Status Register)
+ */
+
+#define R8A77961_PD_CA57_CPU0 0
+#define R8A77961_PD_CA57_CPU1 1
+#define R8A77961_PD_CA53_CPU0 5
+#define R8A77961_PD_CA53_CPU1 6
+#define R8A77961_PD_CA53_CPU2 7
+#define R8A77961_PD_CA53_CPU3 8
+#define R8A77961_PD_CA57_SCU 12
+#define R8A77961_PD_CR7 13
+#define R8A77961_PD_A3VC 14
+#define R8A77961_PD_3DG_A 17
+#define R8A77961_PD_3DG_B 18
+#define R8A77961_PD_CA53_SCU 21
+#define R8A77961_PD_A3IR 24
+#define R8A77961_PD_A2VC1 26
+
+/* Always-on power area */
+#define R8A77961_PD_ALWAYS_ON 32
+
+#endif /* __DT_BINDINGS_POWER_R8A77961_SYSC_H__ */
diff --git a/include/dt-bindings/regulator/dlg,da9063-regulator.h b/include/dt-bindings/regulator/dlg,da9063-regulator.h
new file mode 100644
index 000000000000..1de710dd0899
--- /dev/null
+++ b/include/dt-bindings/regulator/dlg,da9063-regulator.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _DT_BINDINGS_REGULATOR_DLG_DA9063_H
+#define _DT_BINDINGS_REGULATOR_DLG_DA9063_H
+
+/*
+ * These buck mode constants may be used to specify values in device tree
+ * properties (e.g. regulator-initial-mode).
+ * A description of the following modes is in the manufacturer's datasheet.
+ */
+
+#define DA9063_BUCK_MODE_SLEEP 1
+#define DA9063_BUCK_MODE_SYNC 2
+#define DA9063_BUCK_MODE_AUTO 3
+
+#endif
diff --git a/include/dt-bindings/reset-controller/mt2712-resets.h b/include/dt-bindings/reset-controller/mt2712-resets.h
new file mode 100644
index 000000000000..9e7ee762f076
--- /dev/null
+++ b/include/dt-bindings/reset-controller/mt2712-resets.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2019 MediaTek Inc.
+ * Author: Yong Liang <yong.liang@mediatek.com>
+ */
+
+#ifndef _DT_BINDINGS_RESET_CONTROLLER_MT2712
+#define _DT_BINDINGS_RESET_CONTROLLER_MT2712
+
+#define MT2712_TOPRGU_INFRA_SW_RST 0
+#define MT2712_TOPRGU_MM_SW_RST 1
+#define MT2712_TOPRGU_MFG_SW_RST 2
+#define MT2712_TOPRGU_VENC_SW_RST 3
+#define MT2712_TOPRGU_VDEC_SW_RST 4
+#define MT2712_TOPRGU_IMG_SW_RST 5
+#define MT2712_TOPRGU_INFRA_AO_SW_RST 8
+#define MT2712_TOPRGU_USB_SW_RST 9
+#define MT2712_TOPRGU_APMIXED_SW_RST 10
+
+#define MT2712_TOPRGU_SW_RST_NUM 11
+
+#endif /* _DT_BINDINGS_RESET_CONTROLLER_MT2712 */
diff --git a/include/dt-bindings/reset-controller/mt8183-resets.h b/include/dt-bindings/reset-controller/mt8183-resets.h
index 8804e34ebdd4..a1bbd41e0d12 100644
--- a/include/dt-bindings/reset-controller/mt8183-resets.h
+++ b/include/dt-bindings/reset-controller/mt8183-resets.h
@@ -78,4 +78,21 @@
#define MT8183_INFRACFG_AO_I2C7_SW_RST 126
#define MT8183_INFRACFG_AO_I2C8_SW_RST 127
+#define MT8183_INFRACFG_SW_RST_NUM 128
+
+#define MT8183_TOPRGU_MM_SW_RST 1
+#define MT8183_TOPRGU_MFG_SW_RST 2
+#define MT8183_TOPRGU_VENC_SW_RST 3
+#define MT8183_TOPRGU_VDEC_SW_RST 4
+#define MT8183_TOPRGU_IMG_SW_RST 5
+#define MT8183_TOPRGU_MD_SW_RST 7
+#define MT8183_TOPRGU_CONN_SW_RST 9
+#define MT8183_TOPRGU_CONN_MCU_SW_RST 12
+#define MT8183_TOPRGU_IPU0_SW_RST 14
+#define MT8183_TOPRGU_IPU1_SW_RST 15
+#define MT8183_TOPRGU_AUDIO_SW_RST 17
+#define MT8183_TOPRGU_CAMSYS_SW_RST 18
+
+#define MT8183_TOPRGU_SW_RST_NUM 19
+
#endif /* _DT_BINDINGS_RESET_CONTROLLER_MT8183 */
diff --git a/include/dt-bindings/reset/amlogic,meson-a1-reset.h b/include/dt-bindings/reset/amlogic,meson-a1-reset.h
new file mode 100644
index 000000000000..f1a3a797540d
--- /dev/null
+++ b/include/dt-bindings/reset/amlogic,meson-a1-reset.h
@@ -0,0 +1,74 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR MIT)
+ *
+ * Copyright (c) 2019 Amlogic, Inc. All rights reserved.
+ * Author: Xingyu Chen <xingyu.chen@amlogic.com>
+ *
+ */
+
+#ifndef _DT_BINDINGS_AMLOGIC_MESON_A1_RESET_H
+#define _DT_BINDINGS_AMLOGIC_MESON_A1_RESET_H
+
+/* RESET0 */
+/* 0 */
+#define RESET_AM2AXI_VAD 1
+/* 2-3 */
+#define RESET_PSRAM 4
+#define RESET_PAD_CTRL 5
+/* 6 */
+#define RESET_TEMP_SENSOR 7
+#define RESET_AM2AXI_DEV 8
+/* 9 */
+#define RESET_SPICC_A 10
+#define RESET_MSR_CLK 11
+#define RESET_AUDIO 12
+#define RESET_ANALOG_CTRL 13
+#define RESET_SAR_ADC 14
+#define RESET_AUDIO_VAD 15
+#define RESET_CEC 16
+#define RESET_PWM_EF 17
+#define RESET_PWM_CD 18
+#define RESET_PWM_AB 19
+/* 20 */
+#define RESET_IR_CTRL 21
+#define RESET_I2C_S_A 22
+/* 23 */
+#define RESET_I2C_M_D 24
+#define RESET_I2C_M_C 25
+#define RESET_I2C_M_B 26
+#define RESET_I2C_M_A 27
+#define RESET_I2C_PROD_AHB 28
+#define RESET_I2C_PROD 29
+/* 30-31 */
+
+/* RESET1 */
+#define RESET_ACODEC 32
+#define RESET_DMA 33
+#define RESET_SD_EMMC_A 34
+/* 35 */
+#define RESET_USBCTRL 36
+/* 37 */
+#define RESET_USBPHY 38
+/* 39-41 */
+#define RESET_RSA 42
+#define RESET_DMC 43
+/* 44 */
+#define RESET_IRQ_CTRL 45
+/* 46 */
+#define RESET_NIC_VAD 47
+#define RESET_NIC_AXI 48
+#define RESET_RAMA 49
+#define RESET_RAMB 50
+/* 51-52 */
+#define RESET_ROM 53
+#define RESET_SPIFC 54
+#define RESET_GIC 55
+#define RESET_UART_C 56
+#define RESET_UART_B 57
+#define RESET_UART_A 58
+#define RESET_OSC_RING 59
+/* 60-63 */
+
+/* RESET2 */
+/* 64-95 */
+
+#endif
diff --git a/include/dt-bindings/reset/amlogic,meson-axg-audio-arb.h b/include/dt-bindings/reset/amlogic,meson-axg-audio-arb.h
index 05c36367875c..1ef807856cb8 100644
--- a/include/dt-bindings/reset/amlogic,meson-axg-audio-arb.h
+++ b/include/dt-bindings/reset/amlogic,meson-axg-audio-arb.h
@@ -13,5 +13,7 @@
#define AXG_ARB_FRDDR_A 3
#define AXG_ARB_FRDDR_B 4
#define AXG_ARB_FRDDR_C 5
+#define AXG_ARB_TODDR_D 6
+#define AXG_ARB_FRDDR_D 7
#endif /* _DT_BINDINGS_AMLOGIC_MESON_AXG_AUDIO_ARB_H */
diff --git a/include/dt-bindings/reset/amlogic,meson-g12a-audio-reset.h b/include/dt-bindings/reset/amlogic,meson-g12a-audio-reset.h
index 14b78dabed0e..f805129ca7af 100644
--- a/include/dt-bindings/reset/amlogic,meson-g12a-audio-reset.h
+++ b/include/dt-bindings/reset/amlogic,meson-g12a-audio-reset.h
@@ -35,4 +35,19 @@
#define AUD_RESET_TOHDMITX 24
#define AUD_RESET_CLKTREE 25
+/* SM1 added resets */
+#define AUD_RESET_RESAMPLE_B 26
+#define AUD_RESET_TOVAD 27
+#define AUD_RESET_LOCKER 28
+#define AUD_RESET_SPDIFIN_LB 29
+#define AUD_RESET_FRATV 30
+#define AUD_RESET_FRHDMIRX 31
+#define AUD_RESET_FRDDR_D 32
+#define AUD_RESET_TODDR_D 33
+#define AUD_RESET_LOOPBACK_B 34
+#define AUD_RESET_EARCTX 35
+#define AUD_RESET_EARCRX 36
+#define AUD_RESET_FRDDR_E 37
+#define AUD_RESET_TODDR_E 38
+
#endif
diff --git a/include/dt-bindings/reset/amlogic,meson8b-reset.h b/include/dt-bindings/reset/amlogic,meson8b-reset.h
index c614438bcbdb..fbc524a900da 100644
--- a/include/dt-bindings/reset/amlogic,meson8b-reset.h
+++ b/include/dt-bindings/reset/amlogic,meson8b-reset.h
@@ -46,9 +46,9 @@
#define RESET_VD_RMEM 64
#define RESET_AUDIN 65
#define RESET_DBLK 66
-#define RESET_PIC_DC 66
-#define RESET_PSC 66
-#define RESET_NAND 66
+#define RESET_PIC_DC 67
+#define RESET_PSC 68
+#define RESET_NAND 69
#define RESET_GE2D 70
#define RESET_PARSER_REG 71
#define RESET_PARSER_FETCH 72
diff --git a/include/dt-bindings/reset/nuvoton,npcm7xx-reset.h b/include/dt-bindings/reset/nuvoton,npcm7xx-reset.h
new file mode 100644
index 000000000000..df088e68a9ba
--- /dev/null
+++ b/include/dt-bindings/reset/nuvoton,npcm7xx-reset.h
@@ -0,0 +1,91 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+// Copyright (c) 2019 Nuvoton Technology corporation.
+
+#ifndef _DT_BINDINGS_NPCM7XX_RESET_H
+#define _DT_BINDINGS_NPCM7XX_RESET_H
+
+#define NPCM7XX_RESET_IPSRST1 0x20
+#define NPCM7XX_RESET_IPSRST2 0x24
+#define NPCM7XX_RESET_IPSRST3 0x34
+
+/* Reset lines on IP1 reset module (NPCM7XX_RESET_IPSRST1) */
+#define NPCM7XX_RESET_FIU3 1
+#define NPCM7XX_RESET_UDC1 5
+#define NPCM7XX_RESET_EMC1 6
+#define NPCM7XX_RESET_UART_2_3 7
+#define NPCM7XX_RESET_UDC2 8
+#define NPCM7XX_RESET_PECI 9
+#define NPCM7XX_RESET_AES 10
+#define NPCM7XX_RESET_UART_0_1 11
+#define NPCM7XX_RESET_MC 12
+#define NPCM7XX_RESET_SMB2 13
+#define NPCM7XX_RESET_SMB3 14
+#define NPCM7XX_RESET_SMB4 15
+#define NPCM7XX_RESET_SMB5 16
+#define NPCM7XX_RESET_PWM_M0 18
+#define NPCM7XX_RESET_TIMER_0_4 19
+#define NPCM7XX_RESET_TIMER_5_9 20
+#define NPCM7XX_RESET_EMC2 21
+#define NPCM7XX_RESET_UDC4 22
+#define NPCM7XX_RESET_UDC5 23
+#define NPCM7XX_RESET_UDC6 24
+#define NPCM7XX_RESET_UDC3 25
+#define NPCM7XX_RESET_ADC 27
+#define NPCM7XX_RESET_SMB6 28
+#define NPCM7XX_RESET_SMB7 29
+#define NPCM7XX_RESET_SMB0 30
+#define NPCM7XX_RESET_SMB1 31
+
+/* Reset lines on IP2 reset module (NPCM7XX_RESET_IPSRST2) */
+#define NPCM7XX_RESET_MFT0 0
+#define NPCM7XX_RESET_MFT1 1
+#define NPCM7XX_RESET_MFT2 2
+#define NPCM7XX_RESET_MFT3 3
+#define NPCM7XX_RESET_MFT4 4
+#define NPCM7XX_RESET_MFT5 5
+#define NPCM7XX_RESET_MFT6 6
+#define NPCM7XX_RESET_MFT7 7
+#define NPCM7XX_RESET_MMC 8
+#define NPCM7XX_RESET_SDHC 9
+#define NPCM7XX_RESET_GFX_SYS 10
+#define NPCM7XX_RESET_AHB_PCIBRG 11
+#define NPCM7XX_RESET_VDMA 12
+#define NPCM7XX_RESET_ECE 13
+#define NPCM7XX_RESET_VCD 14
+#define NPCM7XX_RESET_OTP 16
+#define NPCM7XX_RESET_SIOX1 18
+#define NPCM7XX_RESET_SIOX2 19
+#define NPCM7XX_RESET_3DES 21
+#define NPCM7XX_RESET_PSPI1 22
+#define NPCM7XX_RESET_PSPI2 23
+#define NPCM7XX_RESET_GMAC2 25
+#define NPCM7XX_RESET_USB_HOST 26
+#define NPCM7XX_RESET_GMAC1 28
+#define NPCM7XX_RESET_CP 31
+
+/* Reset lines on IP3 reset module (NPCM7XX_RESET_IPSRST3) */
+#define NPCM7XX_RESET_PWM_M1 0
+#define NPCM7XX_RESET_SMB12 1
+#define NPCM7XX_RESET_SPIX 2
+#define NPCM7XX_RESET_SMB13 3
+#define NPCM7XX_RESET_UDC0 4
+#define NPCM7XX_RESET_UDC7 5
+#define NPCM7XX_RESET_UDC8 6
+#define NPCM7XX_RESET_UDC9 7
+#define NPCM7XX_RESET_PCI_MAILBOX 9
+#define NPCM7XX_RESET_SMB14 12
+#define NPCM7XX_RESET_SHA 13
+#define NPCM7XX_RESET_SEC_ECC 14
+#define NPCM7XX_RESET_PCIE_RC 15
+#define NPCM7XX_RESET_TIMER_10_14 16
+#define NPCM7XX_RESET_RNG 17
+#define NPCM7XX_RESET_SMB15 18
+#define NPCM7XX_RESET_SMB8 19
+#define NPCM7XX_RESET_SMB9 20
+#define NPCM7XX_RESET_SMB10 21
+#define NPCM7XX_RESET_SMB11 22
+#define NPCM7XX_RESET_ESPI 23
+#define NPCM7XX_RESET_USB_PHY_1 24
+#define NPCM7XX_RESET_USB_PHY_2 25
+
+#endif
diff --git a/include/dt-bindings/reset/qcom,gcc-ipq6018.h b/include/dt-bindings/reset/qcom,gcc-ipq6018.h
new file mode 100644
index 000000000000..02a220ad0105
--- /dev/null
+++ b/include/dt-bindings/reset/qcom,gcc-ipq6018.h
@@ -0,0 +1,157 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ */
+
+#ifndef _DT_BINDINGS_RESET_IPQ_GCC_6018_H
+#define _DT_BINDINGS_RESET_IPQ_GCC_6018_H
+
+#define GCC_BLSP1_BCR 0
+#define GCC_BLSP1_QUP1_BCR 1
+#define GCC_BLSP1_UART1_BCR 2
+#define GCC_BLSP1_QUP2_BCR 3
+#define GCC_BLSP1_UART2_BCR 4
+#define GCC_BLSP1_QUP3_BCR 5
+#define GCC_BLSP1_UART3_BCR 6
+#define GCC_BLSP1_QUP4_BCR 7
+#define GCC_BLSP1_UART4_BCR 8
+#define GCC_BLSP1_QUP5_BCR 9
+#define GCC_BLSP1_UART5_BCR 10
+#define GCC_BLSP1_QUP6_BCR 11
+#define GCC_BLSP1_UART6_BCR 12
+#define GCC_IMEM_BCR 13
+#define GCC_SMMU_BCR 14
+#define GCC_APSS_TCU_BCR 15
+#define GCC_SMMU_XPU_BCR 16
+#define GCC_PCNOC_TBU_BCR 17
+#define GCC_SMMU_CFG_BCR 18
+#define GCC_PRNG_BCR 19
+#define GCC_BOOT_ROM_BCR 20
+#define GCC_CRYPTO_BCR 21
+#define GCC_WCSS_BCR 22
+#define GCC_WCSS_Q6_BCR 23
+#define GCC_NSS_BCR 24
+#define GCC_SEC_CTRL_BCR 25
+#define GCC_DDRSS_BCR 26
+#define GCC_SYSTEM_NOC_BCR 27
+#define GCC_PCNOC_BCR 28
+#define GCC_TCSR_BCR 29
+#define GCC_QDSS_BCR 30
+#define GCC_DCD_BCR 31
+#define GCC_MSG_RAM_BCR 32
+#define GCC_MPM_BCR 33
+#define GCC_SPDM_BCR 34
+#define GCC_RBCPR_BCR 35
+#define GCC_RBCPR_MX_BCR 36
+#define GCC_TLMM_BCR 37
+#define GCC_RBCPR_WCSS_BCR 38
+#define GCC_USB0_PHY_BCR 39
+#define GCC_USB3PHY_0_PHY_BCR 40
+#define GCC_USB0_BCR 41
+#define GCC_USB1_BCR 42
+#define GCC_QUSB2_0_PHY_BCR 43
+#define GCC_QUSB2_1_PHY_BCR 44
+#define GCC_SDCC1_BCR 45
+#define GCC_SNOC_BUS_TIMEOUT0_BCR 46
+#define GCC_SNOC_BUS_TIMEOUT1_BCR 47
+#define GCC_SNOC_BUS_TIMEOUT2_BCR 48
+#define GCC_PCNOC_BUS_TIMEOUT0_BCR 49
+#define GCC_PCNOC_BUS_TIMEOUT1_BCR 50
+#define GCC_PCNOC_BUS_TIMEOUT2_BCR 51
+#define GCC_PCNOC_BUS_TIMEOUT3_BCR 52
+#define GCC_PCNOC_BUS_TIMEOUT4_BCR 53
+#define GCC_PCNOC_BUS_TIMEOUT5_BCR 54
+#define GCC_PCNOC_BUS_TIMEOUT6_BCR 55
+#define GCC_PCNOC_BUS_TIMEOUT7_BCR 56
+#define GCC_PCNOC_BUS_TIMEOUT8_BCR 57
+#define GCC_PCNOC_BUS_TIMEOUT9_BCR 58
+#define GCC_UNIPHY0_BCR 59
+#define GCC_UNIPHY1_BCR 60
+#define GCC_CMN_12GPLL_BCR 61
+#define GCC_QPIC_BCR 62
+#define GCC_MDIO_BCR 63
+#define GCC_WCSS_CORE_TBU_BCR 64
+#define GCC_WCSS_Q6_TBU_BCR 65
+#define GCC_USB0_TBU_BCR 66
+#define GCC_PCIE0_TBU_BCR 67
+#define GCC_PCIE0_BCR 68
+#define GCC_PCIE0_PHY_BCR 69
+#define GCC_PCIE0PHY_PHY_BCR 70
+#define GCC_PCIE0_LINK_DOWN_BCR 71
+#define GCC_DCC_BCR 72
+#define GCC_APC0_VOLTAGE_DROOP_DETECTOR_BCR 73
+#define GCC_SMMU_CATS_BCR 74
+#define GCC_UBI0_AXI_ARES 75
+#define GCC_UBI0_AHB_ARES 76
+#define GCC_UBI0_NC_AXI_ARES 77
+#define GCC_UBI0_DBG_ARES 78
+#define GCC_UBI0_CORE_CLAMP_ENABLE 79
+#define GCC_UBI0_CLKRST_CLAMP_ENABLE 80
+#define GCC_UBI0_UTCM_ARES 81
+#define GCC_NSS_CFG_ARES 82
+#define GCC_NSS_NOC_ARES 83
+#define GCC_NSS_CRYPTO_ARES 84
+#define GCC_NSS_CSR_ARES 85
+#define GCC_NSS_CE_APB_ARES 86
+#define GCC_NSS_CE_AXI_ARES 87
+#define GCC_NSSNOC_CE_APB_ARES 88
+#define GCC_NSSNOC_CE_AXI_ARES 89
+#define GCC_NSSNOC_UBI0_AHB_ARES 90
+#define GCC_NSSNOC_SNOC_ARES 91
+#define GCC_NSSNOC_CRYPTO_ARES 92
+#define GCC_NSSNOC_ATB_ARES 93
+#define GCC_NSSNOC_QOSGEN_REF_ARES 94
+#define GCC_NSSNOC_TIMEOUT_REF_ARES 95
+#define GCC_PCIE0_PIPE_ARES 96
+#define GCC_PCIE0_SLEEP_ARES 97
+#define GCC_PCIE0_CORE_STICKY_ARES 98
+#define GCC_PCIE0_AXI_MASTER_ARES 99
+#define GCC_PCIE0_AXI_SLAVE_ARES 100
+#define GCC_PCIE0_AHB_ARES 101
+#define GCC_PCIE0_AXI_MASTER_STICKY_ARES 102
+#define GCC_PCIE0_AXI_SLAVE_STICKY_ARES 103
+#define GCC_PPE_FULL_RESET 104
+#define GCC_UNIPHY0_SOFT_RESET 105
+#define GCC_UNIPHY0_XPCS_RESET 106
+#define GCC_UNIPHY1_SOFT_RESET 107
+#define GCC_UNIPHY1_XPCS_RESET 108
+#define GCC_EDMA_HW_RESET 109
+#define GCC_ADSS_BCR 110
+#define GCC_NSS_NOC_TBU_BCR 111
+#define GCC_NSSPORT1_RESET 112
+#define GCC_NSSPORT2_RESET 113
+#define GCC_NSSPORT3_RESET 114
+#define GCC_NSSPORT4_RESET 115
+#define GCC_NSSPORT5_RESET 116
+#define GCC_UNIPHY0_PORT1_ARES 117
+#define GCC_UNIPHY0_PORT2_ARES 118
+#define GCC_UNIPHY0_PORT3_ARES 119
+#define GCC_UNIPHY0_PORT4_ARES 120
+#define GCC_UNIPHY0_PORT5_ARES 121
+#define GCC_UNIPHY0_PORT_4_5_RESET 122
+#define GCC_UNIPHY0_PORT_4_RESET 123
+#define GCC_LPASS_BCR 124
+#define GCC_UBI32_TBU_BCR 125
+#define GCC_LPASS_TBU_BCR 126
+#define GCC_WCSSAON_RESET 127
+#define GCC_LPASS_Q6_AXIM_ARES 128
+#define GCC_LPASS_Q6SS_TSCTR_1TO2_ARES 129
+#define GCC_LPASS_Q6SS_TRIG_ARES 130
+#define GCC_LPASS_Q6_ATBM_AT_ARES 131
+#define GCC_LPASS_Q6_PCLKDBG_ARES 132
+#define GCC_LPASS_CORE_AXIM_ARES 133
+#define GCC_LPASS_SNOC_CFG_ARES 134
+#define GCC_WCSS_DBG_ARES 135
+#define GCC_WCSS_ECAHB_ARES 136
+#define GCC_WCSS_ACMT_ARES 137
+#define GCC_WCSS_DBG_BDG_ARES 138
+#define GCC_WCSS_AHB_S_ARES 139
+#define GCC_WCSS_AXI_M_ARES 140
+#define GCC_Q6SS_DBG_ARES 141
+#define GCC_Q6_AHB_S_ARES 142
+#define GCC_Q6_AHB_ARES 143
+#define GCC_Q6_AXIM2_ARES 144
+#define GCC_Q6_AXIM_ARES 145
+#define GCC_UBI0_CORE_ARES 146
+
+#endif
diff --git a/include/dt-bindings/reset/realtek,rtd1295.h b/include/dt-bindings/reset/realtek,rtd1295.h
new file mode 100644
index 000000000000..2c0cb6afe816
--- /dev/null
+++ b/include/dt-bindings/reset/realtek,rtd1295.h
@@ -0,0 +1,111 @@
+/* SPDX-License-Identifier: (GPL-2.0-or-later OR BSD-2-Clause) */
+/*
+ * Realtek RTD1295 reset controllers
+ *
+ * Copyright (c) 2017 Andreas Färber
+ */
+#ifndef DT_BINDINGS_RESET_RTD1295_H
+#define DT_BINDINGS_RESET_RTD1295_H
+
+/* soft reset 1 */
+#define RTD1295_RSTN_MISC 0
+#define RTD1295_RSTN_NAT 1
+#define RTD1295_RSTN_USB3_PHY0_POW 2
+#define RTD1295_RSTN_GSPI 3
+#define RTD1295_RSTN_USB3_P0_MDIO 4
+#define RTD1295_RSTN_SATA_0 5
+#define RTD1295_RSTN_USB 6
+#define RTD1295_RSTN_SATA_PHY_0 7
+#define RTD1295_RSTN_USB_PHY0 8
+#define RTD1295_RSTN_USB_PHY1 9
+#define RTD1295_RSTN_SATA_PHY_POW_0 10
+#define RTD1295_RSTN_SATA_FUNC_EXIST_0 11
+#define RTD1295_RSTN_HDMI 12
+#define RTD1295_RSTN_VE1 13
+#define RTD1295_RSTN_VE2 14
+#define RTD1295_RSTN_VE3 15
+#define RTD1295_RSTN_ETN 16
+#define RTD1295_RSTN_AIO 17
+#define RTD1295_RSTN_GPU 18
+#define RTD1295_RSTN_TVE 19
+#define RTD1295_RSTN_VO 20
+#define RTD1295_RSTN_LVDS 21
+#define RTD1295_RSTN_SE 22
+#define RTD1295_RSTN_DCU 23
+#define RTD1295_RSTN_DC_PHY 24
+#define RTD1295_RSTN_CP 25
+#define RTD1295_RSTN_MD 26
+#define RTD1295_RSTN_TP 27
+#define RTD1295_RSTN_AE 28
+#define RTD1295_RSTN_NF 29
+#define RTD1295_RSTN_MIPI 30
+#define RTD1295_RSTN_RSA 31
+
+/* soft reset 2 */
+#define RTD1295_RSTN_ACPU 0
+#define RTD1295_RSTN_JPEG 1
+#define RTD1295_RSTN_USB_PHY3 2
+#define RTD1295_RSTN_USB_PHY2 3
+#define RTD1295_RSTN_USB3_PHY1_POW 4
+#define RTD1295_RSTN_USB3_P1_MDIO 5
+#define RTD1295_RSTN_PCIE0_STITCH 6
+#define RTD1295_RSTN_PCIE0_PHY 7
+#define RTD1295_RSTN_PCIE0 8
+#define RTD1295_RSTN_PCR_CNT 9
+#define RTD1295_RSTN_CR 10
+#define RTD1295_RSTN_EMMC 11
+#define RTD1295_RSTN_SDIO 12
+#define RTD1295_RSTN_PCIE0_CORE 13
+#define RTD1295_RSTN_PCIE0_POWER 14
+#define RTD1295_RSTN_PCIE0_NONSTICH 15
+#define RTD1295_RSTN_PCIE1_PHY 16
+#define RTD1295_RSTN_PCIE1 17
+#define RTD1295_RSTN_I2C_5 18
+#define RTD1295_RSTN_PCIE1_STITCH 19
+#define RTD1295_RSTN_PCIE1_CORE 20
+#define RTD1295_RSTN_PCIE1_POWER 21
+#define RTD1295_RSTN_PCIE1_NONSTICH 22
+#define RTD1295_RSTN_I2C_4 23
+#define RTD1295_RSTN_I2C_3 24
+#define RTD1295_RSTN_I2C_2 25
+#define RTD1295_RSTN_I2C_1 26
+#define RTD1295_RSTN_UR2 27
+#define RTD1295_RSTN_UR1 28
+#define RTD1295_RSTN_MISC_SC 29
+#define RTD1295_RSTN_CBUS_TX 30
+#define RTD1295_RSTN_SDS_PHY 31
+
+/* soft reset 4 */
+#define RTD1295_RSTN_DCPHY_CRT 0
+#define RTD1295_RSTN_DCPHY_ALERT_RX 1
+#define RTD1295_RSTN_DCPHY_PTR 2
+#define RTD1295_RSTN_DCPHY_LDO 3
+#define RTD1295_RSTN_DCPHY_SSC_DIG 4
+#define RTD1295_RSTN_HDMIRX 5
+#define RTD1295_RSTN_CBUSRX 6
+#define RTD1295_RSTN_SATA_PHY_POW_1 7
+#define RTD1295_RSTN_SATA_FUNC_EXIST_1 8
+#define RTD1295_RSTN_SATA_PHY_1 9
+#define RTD1295_RSTN_SATA_1 10
+#define RTD1295_RSTN_FAN 11
+#define RTD1295_RSTN_HDMIRX_WRAP 12
+#define RTD1295_RSTN_PCIE0_PHY_MDIO 13
+#define RTD1295_RSTN_PCIE1_PHY_MDIO 14
+#define RTD1295_RSTN_DISP 15
+
+/* iso reset */
+#define RTD1295_ISO_RSTN_IR 1
+#define RTD1295_ISO_RSTN_CEC0 2
+#define RTD1295_ISO_RSTN_CEC1 3
+#define RTD1295_ISO_RSTN_DP 4
+#define RTD1295_ISO_RSTN_CBUSTX 5
+#define RTD1295_ISO_RSTN_CBUSRX 6
+#define RTD1295_ISO_RSTN_EFUSE 7
+#define RTD1295_ISO_RSTN_UR0 8
+#define RTD1295_ISO_RSTN_GMAC 9
+#define RTD1295_ISO_RSTN_GPHY 10
+#define RTD1295_ISO_RSTN_I2C_0 11
+#define RTD1295_ISO_RSTN_I2C_1 12
+#define RTD1295_ISO_RSTN_CBUS 13
+
+#endif
diff --git a/include/dt-bindings/sound/samsung-i2s.h b/include/dt-bindings/sound/samsung-i2s.h
index 77545f14c379..250de0d6c734 100644
--- a/include/dt-bindings/sound/samsung-i2s.h
+++ b/include/dt-bindings/sound/samsung-i2s.h
@@ -2,8 +2,14 @@
#ifndef _DT_BINDINGS_SAMSUNG_I2S_H
#define _DT_BINDINGS_SAMSUNG_I2S_H
-#define CLK_I2S_CDCLK 0
-#define CLK_I2S_RCLK_SRC 1
-#define CLK_I2S_RCLK_PSR 2
+#define CLK_I2S_CDCLK 0 /* the CDCLK (CODECLKO) gate clock */
+
+#define CLK_I2S_RCLK_SRC 1 /* the RCLKSRC mux clock (corresponding to
+ * RCLKSRC bit in IISMOD register)
+ */
+
+#define CLK_I2S_RCLK_PSR 2 /* the RCLK prescaler divider clock
+ * (corresponding to the IISPSR register)
+ */
#endif /* _DT_BINDINGS_SAMSUNG_I2S_H */
diff --git a/include/dt-bindings/thermal/thermal_exynos.h b/include/dt-bindings/thermal/thermal_exynos.h
index 642e4e7f4084..52fcb51dda3c 100644
--- a/include/dt-bindings/thermal/thermal_exynos.h
+++ b/include/dt-bindings/thermal/thermal_exynos.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0+ */
/*
- * thermal_exynos.h - Samsung EXYNOS TMU device tree definitions
+ * thermal_exynos.h - Samsung Exynos TMU device tree definitions
*
* Copyright (C) 2014 Samsung Electronics
* Lukasz Majewski <l.majewski@samsung.com>
diff --git a/include/keys/system_keyring.h b/include/keys/system_keyring.h
index c1a96fdf598b..fb8b07daa9d1 100644
--- a/include/keys/system_keyring.h
+++ b/include/keys/system_keyring.h
@@ -35,12 +35,18 @@ extern int restrict_link_by_builtin_and_secondary_trusted(
extern int mark_hash_blacklisted(const char *hash);
extern int is_hash_blacklisted(const u8 *hash, size_t hash_len,
const char *type);
+extern int is_binary_blacklisted(const u8 *hash, size_t hash_len);
#else
static inline int is_hash_blacklisted(const u8 *hash, size_t hash_len,
const char *type)
{
return 0;
}
+
+static inline int is_binary_blacklisted(const u8 *hash, size_t hash_len)
+{
+ return 0;
+}
#endif
#ifdef CONFIG_IMA_BLACKLIST_KEYRING
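A minimal sketch of how a caller might use the new is_binary_blacklisted() helper, assuming the convention implied by the stub above that a zero return means "not blacklisted"; the caller name and policy below are hypothetical:

#include <linux/errno.h>
#include <linux/types.h>
#include <keys/system_keyring.h>

/* Hypothetical gate: refuse a binary whose hash is on the blacklist keyring. */
static int example_check_binary_hash(const u8 *digest, size_t digest_len)
{
        /* Non-zero is treated as "hash found on the blacklist keyring". */
        if (is_binary_blacklisted(digest, digest_len))
                return -EPERM;

        return 0;
}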
diff --git a/include/keys/trusted.h b/include/keys/trusted_tpm.h
index 0071298b9b28..a56d8e1298f2 100644
--- a/include/keys/trusted.h
+++ b/include/keys/trusted_tpm.h
@@ -1,14 +1,13 @@
/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __TRUSTED_KEY_H
-#define __TRUSTED_KEY_H
+#ifndef __TRUSTED_TPM_H
+#define __TRUSTED_TPM_H
+
+#include <keys/trusted-type.h>
+#include <linux/tpm_command.h>
/* implementation specific TPM constants */
#define MAX_BUF_SIZE 1024
#define TPM_GETRANDOM_SIZE 14
-#define TPM_OSAP_SIZE 36
-#define TPM_OIAP_SIZE 10
-#define TPM_SEAL_SIZE 87
-#define TPM_UNSEAL_SIZE 104
#define TPM_SIZE_OFFSET 2
#define TPM_RETURN_OFFSET 6
#define TPM_DATA_OFFSET 10
@@ -17,13 +16,6 @@
#define LOAD32N(buffer, offset) (*(uint32_t *)&buffer[offset])
#define LOAD16(buffer, offset) (ntohs(*(uint16_t *)&buffer[offset]))
-struct tpm_buf {
- int len;
- unsigned char data[MAX_BUF_SIZE];
-};
-
-#define INIT_BUF(tb) (tb->len = 0)
-
struct osapsess {
uint32_t handle;
unsigned char secret[SHA1_DIGEST_SIZE];
@@ -48,6 +40,13 @@ int TSS_checkhmac1(unsigned char *buffer,
int trusted_tpm_send(unsigned char *cmd, size_t buflen);
int oiap(struct tpm_buf *tb, uint32_t *handle, unsigned char *nonce);
+int tpm2_seal_trusted(struct tpm_chip *chip,
+ struct trusted_key_payload *payload,
+ struct trusted_key_options *options);
+int tpm2_unseal_trusted(struct tpm_chip *chip,
+ struct trusted_key_payload *payload,
+ struct trusted_key_options *options);
+
#define TPM_DEBUG 0
#if TPM_DEBUG
@@ -109,28 +108,4 @@ static inline void dump_tpm_buf(unsigned char *buf)
{
}
#endif
-
-static inline void store8(struct tpm_buf *buf, const unsigned char value)
-{
- buf->data[buf->len++] = value;
-}
-
-static inline void store16(struct tpm_buf *buf, const uint16_t value)
-{
- *(uint16_t *) & buf->data[buf->len] = htons(value);
- buf->len += sizeof value;
-}
-
-static inline void store32(struct tpm_buf *buf, const uint32_t value)
-{
- *(uint32_t *) & buf->data[buf->len] = htonl(value);
- buf->len += sizeof value;
-}
-
-static inline void storebytes(struct tpm_buf *buf, const unsigned char *in,
- const int len)
-{
- memcpy(buf->data + buf->len, in, len);
- buf->len += len;
-}
#endif
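The removed store8()/store16()/store32() helpers are superseded by the generic struct tpm_buf API declared in <linux/tpm.h>. A minimal sketch of building a TPM 1.2 GetRandom command with that API and the constants from the newly included <linux/tpm_command.h> might look like the following; the function name is hypothetical and error handling is abbreviated:

#include <linux/tpm.h>
#include <linux/tpm_command.h>
#include <keys/trusted_tpm.h>

/* Build and send TPM_ORD_GETRANDOM instead of hand-packing bytes with store32(). */
static int example_tpm1_getrandom(u32 bytes)
{
        struct tpm_buf tb;
        int rc;

        rc = tpm_buf_init(&tb, TPM_TAG_RQU_COMMAND, TPM_ORD_GETRANDOM);
        if (rc)
                return rc;

        tpm_buf_append_u32(&tb, bytes);         /* number of random bytes requested */
        rc = trusted_tpm_send(tb.data, MAX_BUF_SIZE);
        tpm_buf_destroy(&tb);

        return rc;
}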
diff --git a/include/kunit/assert.h b/include/kunit/assert.h
new file mode 100644
index 000000000000..ad889b539ab3
--- /dev/null
+++ b/include/kunit/assert.h
@@ -0,0 +1,357 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Assertion and expectation serialization API.
+ *
+ * Copyright (C) 2019, Google LLC.
+ * Author: Brendan Higgins <brendanhiggins@google.com>
+ */
+
+#ifndef _KUNIT_ASSERT_H
+#define _KUNIT_ASSERT_H
+
+#include <linux/err.h>
+#include <linux/kernel.h>
+
+struct kunit;
+struct string_stream;
+
+/**
+ * enum kunit_assert_type - Type of expectation/assertion.
+ * @KUNIT_ASSERTION: Used to denote that a kunit_assert represents an assertion.
+ * @KUNIT_EXPECTATION: Denotes that a kunit_assert represents an expectation.
+ *
+ * Used in conjunction with a &struct kunit_assert to denote whether it
+ * represents an expectation or an assertion.
+ */
+enum kunit_assert_type {
+ KUNIT_ASSERTION,
+ KUNIT_EXPECTATION,
+};
+
+/**
+ * struct kunit_assert - Data for printing a failed assertion or expectation.
+ * @test: the test case this expectation/assertion is associated with.
+ * @type: the type (either an expectation or an assertion) of this kunit_assert.
+ * @line: the source code line number that the expectation/assertion is at.
+ * @file: the file path of the source file that the expectation/assertion is in.
+ * @message: an optional message to provide additional context.
+ * @format: a function which formats the data in this kunit_assert to a string.
+ *
+ * Represents a failed expectation/assertion. Contains all the data necessary to
+ * format a string to a user reporting the failure.
+ */
+struct kunit_assert {
+ struct kunit *test;
+ enum kunit_assert_type type;
+ int line;
+ const char *file;
+ struct va_format message;
+ void (*format)(const struct kunit_assert *assert,
+ struct string_stream *stream);
+};
+
+/**
+ * KUNIT_INIT_VA_FMT_NULL - Default initializer for struct va_format.
+ *
+ * Used inside a struct initialization block to initialize struct va_format to
+ * default values where fmt and va are null.
+ */
+#define KUNIT_INIT_VA_FMT_NULL { .fmt = NULL, .va = NULL }
+
+/**
+ * KUNIT_INIT_ASSERT_STRUCT() - Initializer for a &struct kunit_assert.
+ * @kunit: The test case that this expectation/assertion is associated with.
+ * @assert_type: The type (assertion or expectation) of this kunit_assert.
+ * @fmt: The formatting function which builds a string out of this kunit_assert.
+ *
+ * The base initializer for a &struct kunit_assert.
+ */
+#define KUNIT_INIT_ASSERT_STRUCT(kunit, assert_type, fmt) { \
+ .test = kunit, \
+ .type = assert_type, \
+ .file = __FILE__, \
+ .line = __LINE__, \
+ .message = KUNIT_INIT_VA_FMT_NULL, \
+ .format = fmt \
+}
+
+void kunit_base_assert_format(const struct kunit_assert *assert,
+ struct string_stream *stream);
+
+void kunit_assert_print_msg(const struct kunit_assert *assert,
+ struct string_stream *stream);
+
+/**
+ * struct kunit_fail_assert - Represents a plain fail expectation/assertion.
+ * @assert: The parent of this type.
+ *
+ * Represents a simple KUNIT_FAIL/KUNIT_ASSERT_FAILURE that always fails.
+ */
+struct kunit_fail_assert {
+ struct kunit_assert assert;
+};
+
+void kunit_fail_assert_format(const struct kunit_assert *assert,
+ struct string_stream *stream);
+
+/**
+ * KUNIT_INIT_FAIL_ASSERT_STRUCT() - Initializer for &struct kunit_fail_assert.
+ * @test: The test case that this expectation/assertion is associated with.
+ * @type: The type (assertion or expectation) of this kunit_assert.
+ *
+ * Initializes a &struct kunit_fail_assert. Intended to be used in
+ * KUNIT_EXPECT_* and KUNIT_ASSERT_* macros.
+ */
+#define KUNIT_INIT_FAIL_ASSERT_STRUCT(test, type) { \
+ .assert = KUNIT_INIT_ASSERT_STRUCT(test, \
+ type, \
+ kunit_fail_assert_format) \
+}
+
+/**
+ * struct kunit_unary_assert - Represents a KUNIT_{EXPECT|ASSERT}_{TRUE|FALSE}
+ * @assert: The parent of this type.
+ * @condition: A string representation of a conditional expression.
+ * @expected_true: True if of type KUNIT_{EXPECT|ASSERT}_TRUE, false otherwise.
+ *
+ * Represents a simple expectation or assertion that simply asserts something is
+ * true or false. In other words, represents the expectations:
+ * KUNIT_{EXPECT|ASSERT}_{TRUE|FALSE}
+ */
+struct kunit_unary_assert {
+ struct kunit_assert assert;
+ const char *condition;
+ bool expected_true;
+};
+
+void kunit_unary_assert_format(const struct kunit_assert *assert,
+ struct string_stream *stream);
+
+/**
+ * KUNIT_INIT_UNARY_ASSERT_STRUCT() - Initializes &struct kunit_unary_assert.
+ * @test: The test case that this expectation/assertion is associated with.
+ * @type: The type (assertion or expectation) of this kunit_assert.
+ * @cond: A string representation of the expression asserted true or false.
+ * @expect_true: True if of type KUNIT_{EXPECT|ASSERT}_TRUE, false otherwise.
+ *
+ * Initializes a &struct kunit_unary_assert. Intended to be used in
+ * KUNIT_EXPECT_* and KUNIT_ASSERT_* macros.
+ */
+#define KUNIT_INIT_UNARY_ASSERT_STRUCT(test, type, cond, expect_true) { \
+ .assert = KUNIT_INIT_ASSERT_STRUCT(test, \
+ type, \
+ kunit_unary_assert_format), \
+ .condition = cond, \
+ .expected_true = expect_true \
+}
+
+/**
+ * struct kunit_ptr_not_err_assert - An expectation/assertion that a pointer is
+ * not NULL and not a -errno.
+ * @assert: The parent of this type.
+ * @text: A string representation of the expression passed to the expectation.
+ * @value: The actual evaluated pointer value of the expression.
+ *
+ * Represents an expectation/assertion that a pointer is not NULL and does
+ * not contain a -errno. (See IS_ERR_OR_NULL().)
+ */
+struct kunit_ptr_not_err_assert {
+ struct kunit_assert assert;
+ const char *text;
+ const void *value;
+};
+
+void kunit_ptr_not_err_assert_format(const struct kunit_assert *assert,
+ struct string_stream *stream);
+
+/**
+ * KUNIT_INIT_PTR_NOT_ERR_ASSERT_STRUCT() - Initializes a
+ * &struct kunit_ptr_not_err_assert.
+ * @test: The test case that this expectation/assertion is associated with.
+ * @type: The type (assertion or expectation) of this kunit_assert.
+ * @txt: A string representation of the expression passed to the expectation.
+ * @val: The actual evaluated pointer value of the expression.
+ *
+ * Initializes a &struct kunit_ptr_not_err_assert. Intended to be used in
+ * KUNIT_EXPECT_* and KUNIT_ASSERT_* macros.
+ */
+#define KUNIT_INIT_PTR_NOT_ERR_STRUCT(test, type, txt, val) { \
+ .assert = KUNIT_INIT_ASSERT_STRUCT(test, \
+ type, \
+ kunit_ptr_not_err_assert_format), \
+ .text = txt, \
+ .value = val \
+}
+
+/**
+ * struct kunit_binary_assert - An expectation/assertion that compares two
+ * non-pointer values (for example, KUNIT_EXPECT_EQ(test, 1 + 1, 2)).
+ * @assert: The parent of this type.
+ * @operation: A string representation of the comparison operator (e.g. "==").
+ * @left_text: A string representation of the expression in the left slot.
+ * @left_value: The actual evaluated value of the expression in the left slot.
+ * @right_text: A string representation of the expression in the right slot.
+ * @right_value: The actual evaluated value of the expression in the right slot.
+ *
+ * Represents an expectation/assertion that compares two non-pointer values. For
+ * example, to expect that 1 + 1 == 2, you can use the expectation
+ * KUNIT_EXPECT_EQ(test, 1 + 1, 2);
+ */
+struct kunit_binary_assert {
+ struct kunit_assert assert;
+ const char *operation;
+ const char *left_text;
+ long long left_value;
+ const char *right_text;
+ long long right_value;
+};
+
+void kunit_binary_assert_format(const struct kunit_assert *assert,
+ struct string_stream *stream);
+
+/**
+ * KUNIT_INIT_BINARY_ASSERT_STRUCT() - Initializes a
+ * &struct kunit_binary_assert.
+ * @test: The test case that this expectation/assertion is associated with.
+ * @type: The type (assertion or expectation) of this kunit_assert.
+ * @op_str: A string representation of the comparison operator (e.g. "==").
+ * @left_str: A string representation of the expression in the left slot.
+ * @left_val: The actual evaluated value of the expression in the left slot.
+ * @right_str: A string representation of the expression in the right slot.
+ * @right_val: The actual evaluated value of the expression in the right slot.
+ *
+ * Initializes a &struct kunit_binary_assert. Intended to be used in
+ * KUNIT_EXPECT_* and KUNIT_ASSERT_* macros.
+ */
+#define KUNIT_INIT_BINARY_ASSERT_STRUCT(test, \
+ type, \
+ op_str, \
+ left_str, \
+ left_val, \
+ right_str, \
+ right_val) { \
+ .assert = KUNIT_INIT_ASSERT_STRUCT(test, \
+ type, \
+ kunit_binary_assert_format), \
+ .operation = op_str, \
+ .left_text = left_str, \
+ .left_value = left_val, \
+ .right_text = right_str, \
+ .right_value = right_val \
+}
+
+/**
+ * struct kunit_binary_ptr_assert - An expectation/assertion that compares two
+ * pointer values (for example, KUNIT_EXPECT_PTR_EQ(test, foo, bar)).
+ * @assert: The parent of this type.
+ * @operation: A string representation of the comparison operator (e.g. "==").
+ * @left_text: A string representation of the expression in the left slot.
+ * @left_value: The actual evaluated value of the expression in the left slot.
+ * @right_text: A string representation of the expression in the right slot.
+ * @right_value: The actual evaluated value of the expression in the right slot.
+ *
+ * Represents an expectation/assertion that compares two pointer values. For
+ * example, to expect that foo and bar point to the same thing, you can use the
+ * expectation KUNIT_EXPECT_PTR_EQ(test, foo, bar);
+ */
+struct kunit_binary_ptr_assert {
+ struct kunit_assert assert;
+ const char *operation;
+ const char *left_text;
+ const void *left_value;
+ const char *right_text;
+ const void *right_value;
+};
+
+void kunit_binary_ptr_assert_format(const struct kunit_assert *assert,
+ struct string_stream *stream);
+
+/**
+ * KUNIT_INIT_BINARY_PTR_ASSERT_STRUCT() - Initializes a
+ * &struct kunit_binary_ptr_assert.
+ * @test: The test case that this expectation/assertion is associated with.
+ * @type: The type (assertion or expectation) of this kunit_assert.
+ * @op_str: A string representation of the comparison operator (e.g. "==").
+ * @left_str: A string representation of the expression in the left slot.
+ * @left_val: The actual evaluated value of the expression in the left slot.
+ * @right_str: A string representation of the expression in the right slot.
+ * @right_val: The actual evaluated value of the expression in the right slot.
+ *
+ * Initializes a &struct kunit_binary_ptr_assert. Intended to be used in
+ * KUNIT_EXPECT_* and KUNIT_ASSERT_* macros.
+ */
+#define KUNIT_INIT_BINARY_PTR_ASSERT_STRUCT(test, \
+ type, \
+ op_str, \
+ left_str, \
+ left_val, \
+ right_str, \
+ right_val) { \
+ .assert = KUNIT_INIT_ASSERT_STRUCT(test, \
+ type, \
+ kunit_binary_ptr_assert_format), \
+ .operation = op_str, \
+ .left_text = left_str, \
+ .left_value = left_val, \
+ .right_text = right_str, \
+ .right_value = right_val \
+}
+
+/**
+ * struct kunit_binary_str_assert - An expectation/assertion that compares two
+ * string values (for example, KUNIT_EXPECT_STREQ(test, foo, "bar")).
+ * @assert: The parent of this type.
+ * @operation: A string representation of the comparison operator (e.g. "==").
+ * @left_text: A string representation of the expression in the left slot.
+ * @left_value: The actual evaluated value of the expression in the left slot.
+ * @right_text: A string representation of the expression in the right slot.
+ * @right_value: The actual evaluated value of the expression in the right slot.
+ *
+ * Represents an expectation/assertion that compares two string values. For
+ * example, to expect that the string in foo is equal to "bar", you can use the
+ * expectation KUNIT_EXPECT_STREQ(test, foo, "bar");
+ */
+struct kunit_binary_str_assert {
+ struct kunit_assert assert;
+ const char *operation;
+ const char *left_text;
+ const char *left_value;
+ const char *right_text;
+ const char *right_value;
+};
+
+void kunit_binary_str_assert_format(const struct kunit_assert *assert,
+ struct string_stream *stream);
+
+/**
+ * KUNIT_INIT_BINARY_STR_ASSERT_STRUCT() - Initializes a
+ * &struct kunit_binary_str_assert.
+ * @test: The test case that this expectation/assertion is associated with.
+ * @type: The type (assertion or expectation) of this kunit_assert.
+ * @op_str: A string representation of the comparison operator (e.g. "==").
+ * @left_str: A string representation of the expression in the left slot.
+ * @left_val: The actual evaluated value of the expression in the left slot.
+ * @right_str: A string representation of the expression in the right slot.
+ * @right_val: The actual evaluated value of the expression in the right slot.
+ *
+ * Initializes a &struct kunit_binary_str_assert. Intended to be used in
+ * KUNIT_EXPECT_* and KUNIT_ASSERT_* macros.
+ */
+#define KUNIT_INIT_BINARY_STR_ASSERT_STRUCT(test, \
+ type, \
+ op_str, \
+ left_str, \
+ left_val, \
+ right_str, \
+ right_val) { \
+ .assert = KUNIT_INIT_ASSERT_STRUCT(test, \
+ type, \
+ kunit_binary_str_assert_format), \
+ .operation = op_str, \
+ .left_text = left_str, \
+ .left_value = left_val, \
+ .right_text = right_str, \
+ .right_value = right_val \
+}
+
+#endif /* _KUNIT_ASSERT_H */
diff --git a/include/kunit/test.h b/include/kunit/test.h
new file mode 100644
index 000000000000..2dfb550c6723
--- /dev/null
+++ b/include/kunit/test.h
@@ -0,0 +1,1507 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Base unit test (KUnit) API.
+ *
+ * Copyright (C) 2019, Google LLC.
+ * Author: Brendan Higgins <brendanhiggins@google.com>
+ */
+
+#ifndef _KUNIT_TEST_H
+#define _KUNIT_TEST_H
+
+#include <kunit/assert.h>
+#include <kunit/try-catch.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+struct kunit_resource;
+
+typedef int (*kunit_resource_init_t)(struct kunit_resource *, void *);
+typedef void (*kunit_resource_free_t)(struct kunit_resource *);
+
+/**
+ * struct kunit_resource - represents a *test managed resource*
+ * @allocation: for the user to store arbitrary data.
+ * @free: a user supplied function to free the resource. Populated by
+ * kunit_alloc_resource().
+ *
+ * Represents a *test managed resource*, a resource which will automatically be
+ * cleaned up at the end of a test case.
+ *
+ * Example:
+ *
+ * .. code-block:: c
+ *
+ * struct kunit_kmalloc_params {
+ * size_t size;
+ * gfp_t gfp;
+ * };
+ *
+ * static int kunit_kmalloc_init(struct kunit_resource *res, void *context)
+ * {
+ * struct kunit_kmalloc_params *params = context;
+ * res->allocation = kmalloc(params->size, params->gfp);
+ *
+ * if (!res->allocation)
+ * return -ENOMEM;
+ *
+ * return 0;
+ * }
+ *
+ * static void kunit_kmalloc_free(struct kunit_resource *res)
+ * {
+ * kfree(res->allocation);
+ * }
+ *
+ * void *kunit_kmalloc(struct kunit *test, size_t size, gfp_t gfp)
+ * {
+ * struct kunit_kmalloc_params params;
+ * struct kunit_resource *res;
+ *
+ * params.size = size;
+ * params.gfp = gfp;
+ *
+ * res = kunit_alloc_resource(test, kunit_kmalloc_init,
+ * kunit_kmalloc_free, &params);
+ * if (res)
+ * return res->allocation;
+ *
+ * return NULL;
+ * }
+ */
+struct kunit_resource {
+ void *allocation;
+ kunit_resource_free_t free;
+
+ /* private: internal use only. */
+ struct list_head node;
+};
+
+struct kunit;
+
+/**
+ * struct kunit_case - represents an individual test case.
+ *
+ * @run_case: the function representing the actual test case.
+ * @name: the name of the test case.
+ *
+ * A test case is a function with the signature,
+ * ``void (*)(struct kunit *)``
+ * that makes expectations and assertions (see KUNIT_EXPECT_TRUE() and
+ * KUNIT_ASSERT_TRUE()) about code under test. Each test case is associated
+ * with a &struct kunit_suite and will be run after the suite's init
+ * function and followed by the suite's exit function.
+ *
+ * A test case should be static and should only be created with the
+ * KUNIT_CASE() macro; additionally, every array of test cases should be
+ * terminated with an empty test case.
+ *
+ * Example:
+ *
+ * .. code-block:: c
+ *
+ * void add_test_basic(struct kunit *test)
+ * {
+ * KUNIT_EXPECT_EQ(test, 1, add(1, 0));
+ * KUNIT_EXPECT_EQ(test, 2, add(1, 1));
+ * KUNIT_EXPECT_EQ(test, 0, add(-1, 1));
+ * KUNIT_EXPECT_EQ(test, INT_MAX, add(0, INT_MAX));
+ * KUNIT_EXPECT_EQ(test, -1, add(INT_MAX, INT_MIN));
+ * }
+ *
+ * static struct kunit_case example_test_cases[] = {
+ * KUNIT_CASE(add_test_basic),
+ * {}
+ * };
+ *
+ */
+struct kunit_case {
+ void (*run_case)(struct kunit *test);
+ const char *name;
+
+ /* private: internal use only. */
+ bool success;
+};
+
+/**
+ * KUNIT_CASE - A helper for creating a &struct kunit_case
+ *
+ * @test_name: a reference to a test case function.
+ *
+ * Takes a symbol for a function representing a test case and creates a
+ * &struct kunit_case object from it. See the documentation for
+ * &struct kunit_case for an example on how to use it.
+ */
+#define KUNIT_CASE(test_name) { .run_case = test_name, .name = #test_name }
+
+/**
+ * struct kunit_suite - describes a related collection of &struct kunit_case
+ *
+ * @name: the name of the test. Purely informational.
+ * @init: called before every test case.
+ * @exit: called after every test case.
+ * @test_cases: a null terminated array of test cases.
+ *
+ * A kunit_suite is a collection of related &struct kunit_case s, such that
+ * @init is called before every test case and @exit is called after every
+ * test case, similar to the notion of a *test fixture* or a *test class*
+ * in other unit testing frameworks like JUnit or Googletest.
+ *
+ * Every &struct kunit_case must be associated with a kunit_suite for KUnit
+ * to run it.
+ */
+struct kunit_suite {
+ const char name[256];
+ int (*init)(struct kunit *test);
+ void (*exit)(struct kunit *test);
+ struct kunit_case *test_cases;
+};
+
+/**
+ * struct kunit - represents a running instance of a test.
+ *
+ * @priv: for the user to store arbitrary data. Commonly used to pass data
+ * created in the init function (see &struct kunit_suite).
+ *
+ * Used to store information about the current context under which the test
+ * is running. Most of this data is private and should only be accessed
+ * indirectly via public functions; the one exception is @priv which can be
+ * used by the test writer to store arbitrary data.
+ */
+struct kunit {
+ void *priv;
+
+ /* private: internal use only. */
+ const char *name; /* Read only after initialization! */
+ struct kunit_try_catch try_catch;
+ /*
+ * success starts as true, and may only be set to false during a
+ * test case; thus, it is safe to update this across multiple
+ * threads using WRITE_ONCE; however, as a consequence, it may only
+ * be read after the test case finishes once all threads associated
+ * with the test case have terminated.
+ */
+ bool success; /* Read only after test_case finishes! */
+ spinlock_t lock; /* Guards all mutable test state. */
+ /*
+ * Because resources is a list that may be updated multiple times (with
+ * new resources) from any thread associated with a test case, we must
+ * protect it with some type of lock.
+ */
+ struct list_head resources; /* Protected by lock. */
+};
+
+void kunit_init_test(struct kunit *test, const char *name);
+
+int kunit_run_tests(struct kunit_suite *suite);
+
+/**
+ * kunit_test_suites() - used to register one or more &struct kunit_suite
+ * with KUnit.
+ *
+ * @suites: a statically allocated list of &struct kunit_suite.
+ *
+ * Registers @suites with the test framework. See &struct kunit_suite for
+ * more information.
+ *
+ * When builtin, KUnit tests are all run as late_initcalls; this means
+ * that they cannot test anything where tests must run at a different init
+ * phase. One significant restriction resulting from this is that KUnit
+ * cannot reliably test anything that is initialized in the late_init phase;
+ * another is that KUnit cannot be used to test things that need to run in
+ * an earlier init phase.
+ *
+ * An alternative is to build the tests as a module. Because modules
+ * do not support multiple late_initcall()s, we need to initialize an
+ * array of suites for a module.
+ *
+ * TODO(brendanhiggins@google.com): Don't run all KUnit tests as
+ * late_initcalls. I have some future work planned to dispatch all KUnit
+ * tests from the same place, and at the very least to do so after
+ * everything else is definitely initialized.
+ */
+#define kunit_test_suites(...) \
+ static struct kunit_suite *suites[] = { __VA_ARGS__, NULL}; \
+ static int kunit_test_suites_init(void) \
+ { \
+ unsigned int i; \
+ for (i = 0; suites[i] != NULL; i++) \
+ kunit_run_tests(suites[i]); \
+ return 0; \
+ } \
+ late_initcall(kunit_test_suites_init); \
+ static void __exit kunit_test_suites_exit(void) \
+ { \
+ return; \
+ } \
+ module_exit(kunit_test_suites_exit)
+
+#define kunit_test_suite(suite) kunit_test_suites(&suite)
+
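For illustration, a minimal sketch of tying the pieces together: the example_test_cases array from the struct kunit_case documentation above can be registered as a suite like so (the suite name string is arbitrary):

static struct kunit_suite example_test_suite = {
        .name = "example",
        .test_cases = example_test_cases,
};
kunit_test_suite(example_test_suite);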
+/*
+ * Like kunit_alloc_resource() below, but returns the struct kunit_resource
+ * object that contains the allocation. This is mostly for testing purposes.
+ */
+struct kunit_resource *kunit_alloc_and_get_resource(struct kunit *test,
+ kunit_resource_init_t init,
+ kunit_resource_free_t free,
+ gfp_t internal_gfp,
+ void *context);
+
+/**
+ * kunit_alloc_resource() - Allocates a *test managed resource*.
+ * @test: The test context object.
+ * @init: a user supplied function to initialize the resource.
+ * @free: a user supplied function to free the resource.
+ * @internal_gfp: gfp to use for internal allocations, if unsure, use GFP_KERNEL
+ * @context: for the user to pass in arbitrary data to the init function.
+ *
+ * Allocates a *test managed resource*, a resource which will automatically be
+ * cleaned up at the end of a test case. See &struct kunit_resource for an
+ * example.
+ *
+ * NOTE: KUnit needs to allocate memory for each kunit_resource object. You must
+ * specify an @internal_gfp that is compatible with the use context of your
+ * resource.
+ */
+static inline void *kunit_alloc_resource(struct kunit *test,
+ kunit_resource_init_t init,
+ kunit_resource_free_t free,
+ gfp_t internal_gfp,
+ void *context)
+{
+ struct kunit_resource *res;
+
+ res = kunit_alloc_and_get_resource(test, init, free, internal_gfp,
+ context);
+
+ if (res)
+ return res->allocation;
+
+ return NULL;
+}
+
+typedef bool (*kunit_resource_match_t)(struct kunit *test,
+ const void *res,
+ void *match_data);
+
+/**
+ * kunit_resource_instance_match() - Match a resource with the same instance.
+ * @test: Test case to which the resource belongs.
+ * @res: The data stored in kunit_resource->allocation.
+ * @match_data: The resource pointer to match against.
+ *
+ * An instance of kunit_resource_match_t that matches a resource whose
+ * allocation matches @match_data.
+ */
+static inline bool kunit_resource_instance_match(struct kunit *test,
+ const void *res,
+ void *match_data)
+{
+ return res == match_data;
+}
+
+/**
+ * kunit_resource_destroy() - Find a kunit_resource and destroy it.
+ * @test: Test case to which the resource belongs.
+ * @match: Match function. Returns whether a given resource matches @match_data.
+ * @free: Must match the free function of the kunit_resource to be freed.
+ * @match_data: Data passed into @match.
+ *
+ * Free the latest kunit_resource of @test for which @free matches the
+ * kunit_resource_free_t associated with the resource and for which @match
+ * returns true.
+ *
+ * RETURNS:
+ * 0 if kunit_resource is found and freed, -ENOENT if not found.
+ */
+int kunit_resource_destroy(struct kunit *test,
+ kunit_resource_match_t match,
+ kunit_resource_free_t free,
+ void *match_data);
+
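A minimal sketch of releasing one test-managed allocation before the test case ends, assuming example_free is the kunit_resource_free_t that was originally passed to kunit_alloc_resource() for that allocation:

static void example_free(struct kunit_resource *res)
{
        kfree(res->allocation);
}

static void example_release_early(struct kunit *test, void *allocation)
{
        /* Frees the matching resource now instead of waiting for kunit_cleanup(). */
        int ret = kunit_resource_destroy(test, kunit_resource_instance_match,
                                         example_free, allocation);

        if (ret)
                kunit_warn(test, "no such test-managed allocation\n");
}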
+/**
+ * kunit_kmalloc() - Like kmalloc() except the allocation is *test managed*.
+ * @test: The test context object.
+ * @size: The size in bytes of the desired memory.
+ * @gfp: flags passed to underlying kmalloc().
+ *
+ * Just like `kmalloc(...)`, except the allocation is managed by the test case
+ * and is automatically cleaned up after the test case concludes. See &struct
+ * kunit_resource for more information.
+ */
+void *kunit_kmalloc(struct kunit *test, size_t size, gfp_t gfp);
+
+/**
+ * kunit_kfree() - Like kfree except for allocations managed by KUnit.
+ * @test: The test case to which the resource belongs.
+ * @ptr: The memory allocation to free.
+ */
+void kunit_kfree(struct kunit *test, const void *ptr);
+
+/**
+ * kunit_kzalloc() - Just like kunit_kmalloc(), but zeroes the allocation.
+ * @test: The test context object.
+ * @size: The size in bytes of the desired memory.
+ * @gfp: flags passed to underlying kmalloc().
+ *
+ * See kzalloc() and kunit_kmalloc() for more information.
+ */
+static inline void *kunit_kzalloc(struct kunit *test, size_t size, gfp_t gfp)
+{
+ return kunit_kmalloc(test, size, gfp | __GFP_ZERO);
+}
+
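A minimal sketch of a test case that relies on test-managed allocation, so no explicit kfree() is needed on any exit path (the assertion macros used here are declared further down in this header):

static void example_buffer_test(struct kunit *test)
{
        u8 *buf = kunit_kzalloc(test, 64, GFP_KERNEL);

        /* Abort the test case if the allocation failed. */
        KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buf);

        buf[0] = 0x5a;
        KUNIT_EXPECT_EQ(test, buf[0], (u8)0x5a);
        /* buf is freed automatically when the test case finishes. */
}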
+void kunit_cleanup(struct kunit *test);
+
+#define kunit_printk(lvl, test, fmt, ...) \
+ printk(lvl "\t# %s: " fmt, (test)->name, ##__VA_ARGS__)
+
+/**
+ * kunit_info() - Prints an INFO level message associated with @test.
+ *
+ * @test: The test context object.
+ * @fmt: A printk() style format string.
+ *
+ * Prints an info level message associated with the test suite being run.
+ * Takes a variable number of format parameters just like printk().
+ */
+#define kunit_info(test, fmt, ...) \
+ kunit_printk(KERN_INFO, test, fmt, ##__VA_ARGS__)
+
+/**
+ * kunit_warn() - Prints a WARN level message associated with @test.
+ *
+ * @test: The test context object.
+ * @fmt: A printk() style format string.
+ *
+ * Prints a warning level message.
+ */
+#define kunit_warn(test, fmt, ...) \
+ kunit_printk(KERN_WARNING, test, fmt, ##__VA_ARGS__)
+
+/**
+ * kunit_err() - Prints an ERROR level message associated with @test.
+ *
+ * @test: The test context object.
+ * @fmt: A printk() style format string.
+ *
+ * Prints an error level message.
+ */
+#define kunit_err(test, fmt, ...) \
+ kunit_printk(KERN_ERR, test, fmt, ##__VA_ARGS__)
+
+/**
+ * KUNIT_SUCCEED() - A no-op expectation. Only exists for code clarity.
+ * @test: The test context object.
+ *
+ * The opposite of KUNIT_FAIL(), it is an expectation that cannot fail. In other
+ * words, it does nothing and only exists for code clarity. See
+ * KUNIT_EXPECT_TRUE() for more information.
+ */
+#define KUNIT_SUCCEED(test) do {} while (0)
+
+void kunit_do_assertion(struct kunit *test,
+ struct kunit_assert *assert,
+ bool pass,
+ const char *fmt, ...);
+
+#define KUNIT_ASSERTION(test, pass, assert_class, INITIALIZER, fmt, ...) do { \
+ struct assert_class __assertion = INITIALIZER; \
+ kunit_do_assertion(test, \
+ &__assertion.assert, \
+ pass, \
+ fmt, \
+ ##__VA_ARGS__); \
+} while (0)
+
+
+#define KUNIT_FAIL_ASSERTION(test, assert_type, fmt, ...) \
+ KUNIT_ASSERTION(test, \
+ false, \
+ kunit_fail_assert, \
+ KUNIT_INIT_FAIL_ASSERT_STRUCT(test, assert_type), \
+ fmt, \
+ ##__VA_ARGS__)
+
+/**
+ * KUNIT_FAIL() - Always causes a test to fail when evaluated.
+ * @test: The test context object.
+ * @fmt: an informational message to be printed when the assertion is made.
+ * @...: string format arguments.
+ *
+ * The opposite of KUNIT_SUCCEED(), it is an expectation that always fails. In
+ * other words, it always results in a failed expectation, and consequently
+ * always causes the test case to fail when evaluated. See KUNIT_EXPECT_TRUE()
+ * for more information.
+ */
+#define KUNIT_FAIL(test, fmt, ...) \
+ KUNIT_FAIL_ASSERTION(test, \
+ KUNIT_EXPECTATION, \
+ fmt, \
+ ##__VA_ARGS__)
+
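A minimal sketch of KUNIT_FAIL() guarding a branch that should be unreachable; example_get_mode() is a hypothetical function under test:

static void example_mode_test(struct kunit *test)
{
        switch (example_get_mode()) {
        case 0:
        case 1:
                break;
        default:
                /* Any other value is a bug in the code under test. */
                KUNIT_FAIL(test, "unexpected mode");
        }
}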
+#define KUNIT_UNARY_ASSERTION(test, \
+ assert_type, \
+ condition, \
+ expected_true, \
+ fmt, \
+ ...) \
+ KUNIT_ASSERTION(test, \
+ !!(condition) == !!expected_true, \
+ kunit_unary_assert, \
+ KUNIT_INIT_UNARY_ASSERT_STRUCT(test, \
+ assert_type, \
+ #condition, \
+ expected_true), \
+ fmt, \
+ ##__VA_ARGS__)
+
+#define KUNIT_TRUE_MSG_ASSERTION(test, assert_type, condition, fmt, ...) \
+ KUNIT_UNARY_ASSERTION(test, \
+ assert_type, \
+ condition, \
+ true, \
+ fmt, \
+ ##__VA_ARGS__)
+
+#define KUNIT_TRUE_ASSERTION(test, assert_type, condition) \
+ KUNIT_TRUE_MSG_ASSERTION(test, assert_type, condition, NULL)
+
+#define KUNIT_FALSE_MSG_ASSERTION(test, assert_type, condition, fmt, ...) \
+ KUNIT_UNARY_ASSERTION(test, \
+ assert_type, \
+ condition, \
+ false, \
+ fmt, \
+ ##__VA_ARGS__)
+
+#define KUNIT_FALSE_ASSERTION(test, assert_type, condition) \
+ KUNIT_FALSE_MSG_ASSERTION(test, assert_type, condition, NULL)
+
+/*
+ * A factory macro for defining the assertions and expectations for the basic
+ * comparisons defined for the built in types.
+ *
+ * Unfortunately, there is no common type that all types can be promoted to for
+ * which all the binary operators behave the same way as for the actual types
+ * (for example, there is no type that long long and unsigned long long can
+ * both be cast to where the comparison result is preserved for all values). So
+ * the best we can do is do the comparison in the original types and then coerce
+ * everything to long long for printing; this way, the comparison behaves
+ * correctly and the printed out value usually makes sense without
+ * interpretation, but can always be interpreted to figure out the actual
+ * value.
+ */
+#define KUNIT_BASE_BINARY_ASSERTION(test, \
+ assert_class, \
+ ASSERT_CLASS_INIT, \
+ assert_type, \
+ left, \
+ op, \
+ right, \
+ fmt, \
+ ...) \
+do { \
+ typeof(left) __left = (left); \
+ typeof(right) __right = (right); \
+ ((void)__typecheck(__left, __right)); \
+ \
+ KUNIT_ASSERTION(test, \
+ __left op __right, \
+ assert_class, \
+ ASSERT_CLASS_INIT(test, \
+ assert_type, \
+ #op, \
+ #left, \
+ __left, \
+ #right, \
+ __right), \
+ fmt, \
+ ##__VA_ARGS__); \
+} while (0)
+
+#define KUNIT_BASE_EQ_MSG_ASSERTION(test, \
+ assert_class, \
+ ASSERT_CLASS_INIT, \
+ assert_type, \
+ left, \
+ right, \
+ fmt, \
+ ...) \
+ KUNIT_BASE_BINARY_ASSERTION(test, \
+ assert_class, \
+ ASSERT_CLASS_INIT, \
+ assert_type, \
+ left, ==, right, \
+ fmt, \
+ ##__VA_ARGS__)
+
+#define KUNIT_BASE_NE_MSG_ASSERTION(test, \
+ assert_class, \
+ ASSERT_CLASS_INIT, \
+ assert_type, \
+ left, \
+ right, \
+ fmt, \
+ ...) \
+ KUNIT_BASE_BINARY_ASSERTION(test, \
+ assert_class, \
+ ASSERT_CLASS_INIT, \
+ assert_type, \
+ left, !=, right, \
+ fmt, \
+ ##__VA_ARGS__)
+
+#define KUNIT_BASE_LT_MSG_ASSERTION(test, \
+ assert_class, \
+ ASSERT_CLASS_INIT, \
+ assert_type, \
+ left, \
+ right, \
+ fmt, \
+ ...) \
+ KUNIT_BASE_BINARY_ASSERTION(test, \
+ assert_class, \
+ ASSERT_CLASS_INIT, \
+ assert_type, \
+ left, <, right, \
+ fmt, \
+ ##__VA_ARGS__)
+
+#define KUNIT_BASE_LE_MSG_ASSERTION(test, \
+ assert_class, \
+ ASSERT_CLASS_INIT, \
+ assert_type, \
+ left, \
+ right, \
+ fmt, \
+ ...) \
+ KUNIT_BASE_BINARY_ASSERTION(test, \
+ assert_class, \
+ ASSERT_CLASS_INIT, \
+ assert_type, \
+ left, <=, right, \
+ fmt, \
+ ##__VA_ARGS__)
+
+#define KUNIT_BASE_GT_MSG_ASSERTION(test, \
+ assert_class, \
+ ASSERT_CLASS_INIT, \
+ assert_type, \
+ left, \
+ right, \
+ fmt, \
+ ...) \
+ KUNIT_BASE_BINARY_ASSERTION(test, \
+ assert_class, \
+ ASSERT_CLASS_INIT, \
+ assert_type, \
+ left, >, right, \
+ fmt, \
+ ##__VA_ARGS__)
+
+#define KUNIT_BASE_GE_MSG_ASSERTION(test, \
+ assert_class, \
+ ASSERT_CLASS_INIT, \
+ assert_type, \
+ left, \
+ right, \
+ fmt, \
+ ...) \
+ KUNIT_BASE_BINARY_ASSERTION(test, \
+ assert_class, \
+ ASSERT_CLASS_INIT, \
+ assert_type, \
+ left, >=, right, \
+ fmt, \
+ ##__VA_ARGS__)
+
+#define KUNIT_BINARY_EQ_MSG_ASSERTION(test, assert_type, left, right, fmt, ...)\
+ KUNIT_BASE_EQ_MSG_ASSERTION(test, \
+ kunit_binary_assert, \
+ KUNIT_INIT_BINARY_ASSERT_STRUCT, \
+ assert_type, \
+ left, \
+ right, \
+ fmt, \
+ ##__VA_ARGS__)
+
+#define KUNIT_BINARY_EQ_ASSERTION(test, assert_type, left, right) \
+ KUNIT_BINARY_EQ_MSG_ASSERTION(test, \
+ assert_type, \
+ left, \
+ right, \
+ NULL)
+
+#define KUNIT_BINARY_PTR_EQ_MSG_ASSERTION(test, \
+ assert_type, \
+ left, \
+ right, \
+ fmt, \
+ ...) \
+ KUNIT_BASE_EQ_MSG_ASSERTION(test, \
+ kunit_binary_ptr_assert, \
+ KUNIT_INIT_BINARY_PTR_ASSERT_STRUCT, \
+ assert_type, \
+ left, \
+ right, \
+ fmt, \
+ ##__VA_ARGS__)
+
+#define KUNIT_BINARY_PTR_EQ_ASSERTION(test, assert_type, left, right) \
+ KUNIT_BINARY_PTR_EQ_MSG_ASSERTION(test, \
+ assert_type, \
+ left, \
+ right, \
+ NULL)
+
+#define KUNIT_BINARY_NE_MSG_ASSERTION(test, assert_type, left, right, fmt, ...)\
+ KUNIT_BASE_NE_MSG_ASSERTION(test, \
+ kunit_binary_assert, \
+ KUNIT_INIT_BINARY_ASSERT_STRUCT, \
+ assert_type, \
+ left, \
+ right, \
+ fmt, \
+ ##__VA_ARGS__)
+
+#define KUNIT_BINARY_NE_ASSERTION(test, assert_type, left, right) \
+ KUNIT_BINARY_NE_MSG_ASSERTION(test, \
+ assert_type, \
+ left, \
+ right, \
+ NULL)
+
+#define KUNIT_BINARY_PTR_NE_MSG_ASSERTION(test, \
+ assert_type, \
+ left, \
+ right, \
+ fmt, \
+ ...) \
+ KUNIT_BASE_NE_MSG_ASSERTION(test, \
+ kunit_binary_ptr_assert, \
+ KUNIT_INIT_BINARY_PTR_ASSERT_STRUCT, \
+ assert_type, \
+ left, \
+ right, \
+ fmt, \
+ ##__VA_ARGS__)
+
+#define KUNIT_BINARY_PTR_NE_ASSERTION(test, assert_type, left, right) \
+ KUNIT_BINARY_PTR_NE_MSG_ASSERTION(test, \
+ assert_type, \
+ left, \
+ right, \
+ NULL)
+
+#define KUNIT_BINARY_LT_MSG_ASSERTION(test, assert_type, left, right, fmt, ...)\
+ KUNIT_BASE_LT_MSG_ASSERTION(test, \
+ kunit_binary_assert, \
+ KUNIT_INIT_BINARY_ASSERT_STRUCT, \
+ assert_type, \
+ left, \
+ right, \
+ fmt, \
+ ##__VA_ARGS__)
+
+#define KUNIT_BINARY_LT_ASSERTION(test, assert_type, left, right) \
+ KUNIT_BINARY_LT_MSG_ASSERTION(test, \
+ assert_type, \
+ left, \
+ right, \
+ NULL)
+
+#define KUNIT_BINARY_PTR_LT_MSG_ASSERTION(test, \
+ assert_type, \
+ left, \
+ right, \
+ fmt, \
+ ...) \
+ KUNIT_BASE_LT_MSG_ASSERTION(test, \
+ kunit_binary_ptr_assert, \
+ KUNIT_INIT_BINARY_PTR_ASSERT_STRUCT, \
+ assert_type, \
+ left, \
+ right, \
+ fmt, \
+ ##__VA_ARGS__)
+
+#define KUNIT_BINARY_PTR_LT_ASSERTION(test, assert_type, left, right) \
+ KUNIT_BINARY_PTR_LT_MSG_ASSERTION(test, \
+ assert_type, \
+ left, \
+ right, \
+ NULL)
+
+#define KUNIT_BINARY_LE_MSG_ASSERTION(test, assert_type, left, right, fmt, ...)\
+ KUNIT_BASE_LE_MSG_ASSERTION(test, \
+ kunit_binary_assert, \
+ KUNIT_INIT_BINARY_ASSERT_STRUCT, \
+ assert_type, \
+ left, \
+ right, \
+ fmt, \
+ ##__VA_ARGS__)
+
+#define KUNIT_BINARY_LE_ASSERTION(test, assert_type, left, right) \
+ KUNIT_BINARY_LE_MSG_ASSERTION(test, \
+ assert_type, \
+ left, \
+ right, \
+ NULL)
+
+#define KUNIT_BINARY_PTR_LE_MSG_ASSERTION(test, \
+ assert_type, \
+ left, \
+ right, \
+ fmt, \
+ ...) \
+ KUNIT_BASE_LE_MSG_ASSERTION(test, \
+ kunit_binary_ptr_assert, \
+ KUNIT_INIT_BINARY_PTR_ASSERT_STRUCT, \
+ assert_type, \
+ left, \
+ right, \
+ fmt, \
+ ##__VA_ARGS__)
+
+#define KUNIT_BINARY_PTR_LE_ASSERTION(test, assert_type, left, right) \
+ KUNIT_BINARY_PTR_LE_MSG_ASSERTION(test, \
+ assert_type, \
+ left, \
+ right, \
+ NULL)
+
+#define KUNIT_BINARY_GT_MSG_ASSERTION(test, assert_type, left, right, fmt, ...)\
+ KUNIT_BASE_GT_MSG_ASSERTION(test, \
+ kunit_binary_assert, \
+ KUNIT_INIT_BINARY_ASSERT_STRUCT, \
+ assert_type, \
+ left, \
+ right, \
+ fmt, \
+ ##__VA_ARGS__)
+
+#define KUNIT_BINARY_GT_ASSERTION(test, assert_type, left, right) \
+ KUNIT_BINARY_GT_MSG_ASSERTION(test, \
+ assert_type, \
+ left, \
+ right, \
+ NULL)
+
+#define KUNIT_BINARY_PTR_GT_MSG_ASSERTION(test, \
+ assert_type, \
+ left, \
+ right, \
+ fmt, \
+ ...) \
+ KUNIT_BASE_GT_MSG_ASSERTION(test, \
+ kunit_binary_ptr_assert, \
+ KUNIT_INIT_BINARY_PTR_ASSERT_STRUCT, \
+ assert_type, \
+ left, \
+ right, \
+ fmt, \
+ ##__VA_ARGS__)
+
+#define KUNIT_BINARY_PTR_GT_ASSERTION(test, assert_type, left, right) \
+ KUNIT_BINARY_PTR_GT_MSG_ASSERTION(test, \
+ assert_type, \
+ left, \
+ right, \
+ NULL)
+
+#define KUNIT_BINARY_GE_MSG_ASSERTION(test, assert_type, left, right, fmt, ...)\
+ KUNIT_BASE_GE_MSG_ASSERTION(test, \
+ kunit_binary_assert, \
+ KUNIT_INIT_BINARY_ASSERT_STRUCT, \
+ assert_type, \
+ left, \
+ right, \
+ fmt, \
+ ##__VA_ARGS__)
+
+#define KUNIT_BINARY_GE_ASSERTION(test, assert_type, left, right) \
+ KUNIT_BINARY_GE_MSG_ASSERTION(test, \
+ assert_type, \
+ left, \
+ right, \
+ NULL)
+
+#define KUNIT_BINARY_PTR_GE_MSG_ASSERTION(test, \
+ assert_type, \
+ left, \
+ right, \
+ fmt, \
+ ...) \
+ KUNIT_BASE_GE_MSG_ASSERTION(test, \
+ kunit_binary_ptr_assert, \
+ KUNIT_INIT_BINARY_PTR_ASSERT_STRUCT, \
+ assert_type, \
+ left, \
+ right, \
+ fmt, \
+ ##__VA_ARGS__)
+
+#define KUNIT_BINARY_PTR_GE_ASSERTION(test, assert_type, left, right) \
+ KUNIT_BINARY_PTR_GE_MSG_ASSERTION(test, \
+ assert_type, \
+ left, \
+ right, \
+ NULL)
+
+#define KUNIT_BINARY_STR_ASSERTION(test, \
+ assert_type, \
+ left, \
+ op, \
+ right, \
+ fmt, \
+ ...) \
+do { \
+ typeof(left) __left = (left); \
+ typeof(right) __right = (right); \
+ \
+ KUNIT_ASSERTION(test, \
+ strcmp(__left, __right) op 0, \
+ kunit_binary_str_assert, \
+ KUNIT_INIT_BINARY_ASSERT_STRUCT(test, \
+ assert_type, \
+ #op, \
+ #left, \
+ __left, \
+ #right, \
+ __right), \
+ fmt, \
+ ##__VA_ARGS__); \
+} while (0)
+
+#define KUNIT_BINARY_STR_EQ_MSG_ASSERTION(test, \
+ assert_type, \
+ left, \
+ right, \
+ fmt, \
+ ...) \
+ KUNIT_BINARY_STR_ASSERTION(test, \
+ assert_type, \
+ left, ==, right, \
+ fmt, \
+ ##__VA_ARGS__)
+
+#define KUNIT_BINARY_STR_EQ_ASSERTION(test, assert_type, left, right) \
+ KUNIT_BINARY_STR_EQ_MSG_ASSERTION(test, \
+ assert_type, \
+ left, \
+ right, \
+ NULL)
+
+#define KUNIT_BINARY_STR_NE_MSG_ASSERTION(test, \
+ assert_type, \
+ left, \
+ right, \
+ fmt, \
+ ...) \
+ KUNIT_BINARY_STR_ASSERTION(test, \
+ assert_type, \
+ left, !=, right, \
+ fmt, \
+ ##__VA_ARGS__)
+
+#define KUNIT_BINARY_STR_NE_ASSERTION(test, assert_type, left, right) \
+ KUNIT_BINARY_STR_NE_MSG_ASSERTION(test, \
+ assert_type, \
+ left, \
+ right, \
+ NULL)
+
+#define KUNIT_PTR_NOT_ERR_OR_NULL_MSG_ASSERTION(test, \
+ assert_type, \
+ ptr, \
+ fmt, \
+ ...) \
+do { \
+ typeof(ptr) __ptr = (ptr); \
+ \
+ KUNIT_ASSERTION(test, \
+ !IS_ERR_OR_NULL(__ptr), \
+ kunit_ptr_not_err_assert, \
+ KUNIT_INIT_PTR_NOT_ERR_STRUCT(test, \
+ assert_type, \
+ #ptr, \
+ __ptr), \
+ fmt, \
+ ##__VA_ARGS__); \
+} while (0)
+
+#define KUNIT_PTR_NOT_ERR_OR_NULL_ASSERTION(test, assert_type, ptr) \
+ KUNIT_PTR_NOT_ERR_OR_NULL_MSG_ASSERTION(test, \
+ assert_type, \
+ ptr, \
+ NULL)
+
+/**
+ * KUNIT_EXPECT_TRUE() - Causes a test failure when the expression is not true.
+ * @test: The test context object.
+ * @condition: an arbitrary boolean expression. The test fails when this does
+ * not evaluate to true.
+ *
+ * This and expectations of the form `KUNIT_EXPECT_*` will cause the test case
+ * to fail when the specified condition is not met; however, they will not
+ * prevent the test case from continuing to run. This is otherwise known as an
+ * *expectation failure*.
+ */
+#define KUNIT_EXPECT_TRUE(test, condition) \
+ KUNIT_TRUE_ASSERTION(test, KUNIT_EXPECTATION, condition)
+
+#define KUNIT_EXPECT_TRUE_MSG(test, condition, fmt, ...) \
+ KUNIT_TRUE_MSG_ASSERTION(test, \
+ KUNIT_EXPECTATION, \
+ condition, \
+ fmt, \
+ ##__VA_ARGS__)
+
+/**
+ * KUNIT_EXPECT_FALSE() - Causes a test failure when the expression is not false.
+ * @test: The test context object.
+ * @condition: an arbitrary boolean expression. The test fails when this does
+ * not evaluate to false.
+ *
+ * Sets an expectation that @condition evaluates to false. See
+ * KUNIT_EXPECT_TRUE() for more information.
+ */
+#define KUNIT_EXPECT_FALSE(test, condition) \
+ KUNIT_FALSE_ASSERTION(test, KUNIT_EXPECTATION, condition)
+
+#define KUNIT_EXPECT_FALSE_MSG(test, condition, fmt, ...) \
+ KUNIT_FALSE_MSG_ASSERTION(test, \
+ KUNIT_EXPECTATION, \
+ condition, \
+ fmt, \
+ ##__VA_ARGS__)
+
+/**
+ * KUNIT_EXPECT_EQ() - Sets an expectation that @left and @right are equal.
+ * @test: The test context object.
+ * @left: an arbitrary expression that evaluates to a primitive C type.
+ * @right: an arbitrary expression that evaluates to a primitive C type.
+ *
+ * Sets an expectation that the values that @left and @right evaluate to are
+ * equal. This is semantically equivalent to
+ * KUNIT_EXPECT_TRUE(@test, (@left) == (@right)). See KUNIT_EXPECT_TRUE() for
+ * more information.
+ */
+#define KUNIT_EXPECT_EQ(test, left, right) \
+ KUNIT_BINARY_EQ_ASSERTION(test, KUNIT_EXPECTATION, left, right)
+
+#define KUNIT_EXPECT_EQ_MSG(test, left, right, fmt, ...) \
+ KUNIT_BINARY_EQ_MSG_ASSERTION(test, \
+ KUNIT_EXPECTATION, \
+ left, \
+ right, \
+ fmt, \
+ ##__VA_ARGS__)
+
+/**
+ * KUNIT_EXPECT_PTR_EQ() - Expects that pointers @left and @right are equal.
+ * @test: The test context object.
+ * @left: an arbitrary expression that evaluates to a pointer.
+ * @right: an arbitrary expression that evaluates to a pointer.
+ *
+ * Sets an expectation that the values that @left and @right evaluate to are
+ * equal. This is semantically equivalent to
+ * KUNIT_EXPECT_TRUE(@test, (@left) == (@right)). See KUNIT_EXPECT_TRUE() for
+ * more information.
+ */
+#define KUNIT_EXPECT_PTR_EQ(test, left, right) \
+ KUNIT_BINARY_PTR_EQ_ASSERTION(test, \
+ KUNIT_EXPECTATION, \
+ left, \
+ right)
+
+#define KUNIT_EXPECT_PTR_EQ_MSG(test, left, right, fmt, ...) \
+ KUNIT_BINARY_PTR_EQ_MSG_ASSERTION(test, \
+ KUNIT_EXPECTATION, \
+ left, \
+ right, \
+ fmt, \
+ ##__VA_ARGS__)
+
+/**
+ * KUNIT_EXPECT_NE() - An expectation that @left and @right are not equal.
+ * @test: The test context object.
+ * @left: an arbitrary expression that evaluates to a primitive C type.
+ * @right: an arbitrary expression that evaluates to a primitive C type.
+ *
+ * Sets an expectation that the values that @left and @right evaluate to are not
+ * equal. This is semantically equivalent to
+ * KUNIT_EXPECT_TRUE(@test, (@left) != (@right)). See KUNIT_EXPECT_TRUE() for
+ * more information.
+ */
+#define KUNIT_EXPECT_NE(test, left, right) \
+ KUNIT_BINARY_NE_ASSERTION(test, KUNIT_EXPECTATION, left, right)
+
+#define KUNIT_EXPECT_NE_MSG(test, left, right, fmt, ...) \
+ KUNIT_BINARY_NE_MSG_ASSERTION(test, \
+ KUNIT_EXPECTATION, \
+ left, \
+ right, \
+ fmt, \
+ ##__VA_ARGS__)
+
+/**
+ * KUNIT_EXPECT_PTR_NE() - Expects that pointers @left and @right are not equal.
+ * @test: The test context object.
+ * @left: an arbitrary expression that evaluates to a pointer.
+ * @right: an arbitrary expression that evaluates to a pointer.
+ *
+ * Sets an expectation that the values that @left and @right evaluate to are not
+ * equal. This is semantically equivalent to
+ * KUNIT_EXPECT_TRUE(@test, (@left) != (@right)). See KUNIT_EXPECT_TRUE() for
+ * more information.
+ */
+#define KUNIT_EXPECT_PTR_NE(test, left, right) \
+ KUNIT_BINARY_PTR_NE_ASSERTION(test, \
+ KUNIT_EXPECTATION, \
+ left, \
+ right)
+
+#define KUNIT_EXPECT_PTR_NE_MSG(test, left, right, fmt, ...) \
+ KUNIT_BINARY_PTR_NE_MSG_ASSERTION(test, \
+ KUNIT_EXPECTATION, \
+ left, \
+ right, \
+ fmt, \
+ ##__VA_ARGS__)
+
+/**
+ * KUNIT_EXPECT_LT() - An expectation that @left is less than @right.
+ * @test: The test context object.
+ * @left: an arbitrary expression that evaluates to a primitive C type.
+ * @right: an arbitrary expression that evaluates to a primitive C type.
+ *
+ * Sets an expectation that the value that @left evaluates to is less than the
+ * value that @right evaluates to. This is semantically equivalent to
+ * KUNIT_EXPECT_TRUE(@test, (@left) < (@right)). See KUNIT_EXPECT_TRUE() for
+ * more information.
+ */
+#define KUNIT_EXPECT_LT(test, left, right) \
+ KUNIT_BINARY_LT_ASSERTION(test, KUNIT_EXPECTATION, left, right)
+
+#define KUNIT_EXPECT_LT_MSG(test, left, right, fmt, ...) \
+ KUNIT_BINARY_LT_MSG_ASSERTION(test, \
+ KUNIT_EXPECTATION, \
+ left, \
+ right, \
+ fmt, \
+ ##__VA_ARGS__)
+
+/**
+ * KUNIT_EXPECT_LE() - Expects that @left is less than or equal to @right.
+ * @test: The test context object.
+ * @left: an arbitrary expression that evaluates to a primitive C type.
+ * @right: an arbitrary expression that evaluates to a primitive C type.
+ *
+ * Sets an expectation that the value that @left evaluates to is less than or
+ * equal to the value that @right evaluates to. Semantically this is equivalent
+ * to KUNIT_EXPECT_TRUE(@test, (@left) <= (@right)). See KUNIT_EXPECT_TRUE() for
+ * more information.
+ */
+#define KUNIT_EXPECT_LE(test, left, right) \
+ KUNIT_BINARY_LE_ASSERTION(test, KUNIT_EXPECTATION, left, right)
+
+#define KUNIT_EXPECT_LE_MSG(test, left, right, fmt, ...) \
+ KUNIT_BINARY_LE_MSG_ASSERTION(test, \
+ KUNIT_EXPECTATION, \
+ left, \
+ right, \
+ fmt, \
+ ##__VA_ARGS__)
+
+/**
+ * KUNIT_EXPECT_GT() - An expectation that @left is greater than @right.
+ * @test: The test context object.
+ * @left: an arbitrary expression that evaluates to a primitive C type.
+ * @right: an arbitrary expression that evaluates to a primitive C type.
+ *
+ * Sets an expectation that the value that @left evaluates to is greater than
+ * the value that @right evaluates to. This is semantically equivalent to
+ * KUNIT_EXPECT_TRUE(@test, (@left) > (@right)). See KUNIT_EXPECT_TRUE() for
+ * more information.
+ */
+#define KUNIT_EXPECT_GT(test, left, right) \
+ KUNIT_BINARY_GT_ASSERTION(test, KUNIT_EXPECTATION, left, right)
+
+#define KUNIT_EXPECT_GT_MSG(test, left, right, fmt, ...) \
+ KUNIT_BINARY_GT_MSG_ASSERTION(test, \
+ KUNIT_EXPECTATION, \
+ left, \
+ right, \
+ fmt, \
+ ##__VA_ARGS__)
+
+/**
+ * KUNIT_EXPECT_GE() - Expects that @left is greater than or equal to @right.
+ * @test: The test context object.
+ * @left: an arbitrary expression that evaluates to a primitive C type.
+ * @right: an arbitrary expression that evaluates to a primitive C type.
+ *
+ * Sets an expectation that the value that @left evaluates to is greater than or
+ * equal to the value that @right evaluates to. This is semantically equivalent to
+ * KUNIT_EXPECT_TRUE(@test, (@left) >= (@right)). See KUNIT_EXPECT_TRUE() for
+ * more information.
+ */
+#define KUNIT_EXPECT_GE(test, left, right) \
+ KUNIT_BINARY_GE_ASSERTION(test, KUNIT_EXPECTATION, left, right)
+
+#define KUNIT_EXPECT_GE_MSG(test, left, right, fmt, ...) \
+ KUNIT_BINARY_GE_MSG_ASSERTION(test, \
+ KUNIT_EXPECTATION, \
+ left, \
+ right, \
+ fmt, \
+ ##__VA_ARGS__)
+
+/**
+ * KUNIT_EXPECT_STREQ() - Expects that strings @left and @right are equal.
+ * @test: The test context object.
+ * @left: an arbitrary expression that evaluates to a null terminated string.
+ * @right: an arbitrary expression that evaluates to a null terminated string.
+ *
+ * Sets an expectation that the values that @left and @right evaluate to are
+ * equal. This is semantically equivalent to
+ * KUNIT_EXPECT_TRUE(@test, !strcmp((@left), (@right))). See KUNIT_EXPECT_TRUE()
+ * for more information.
+ */
+#define KUNIT_EXPECT_STREQ(test, left, right) \
+ KUNIT_BINARY_STR_EQ_ASSERTION(test, KUNIT_EXPECTATION, left, right)
+
+#define KUNIT_EXPECT_STREQ_MSG(test, left, right, fmt, ...) \
+ KUNIT_BINARY_STR_EQ_MSG_ASSERTION(test, \
+ KUNIT_EXPECTATION, \
+ left, \
+ right, \
+ fmt, \
+ ##__VA_ARGS__)
+
+/**
+ * KUNIT_EXPECT_STRNEQ() - Expects that strings @left and @right are not equal.
+ * @test: The test context object.
+ * @left: an arbitrary expression that evaluates to a null terminated string.
+ * @right: an arbitrary expression that evaluates to a null terminated string.
+ *
+ * Sets an expectation that the values that @left and @right evaluate to are
+ * not equal. This is semantically equivalent to
+ * KUNIT_EXPECT_TRUE(@test, strcmp((@left), (@right))). See KUNIT_EXPECT_TRUE()
+ * for more information.
+ */
+#define KUNIT_EXPECT_STRNEQ(test, left, right) \
+ KUNIT_BINARY_STR_NE_ASSERTION(test, KUNIT_EXPECTATION, left, right)
+
+#define KUNIT_EXPECT_STRNEQ_MSG(test, left, right, fmt, ...) \
+ KUNIT_BINARY_STR_NE_MSG_ASSERTION(test, \
+ KUNIT_EXPECTATION, \
+ left, \
+ right, \
+ fmt, \
+ ##__VA_ARGS__)
+
+/**
+ * KUNIT_EXPECT_NOT_ERR_OR_NULL() - Expects that @ptr is not null and not err.
+ * @test: The test context object.
+ * @ptr: an arbitrary pointer.
+ *
+ * Sets an expectation that the value that @ptr evaluates to is not null and not
+ * an errno stored in a pointer. This is semantically equivalent to
+ * KUNIT_EXPECT_TRUE(@test, !IS_ERR_OR_NULL(@ptr)). See KUNIT_EXPECT_TRUE() for
+ * more information.
+ */
+#define KUNIT_EXPECT_NOT_ERR_OR_NULL(test, ptr) \
+ KUNIT_PTR_NOT_ERR_OR_NULL_ASSERTION(test, KUNIT_EXPECTATION, ptr)
+
+#define KUNIT_EXPECT_NOT_ERR_OR_NULL_MSG(test, ptr, fmt, ...) \
+ KUNIT_PTR_NOT_ERR_OR_NULL_MSG_ASSERTION(test, \
+ KUNIT_EXPECTATION, \
+ ptr, \
+ fmt, \
+ ##__VA_ARGS__)
+
+#define KUNIT_ASSERT_FAILURE(test, fmt, ...) \
+ KUNIT_FAIL_ASSERTION(test, KUNIT_ASSERTION, fmt, ##__VA_ARGS__)
+
+/**
+ * KUNIT_ASSERT_TRUE() - Sets an assertion that @condition is true.
+ * @test: The test context object.
+ * @condition: an arbitrary boolean expression. The test fails and aborts when
+ * this does not evaluate to true.
+ *
+ * This and assertions of the form `KUNIT_ASSERT_*` will cause the test case to
+ * fail *and immediately abort* when the specified condition is not met. Unlike
+ * an expectation failure, it will prevent the test case from continuing to run;
+ * this is otherwise known as an *assertion failure*.
+ */
+#define KUNIT_ASSERT_TRUE(test, condition) \
+ KUNIT_TRUE_ASSERTION(test, KUNIT_ASSERTION, condition)
+
+#define KUNIT_ASSERT_TRUE_MSG(test, condition, fmt, ...) \
+ KUNIT_TRUE_MSG_ASSERTION(test, \
+ KUNIT_ASSERTION, \
+ condition, \
+ fmt, \
+ ##__VA_ARGS__)
+
+/**
+ * KUNIT_ASSERT_FALSE() - Sets an assertion that @condition is false.
+ * @test: The test context object.
+ * @condition: an arbitrary boolean expression.
+ *
+ * Sets an assertion that the value that @condition evaluates to is false. This
+ * is the same as KUNIT_EXPECT_FALSE(), except it causes an assertion failure
+ * (see KUNIT_ASSERT_TRUE()) when the assertion is not met.
+ */
+#define KUNIT_ASSERT_FALSE(test, condition) \
+ KUNIT_FALSE_ASSERTION(test, KUNIT_ASSERTION, condition)
+
+#define KUNIT_ASSERT_FALSE_MSG(test, condition, fmt, ...) \
+ KUNIT_FALSE_MSG_ASSERTION(test, \
+ KUNIT_ASSERTION, \
+ condition, \
+ fmt, \
+ ##__VA_ARGS__)
+
+/**
+ * KUNIT_ASSERT_EQ() - Sets an assertion that @left and @right are equal.
+ * @test: The test context object.
+ * @left: an arbitrary expression that evaluates to a primitive C type.
+ * @right: an arbitrary expression that evaluates to a primitive C type.
+ *
+ * Sets an assertion that the values that @left and @right evaluate to are
+ * equal. This is the same as KUNIT_EXPECT_EQ(), except it causes an assertion
+ * failure (see KUNIT_ASSERT_TRUE()) when the assertion is not met.
+ */
+#define KUNIT_ASSERT_EQ(test, left, right) \
+ KUNIT_BINARY_EQ_ASSERTION(test, KUNIT_ASSERTION, left, right)
+
+#define KUNIT_ASSERT_EQ_MSG(test, left, right, fmt, ...) \
+ KUNIT_BINARY_EQ_MSG_ASSERTION(test, \
+ KUNIT_ASSERTION, \
+ left, \
+ right, \
+ fmt, \
+ ##__VA_ARGS__)
+
+/**
+ * KUNIT_ASSERT_PTR_EQ() - Asserts that pointers @left and @right are equal.
+ * @test: The test context object.
+ * @left: an arbitrary expression that evaluates to a pointer.
+ * @right: an arbitrary expression that evaluates to a pointer.
+ *
+ * Sets an assertion that the values that @left and @right evaluate to are
+ * equal. This is the same as KUNIT_EXPECT_EQ(), except it causes an assertion
+ * failure (see KUNIT_ASSERT_TRUE()) when the assertion is not met.
+ */
+#define KUNIT_ASSERT_PTR_EQ(test, left, right) \
+ KUNIT_BINARY_PTR_EQ_ASSERTION(test, KUNIT_ASSERTION, left, right)
+
+#define KUNIT_ASSERT_PTR_EQ_MSG(test, left, right, fmt, ...) \
+ KUNIT_BINARY_PTR_EQ_MSG_ASSERTION(test, \
+ KUNIT_ASSERTION, \
+ left, \
+ right, \
+ fmt, \
+ ##__VA_ARGS__)
+
+/**
+ * KUNIT_ASSERT_NE() - An assertion that @left and @right are not equal.
+ * @test: The test context object.
+ * @left: an arbitrary expression that evaluates to a primitive C type.
+ * @right: an arbitrary expression that evaluates to a primitive C type.
+ *
+ * Sets an assertion that the values that @left and @right evaluate to are not
+ * equal. This is the same as KUNIT_EXPECT_NE(), except it causes an assertion
+ * failure (see KUNIT_ASSERT_TRUE()) when the assertion is not met.
+ */
+#define KUNIT_ASSERT_NE(test, left, right) \
+ KUNIT_BINARY_NE_ASSERTION(test, KUNIT_ASSERTION, left, right)
+
+#define KUNIT_ASSERT_NE_MSG(test, left, right, fmt, ...) \
+ KUNIT_BINARY_NE_MSG_ASSERTION(test, \
+ KUNIT_ASSERTION, \
+ left, \
+ right, \
+ fmt, \
+ ##__VA_ARGS__)
+
+/**
+ * KUNIT_ASSERT_PTR_NE() - Asserts that pointers @left and @right are not equal.
+ * @test: The test context object.
+ * @left: an arbitrary expression that evaluates to a pointer.
+ * @right: an arbitrary expression that evaluates to a pointer.
+ *
+ * Sets an assertion that the values that @left and @right evaluate to are not
+ * equal. This is the same as KUNIT_EXPECT_NE(), except it causes an assertion
+ * failure (see KUNIT_ASSERT_TRUE()) when the assertion is not met.
+ */
+#define KUNIT_ASSERT_PTR_NE(test, left, right) \
+ KUNIT_BINARY_PTR_NE_ASSERTION(test, KUNIT_ASSERTION, left, right)
+
+#define KUNIT_ASSERT_PTR_NE_MSG(test, left, right, fmt, ...) \
+ KUNIT_BINARY_PTR_NE_MSG_ASSERTION(test, \
+ KUNIT_ASSERTION, \
+ left, \
+ right, \
+ fmt, \
+ ##__VA_ARGS__)
+/**
+ * KUNIT_ASSERT_LT() - An assertion that @left is less than @right.
+ * @test: The test context object.
+ * @left: an arbitrary expression that evaluates to a primitive C type.
+ * @right: an arbitrary expression that evaluates to a primitive C type.
+ *
+ * Sets an assertion that the value that @left evaluates to is less than the
+ * value that @right evaluates to. This is the same as KUNIT_EXPECT_LT(), except
+ * it causes an assertion failure (see KUNIT_ASSERT_TRUE()) when the assertion
+ * is not met.
+ */
+#define KUNIT_ASSERT_LT(test, left, right) \
+ KUNIT_BINARY_LT_ASSERTION(test, KUNIT_ASSERTION, left, right)
+
+#define KUNIT_ASSERT_LT_MSG(test, left, right, fmt, ...) \
+ KUNIT_BINARY_LT_MSG_ASSERTION(test, \
+ KUNIT_ASSERTION, \
+ left, \
+ right, \
+ fmt, \
+ ##__VA_ARGS__)
+/**
+ * KUNIT_ASSERT_LE() - An assertion that @left is less than or equal to @right.
+ * @test: The test context object.
+ * @left: an arbitrary expression that evaluates to a primitive C type.
+ * @right: an arbitrary expression that evaluates to a primitive C type.
+ *
+ * Sets an assertion that the value that @left evaluates to is less than or
+ * equal to the value that @right evaluates to. This is the same as
+ * KUNIT_EXPECT_LE(), except it causes an assertion failure (see
+ * KUNIT_ASSERT_TRUE()) when the assertion is not met.
+ */
+#define KUNIT_ASSERT_LE(test, left, right) \
+ KUNIT_BINARY_LE_ASSERTION(test, KUNIT_ASSERTION, left, right)
+
+#define KUNIT_ASSERT_LE_MSG(test, left, right, fmt, ...) \
+ KUNIT_BINARY_LE_MSG_ASSERTION(test, \
+ KUNIT_ASSERTION, \
+ left, \
+ right, \
+ fmt, \
+ ##__VA_ARGS__)
+
+/**
+ * KUNIT_ASSERT_GT() - An assertion that @left is greater than @right.
+ * @test: The test context object.
+ * @left: an arbitrary expression that evaluates to a primitive C type.
+ * @right: an arbitrary expression that evaluates to a primitive C type.
+ *
+ * Sets an assertion that the value that @left evaluates to is greater than the
+ * value that @right evaluates to. This is the same as KUNIT_EXPECT_GT(), except
+ * it causes an assertion failure (see KUNIT_ASSERT_TRUE()) when the assertion
+ * is not met.
+ */
+#define KUNIT_ASSERT_GT(test, left, right) \
+ KUNIT_BINARY_GT_ASSERTION(test, KUNIT_ASSERTION, left, right)
+
+#define KUNIT_ASSERT_GT_MSG(test, left, right, fmt, ...) \
+ KUNIT_BINARY_GT_MSG_ASSERTION(test, \
+ KUNIT_ASSERTION, \
+ left, \
+ right, \
+ fmt, \
+ ##__VA_ARGS__)
+
+/**
+ * KUNIT_ASSERT_GE() - Assertion that @left is greater than or equal to @right.
+ * @test: The test context object.
+ * @left: an arbitrary expression that evaluates to a primitive C type.
+ * @right: an arbitrary expression that evaluates to a primitive C type.
+ *
+ * Sets an assertion that the value that @left evaluates to is greater than or
+ * equal to the value that @right evaluates to. This is the same as
+ * KUNIT_EXPECT_GE(), except
+ * it causes an assertion failure (see KUNIT_ASSERT_TRUE()) when the assertion
+ * is not met.
+ */
+#define KUNIT_ASSERT_GE(test, left, right) \
+ KUNIT_BINARY_GE_ASSERTION(test, KUNIT_ASSERTION, left, right)
+
+#define KUNIT_ASSERT_GE_MSG(test, left, right, fmt, ...) \
+ KUNIT_BINARY_GE_MSG_ASSERTION(test, \
+ KUNIT_ASSERTION, \
+ left, \
+ right, \
+ fmt, \
+ ##__VA_ARGS__)
+
+/**
+ * KUNIT_ASSERT_STREQ() - An assertion that strings @left and @right are equal.
+ * @test: The test context object.
+ * @left: an arbitrary expression that evaluates to a null terminated string.
+ * @right: an arbitrary expression that evaluates to a null terminated string.
+ *
+ * Sets an assertion that the values that @left and @right evaluate to are
+ * equal. This is the same as KUNIT_EXPECT_STREQ(), except it causes an
+ * assertion failure (see KUNIT_ASSERT_TRUE()) when the assertion is not met.
+ */
+#define KUNIT_ASSERT_STREQ(test, left, right) \
+ KUNIT_BINARY_STR_EQ_ASSERTION(test, KUNIT_ASSERTION, left, right)
+
+#define KUNIT_ASSERT_STREQ_MSG(test, left, right, fmt, ...) \
+ KUNIT_BINARY_STR_EQ_MSG_ASSERTION(test, \
+ KUNIT_ASSERTION, \
+ left, \
+ right, \
+ fmt, \
+ ##__VA_ARGS__)
+
+/**
+ * KUNIT_ASSERT_STRNEQ() - Asserts that strings @left and @right are not equal.
+ * @test: The test context object.
+ * @left: an arbitrary expression that evaluates to a null terminated string.
+ * @right: an arbitrary expression that evaluates to a null terminated string.
+ *
+ * Sets an assertion that the values that @left and @right evaluate to are
+ * not equal. This is semantically equivalent to
+ * KUNIT_ASSERT_TRUE(@test, strcmp((@left), (@right))). See KUNIT_ASSERT_TRUE()
+ * for more information.
+ */
+#define KUNIT_ASSERT_STRNEQ(test, left, right) \
+ KUNIT_BINARY_STR_NE_ASSERTION(test, KUNIT_ASSERTION, left, right)
+
+#define KUNIT_ASSERT_STRNEQ_MSG(test, left, right, fmt, ...) \
+ KUNIT_BINARY_STR_NE_MSG_ASSERTION(test, \
+ KUNIT_ASSERTION, \
+ left, \
+ right, \
+ fmt, \
+ ##__VA_ARGS__)
+
+/**
+ * KUNIT_ASSERT_NOT_ERR_OR_NULL() - Assertion that @ptr is not null and not err.
+ * @test: The test context object.
+ * @ptr: an arbitrary pointer.
+ *
+ * Sets an assertion that the value that @ptr evaluates to is not null and not
+ * an errno stored in a pointer. This is the same as
+ * KUNIT_EXPECT_NOT_ERR_OR_NULL(), except it causes an assertion failure (see
+ * KUNIT_ASSERT_TRUE()) when the assertion is not met.
+ */
+#define KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ptr) \
+ KUNIT_PTR_NOT_ERR_OR_NULL_ASSERTION(test, KUNIT_ASSERTION, ptr)
+
+#define KUNIT_ASSERT_NOT_ERR_OR_NULL_MSG(test, ptr, fmt, ...) \
+ KUNIT_PTR_NOT_ERR_OR_NULL_MSG_ASSERTION(test, \
+ KUNIT_ASSERTION, \
+ ptr, \
+ fmt, \
+ ##__VA_ARGS__)
+
+#endif /* _KUNIT_TEST_H */
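
For orientation, a minimal sketch of a test case built on the macros above; example_add() and the allocation size are invented for illustration, and kunit_kzalloc() plus the usual kernel headers are assumed to be available:

static int example_add(int a, int b)
{
	return a + b;	/* stand-in for the code under test */
}

static void example_test_case(struct kunit *test)
{
	void *buf = kunit_kzalloc(test, 16, GFP_KERNEL);

	/* Assertion failure: abort this test case if the allocation failed. */
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buf);

	/* Expectation failures are recorded, but the test keeps running. */
	KUNIT_EXPECT_EQ(test, example_add(2, 2), 4);
	KUNIT_EXPECT_GT_MSG(test, example_add(2, 2), 0, "sum should be positive");
}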
diff --git a/include/kunit/try-catch.h b/include/kunit/try-catch.h
new file mode 100644
index 000000000000..c507dd43119d
--- /dev/null
+++ b/include/kunit/try-catch.h
@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * An API that allows a function which may fail to be executed, and allows
+ * recovering from such a failure in a controlled manner.
+ *
+ * Copyright (C) 2019, Google LLC.
+ * Author: Brendan Higgins <brendanhiggins@google.com>
+ */
+
+#ifndef _KUNIT_TRY_CATCH_H
+#define _KUNIT_TRY_CATCH_H
+
+#include <linux/types.h>
+
+typedef void (*kunit_try_catch_func_t)(void *);
+
+struct completion;
+struct kunit;
+
+/**
+ * struct kunit_try_catch - provides a generic way to run code which might fail.
+ * @test: The test case that is currently being executed.
+ * @try_completion: Completion that the control thread waits on while test runs.
+ * @try_result: Contains any errno obtained while running test case.
+ * @try: The function, the test case, to attempt to run.
+ * @catch: The function called if @try bails out.
+ * @context: used to pass user data to the try and catch functions.
+ *
+ * kunit_try_catch provides a generic, architecture independent way to execute
+ * an arbitrary function of type kunit_try_catch_func_t which may bail out by
+ * calling kunit_try_catch_throw(). If kunit_try_catch_throw() is called, @try
+ * is stopped at the site of invocation and @catch is called.
+ *
+ * struct kunit_try_catch provides a generic interface for the functionality
+ * needed to implement kunit->abort() which in turn is needed for implementing
+ * assertions. Assertions allow stating a precondition for a test simplifying
+ * how test cases are written and presented.
+ *
+ * Assertions are like expectations, except they abort (call
+ * kunit_try_catch_throw()) when the specified condition is not met. This is
+ * useful when you look at a test case as a logical statement about some piece
+ * of code, where assertions are the premises for the test case, and the
+ * conclusion is a set of predicates, or rather expectations, that must all be
+ * true. If your premises are violated, it does not make sense to continue.
+ */
+struct kunit_try_catch {
+ /* private: internal use only. */
+ struct kunit *test;
+ struct completion *try_completion;
+ int try_result;
+ kunit_try_catch_func_t try;
+ kunit_try_catch_func_t catch;
+ void *context;
+};
+
+void kunit_try_catch_run(struct kunit_try_catch *try_catch, void *context);
+
+void __noreturn kunit_try_catch_throw(struct kunit_try_catch *try_catch);
+
+static inline int kunit_try_catch_get_result(struct kunit_try_catch *try_catch)
+{
+ return try_catch->try_result;
+}
+
+#endif /* _KUNIT_TRY_CATCH_H */
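
As a rough sketch of the control flow only: the struct members are marked private and are normally wired up by the KUnit core, so the direct initialization below is purely illustrative, and the helper names are invented.

static void example_try(void *context)
{
	struct kunit_try_catch *try_catch = context;

	/* Bail out of the try function; execution resumes in the catch. */
	kunit_try_catch_throw(try_catch);
}

static void example_catch(void *context)
{
	/* Runs only when example_try() bails out or dies. */
}

static void example_run(struct kunit *test)
{
	struct kunit_try_catch tc = {
		.test = test,
		.try = example_try,
		.catch = example_catch,
	};

	kunit_try_catch_run(&tc, &tc);
	if (kunit_try_catch_get_result(&tc))
		pr_warn("try function did not complete normally\n");
}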
diff --git a/include/kvm/arm_hypercalls.h b/include/kvm/arm_hypercalls.h
new file mode 100644
index 000000000000..0e2509d27910
--- /dev/null
+++ b/include/kvm/arm_hypercalls.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2019 Arm Ltd. */
+
+#ifndef __KVM_ARM_HYPERCALLS_H
+#define __KVM_ARM_HYPERCALLS_H
+
+#include <asm/kvm_emulate.h>
+
+int kvm_hvc_call_handler(struct kvm_vcpu *vcpu);
+
+static inline u32 smccc_get_function(struct kvm_vcpu *vcpu)
+{
+ return vcpu_get_reg(vcpu, 0);
+}
+
+static inline unsigned long smccc_get_arg1(struct kvm_vcpu *vcpu)
+{
+ return vcpu_get_reg(vcpu, 1);
+}
+
+static inline unsigned long smccc_get_arg2(struct kvm_vcpu *vcpu)
+{
+ return vcpu_get_reg(vcpu, 2);
+}
+
+static inline unsigned long smccc_get_arg3(struct kvm_vcpu *vcpu)
+{
+ return vcpu_get_reg(vcpu, 3);
+}
+
+static inline void smccc_set_retval(struct kvm_vcpu *vcpu,
+ unsigned long a0,
+ unsigned long a1,
+ unsigned long a2,
+ unsigned long a3)
+{
+ vcpu_set_reg(vcpu, 0, a0);
+ vcpu_set_reg(vcpu, 1, a1);
+ vcpu_set_reg(vcpu, 2, a2);
+ vcpu_set_reg(vcpu, 3, a3);
+}
+
+#endif
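
A hedged sketch of a handler shaped like kvm_hvc_call_handler(), using the accessors above; the function ID being tested is arbitrary, and SMCCC_RET_NOT_SUPPORTED is assumed to come from <linux/arm-smccc.h>:

static int example_hvc_handler(struct kvm_vcpu *vcpu)
{
	u32 func_id = smccc_get_function(vcpu);
	unsigned long arg = smccc_get_arg1(vcpu);

	if (func_id != 0x86000000) {		/* arbitrary example ID */
		smccc_set_retval(vcpu, SMCCC_RET_NOT_SUPPORTED, 0, 0, 0);
		return 1;
	}

	/* Echo the first argument back in x0/r0. */
	smccc_set_retval(vcpu, arg, 0, 0, 0);
	return 1;
}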
diff --git a/include/kvm/arm_psci.h b/include/kvm/arm_psci.h
index 632e78bdef4d..5b58bd2fe088 100644
--- a/include/kvm/arm_psci.h
+++ b/include/kvm/arm_psci.h
@@ -40,7 +40,7 @@ static inline int kvm_psci_version(struct kvm_vcpu *vcpu, struct kvm *kvm)
}
-int kvm_hvc_call_handler(struct kvm_vcpu *vcpu);
+int kvm_psci_call(struct kvm_vcpu *vcpu);
struct kvm_one_reg;
diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
index af4f09c02bf1..63457908c9c4 100644
--- a/include/kvm/arm_vgic.h
+++ b/include/kvm/arm_vgic.h
@@ -70,6 +70,7 @@ struct vgic_global {
/* Hardware has GICv4? */
bool has_gicv4;
+ bool has_gicv4_1;
/* GIC system register CPU interface */
struct static_key_false gicv3_cpuif;
@@ -240,7 +241,7 @@ struct vgic_dist {
* Contains the attributes and gpa of the LPI configuration table.
* Since we report GICR_TYPER.CommonLPIAff as 0b00, we can share
* one address across all redistributors.
- * GICv3 spec: 6.1.2 "LPI Configuration tables"
+ * GICv3 spec: IHI 0069E 6.1.1 "LPI Configuration tables"
*/
u64 propbaser;
@@ -378,8 +379,6 @@ static inline int kvm_vgic_get_max_vcpus(void)
return kvm_vgic_global_state.max_gic_vcpus;
}
-int kvm_send_userspace_msi(struct kvm *kvm, struct kvm_msi *msi);
-
/**
* kvm_vgic_setup_default_irq_routing:
* Setup a default flat gsi routing table mapping all SPIs
@@ -396,7 +395,7 @@ int kvm_vgic_v4_set_forwarding(struct kvm *kvm, int irq,
int kvm_vgic_v4_unset_forwarding(struct kvm *kvm, int irq,
struct kvm_kernel_irq_routing_entry *irq_entry);
-void kvm_vgic_v4_enable_doorbell(struct kvm_vcpu *vcpu);
-void kvm_vgic_v4_disable_doorbell(struct kvm_vcpu *vcpu);
+int vgic_v4_load(struct kvm_vcpu *vcpu);
+int vgic_v4_put(struct kvm_vcpu *vcpu, bool need_db);
#endif /* __KVM_ARM_VGIC_H */
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 8b4e516bac00..0f24d701fbdc 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -279,6 +279,21 @@ static inline bool invalid_phys_cpuid(phys_cpuid_t phys_id)
/* Validate the processor object's proc_id */
bool acpi_duplicate_processor_id(int proc_id);
+/* Processor _CST control */
+struct acpi_processor_power;
+
+#ifdef CONFIG_ACPI_PROCESSOR_CSTATE
+bool acpi_processor_claim_cst_control(void);
+int acpi_processor_evaluate_cst(acpi_handle handle, u32 cpu,
+ struct acpi_processor_power *info);
+#else
+static inline bool acpi_processor_claim_cst_control(void) { return false; }
+static inline int acpi_processor_evaluate_cst(acpi_handle handle, u32 cpu,
+ struct acpi_processor_power *info)
+{
+ return -ENODEV;
+}
+#endif
#ifdef CONFIG_ACPI_HOTPLUG_CPU
/* Arch dependent functions for cpu hotplug support */
@@ -678,6 +693,14 @@ static inline bool acpi_dev_present(const char *hid, const char *uid, s64 hrv)
return false;
}
+struct acpi_device;
+
+static inline bool
+acpi_dev_hid_uid_match(struct acpi_device *adev, const char *hid2, const char *uid2)
+{
+ return false;
+}
+
static inline struct acpi_device *
acpi_dev_get_first_match_dev(const char *hid, const char *uid, s64 hrv)
{
diff --git a/include/linux/aer.h b/include/linux/aer.h
index 514bffa11dbb..fa19e01f418a 100644
--- a/include/linux/aer.h
+++ b/include/linux/aer.h
@@ -46,6 +46,8 @@ int pci_enable_pcie_error_reporting(struct pci_dev *dev);
int pci_disable_pcie_error_reporting(struct pci_dev *dev);
int pci_cleanup_aer_uncorrect_error_status(struct pci_dev *dev);
int pci_cleanup_aer_error_status_regs(struct pci_dev *dev);
+void pci_save_aer_state(struct pci_dev *dev);
+void pci_restore_aer_state(struct pci_dev *dev);
#else
static inline int pci_enable_pcie_error_reporting(struct pci_dev *dev)
{
@@ -63,6 +65,8 @@ static inline int pci_cleanup_aer_error_status_regs(struct pci_dev *dev)
{
return -EINVAL;
}
+static inline void pci_save_aer_state(struct pci_dev *dev) {}
+static inline void pci_restore_aer_state(struct pci_dev *dev) {}
#endif
void cper_print_aer(struct pci_dev *dev, int aer_severity,
diff --git a/include/linux/agpgart.h b/include/linux/agpgart.h
index c6b61ca97053..21b34a96cfd8 100644
--- a/include/linux/agpgart.h
+++ b/include/linux/agpgart.h
@@ -30,8 +30,6 @@
#include <linux/agp_backend.h>
#include <uapi/linux/agpgart.h>
-#define AGPGART_MINOR 175
-
struct agp_info {
struct agp_version version; /* version of the driver */
u32 bridge_id; /* bridge vendor/device */
diff --git a/include/linux/ahci_platform.h b/include/linux/ahci_platform.h
index 6782f0d45ebe..49e5383d4222 100644
--- a/include/linux/ahci_platform.h
+++ b/include/linux/ahci_platform.h
@@ -19,6 +19,8 @@ struct ahci_host_priv;
struct platform_device;
struct scsi_host_template;
+int ahci_platform_enable_phys(struct ahci_host_priv *hpriv);
+void ahci_platform_disable_phys(struct ahci_host_priv *hpriv);
int ahci_platform_enable_clks(struct ahci_host_priv *hpriv);
void ahci_platform_disable_clks(struct ahci_host_priv *hpriv);
int ahci_platform_enable_regulators(struct ahci_host_priv *hpriv);
diff --git a/include/linux/alarmtimer.h b/include/linux/alarmtimer.h
index 74748e306f4b..05e758b8b894 100644
--- a/include/linux/alarmtimer.h
+++ b/include/linux/alarmtimer.h
@@ -60,7 +60,11 @@ u64 alarm_forward(struct alarm *alarm, ktime_t now, ktime_t interval);
u64 alarm_forward_now(struct alarm *alarm, ktime_t interval);
ktime_t alarm_expires_remaining(const struct alarm *alarm);
+#ifdef CONFIG_RTC_CLASS
/* Provide way to access the rtc device being used by alarmtimers */
struct rtc_device *alarmtimer_get_rtcdev(void);
+#else
+static inline struct rtc_device *alarmtimer_get_rtcdev(void) { return NULL; }
+#endif
#endif
diff --git a/include/linux/alcor_pci.h b/include/linux/alcor_pci.h
index 4416df597526..8274ed525e9f 100644
--- a/include/linux/alcor_pci.h
+++ b/include/linux/alcor_pci.h
@@ -17,6 +17,7 @@
#define PCI_ID_ALCOR_MICRO 0x1AEA
#define PCI_ID_AU6601 0x6601
#define PCI_ID_AU6621 0x6621
+#define PCI_ID_AU6625 0x6625
#define MHZ_TO_HZ(freq) ((freq) * 1000 * 1000)
diff --git a/include/linux/arch_topology.h b/include/linux/arch_topology.h
index 42f2b5126094..0566cb3314ef 100644
--- a/include/linux/arch_topology.h
+++ b/include/linux/arch_topology.h
@@ -16,9 +16,7 @@ bool topology_parse_cpu_capacity(struct device_node *cpu_node, int cpu);
DECLARE_PER_CPU(unsigned long, cpu_scale);
-struct sched_domain;
-static inline
-unsigned long topology_get_cpu_scale(int cpu)
+static inline unsigned long topology_get_cpu_scale(int cpu)
{
return per_cpu(cpu_scale, cpu);
}
@@ -27,12 +25,23 @@ void topology_set_cpu_scale(unsigned int cpu, unsigned long capacity);
DECLARE_PER_CPU(unsigned long, freq_scale);
-static inline
-unsigned long topology_get_freq_scale(int cpu)
+static inline unsigned long topology_get_freq_scale(int cpu)
{
return per_cpu(freq_scale, cpu);
}
+bool arch_freq_counters_available(struct cpumask *cpus);
+
+DECLARE_PER_CPU(unsigned long, thermal_pressure);
+
+static inline unsigned long topology_get_thermal_pressure(int cpu)
+{
+ return per_cpu(thermal_pressure, cpu);
+}
+
+void arch_set_thermal_pressure(struct cpumask *cpus,
+ unsigned long th_pressure);
+
struct cpu_topology {
int thread_id;
int core_id;
@@ -57,6 +66,7 @@ const struct cpumask *cpu_coregroup_mask(int cpu);
void update_siblings_masks(unsigned int cpu);
void remove_cpu_topology(unsigned int cpuid);
void reset_cpu_topology(void);
+int parse_acpi_topology(void);
#endif
#endif /* _LINUX_ARCH_TOPOLOGY_H_ */
diff --git a/include/linux/arm-smccc.h b/include/linux/arm-smccc.h
index 080012a6f025..59494df0f55b 100644
--- a/include/linux/arm-smccc.h
+++ b/include/linux/arm-smccc.h
@@ -45,6 +45,7 @@
#define ARM_SMCCC_OWNER_SIP 2
#define ARM_SMCCC_OWNER_OEM 3
#define ARM_SMCCC_OWNER_STANDARD 4
+#define ARM_SMCCC_OWNER_STANDARD_HYP 5
#define ARM_SMCCC_OWNER_TRUSTED_APP 48
#define ARM_SMCCC_OWNER_TRUSTED_APP_END 49
#define ARM_SMCCC_OWNER_TRUSTED_OS 50
@@ -80,6 +81,22 @@
#include <linux/linkage.h>
#include <linux/types.h>
+
+enum arm_smccc_conduit {
+ SMCCC_CONDUIT_NONE,
+ SMCCC_CONDUIT_SMC,
+ SMCCC_CONDUIT_HVC,
+};
+
+/**
+ * arm_smccc_1_1_get_conduit()
+ *
+ * Returns the conduit to be used for SMCCCv1.1 or later.
+ *
+ * When SMCCCv1.1 is not present, returns SMCCC_CONDUIT_NONE.
+ */
+enum arm_smccc_conduit arm_smccc_1_1_get_conduit(void);
+
/**
* struct arm_smccc_res - Result from SMC/HVC call
* @a0-a3 result values from registers 0 to 3
@@ -302,5 +319,63 @@ asmlinkage void __arm_smccc_hvc(unsigned long a0, unsigned long a1,
#define SMCCC_RET_NOT_SUPPORTED -1
#define SMCCC_RET_NOT_REQUIRED -2
+/*
+ * Like arm_smccc_1_1* but always returns SMCCC_RET_NOT_SUPPORTED.
+ * Used when the SMCCC conduit is not defined. The empty asm statement
+ * avoids compiler warnings about unused variables.
+ */
+#define __fail_smccc_1_1(...) \
+ do { \
+ __declare_args(__count_args(__VA_ARGS__), __VA_ARGS__); \
+ asm ("" __constraints(__count_args(__VA_ARGS__))); \
+ if (___res) \
+ ___res->a0 = SMCCC_RET_NOT_SUPPORTED; \
+ } while (0)
+
+/*
+ * arm_smccc_1_1_invoke() - make an SMCCC v1.1 compliant call
+ *
+ * This is a variadic macro taking one to eight source arguments, and
+ * an optional return structure.
+ *
+ * @a0-a7: arguments passed in registers 0 to 7
+ * @res: result values from registers 0 to 3
+ *
+ * This macro will make either an HVC call or an SMC call depending on the
+ * current SMCCC conduit. If no valid conduit is available then -1
+ * (SMCCC_RET_NOT_SUPPORTED) is returned in @res.a0 (if supplied).
+ *
+ * The return value also provides the conduit that was used.
+ */
+#define arm_smccc_1_1_invoke(...) ({ \
+ int method = arm_smccc_1_1_get_conduit(); \
+ switch (method) { \
+ case SMCCC_CONDUIT_HVC: \
+ arm_smccc_1_1_hvc(__VA_ARGS__); \
+ break; \
+ case SMCCC_CONDUIT_SMC: \
+ arm_smccc_1_1_smc(__VA_ARGS__); \
+ break; \
+ default: \
+ __fail_smccc_1_1(__VA_ARGS__); \
+ method = SMCCC_CONDUIT_NONE; \
+ break; \
+ } \
+ method; \
+ })
+
+/* Paravirtualised time calls (defined by ARM DEN0057A) */
+#define ARM_SMCCC_HV_PV_TIME_FEATURES \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
+ ARM_SMCCC_SMC_64, \
+ ARM_SMCCC_OWNER_STANDARD_HYP, \
+ 0x20)
+
+#define ARM_SMCCC_HV_PV_TIME_ST \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
+ ARM_SMCCC_SMC_64, \
+ ARM_SMCCC_OWNER_STANDARD_HYP, \
+ 0x21)
+
#endif /*__ASSEMBLY__*/
#endif /*__LINUX_ARM_SMCCC_H*/
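
A short sketch of a caller using arm_smccc_1_1_invoke() to probe the paravirtualised time feature defined above; the helper name is invented and error handling is trimmed:

static bool example_has_pv_time_st(void)
{
	struct arm_smccc_res res;

	if (arm_smccc_1_1_get_conduit() == SMCCC_CONDUIT_NONE)
		return false;

	/* PV_TIME_FEATURES takes the queried function ID as its argument. */
	arm_smccc_1_1_invoke(ARM_SMCCC_HV_PV_TIME_FEATURES,
			     ARM_SMCCC_HV_PV_TIME_ST, &res);

	return (long)res.a0 != SMCCC_RET_NOT_SUPPORTED;
}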
diff --git a/include/linux/arm_sdei.h b/include/linux/arm_sdei.h
index 3305ea7f9dc7..0a241c5c911d 100644
--- a/include/linux/arm_sdei.h
+++ b/include/linux/arm_sdei.h
@@ -5,12 +5,6 @@
#include <uapi/linux/arm_sdei.h>
-enum sdei_conduit_types {
- CONDUIT_INVALID = 0,
- CONDUIT_SMC,
- CONDUIT_HVC,
-};
-
#include <acpi/ghes.h>
#ifdef CONFIG_ARM_SDE_INTERFACE
diff --git a/include/linux/atmel-isc-media.h b/include/linux/atmel-isc-media.h
new file mode 100644
index 000000000000..79a320fb724e
--- /dev/null
+++ b/include/linux/atmel-isc-media.h
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2019 Microchip Technology Inc. and its subsidiaries
+ *
+ * Author: Eugen Hristev <eugen.hristev@microchip.com>
+ */
+
+#ifndef __LINUX_ATMEL_ISC_MEDIA_H__
+#define __LINUX_ATMEL_ISC_MEDIA_H__
+
+/*
+ * There are 8 controls available:
+ * 4 gain controls, sliders, for each of the BAYER components: R, B, GR, GB.
+ * These gains are multipliers for each component, in format unsigned 0:4:9 with
+ * a default value of 512 (1.0 multiplier).
+ * 4 offset controls, sliders, for each of the BAYER components: R, B, GR, GB.
+ * These offsets are added to or subtracted from each component, in format
+ * signed 1:12:0, with a default value of 0 (+/- 0).
+ *
+ * To expose this to userspace, the driver adds 8 custom controls, grouped in
+ * an auto cluster.
+ *
+ * To summarize the functionality:
+ * The auto cluster switch is the auto white balance control, and it works
+ * like this:
+ * AWB == 1: autowhitebalance is on, the do_white_balance button is inactive,
+ * the gains/offsets are inactive, but volatile and readable.
+ * Thus, the results of the whitebalance algorithm are available to userspace to
+ * read at any time.
+ * AWB == 0: autowhitebalance is off, cluster is in manual mode, user can
+ * configure the gain/offsets directly.
+ * In addition, if the do_white_balance button is pressed, the driver performs
+ * a one-time adjustment (preferably with a color checker card), and userspace
+ * can then read the new values.
+ *
+ * With this feature, userspace can save the coefficients and restore them, for
+ * example after a reboot or after reprobing the driver.
+ */
+
+enum atmel_isc_ctrl_id {
+ /* Red component gain control */
+ ISC_CID_R_GAIN = (V4L2_CID_USER_ATMEL_ISC_BASE + 0),
+ /* Blue component gain control */
+ ISC_CID_B_GAIN,
+ /* Green Red component gain control */
+ ISC_CID_GR_GAIN,
+ /* Green Blue gain control */
+ ISC_CID_GB_GAIN,
+ /* Red component offset control */
+ ISC_CID_R_OFFSET,
+ /* Blue component offset control */
+ ISC_CID_B_OFFSET,
+ /* Green Red component offset control */
+ ISC_CID_GR_OFFSET,
+ /* Green Blue component offset control */
+ ISC_CID_GB_OFFSET,
+};
+
+#endif
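
As a userspace illustration (the device node path is a placeholder, error handling is omitted, and the header path is assumed): switching the cluster to manual mode and reading one gain might look like this.

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>
#include <linux/atmel-isc-media.h>

int main(void)
{
	int fd = open("/dev/v4l-subdev0", O_RDWR);	/* placeholder node */
	struct v4l2_control awb = {
		.id = V4L2_CID_AUTO_WHITE_BALANCE,
		.value = 0,	/* manual mode: gains/offsets become writable */
	};
	struct v4l2_control r_gain = { .id = ISC_CID_R_GAIN };

	if (fd < 0)
		return 1;

	ioctl(fd, VIDIOC_S_CTRL, &awb);
	ioctl(fd, VIDIOC_G_CTRL, &r_gain);
	printf("red gain: %d\n", r_gain.value);
	return 0;
}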
diff --git a/include/linux/attribute_container.h b/include/linux/attribute_container.h
index d12bb2153cd6..e4004d1e6725 100644
--- a/include/linux/attribute_container.h
+++ b/include/linux/attribute_container.h
@@ -54,6 +54,13 @@ void attribute_container_device_trigger(struct device *dev,
int (*fn)(struct attribute_container *,
struct device *,
struct device *));
+int attribute_container_device_trigger_safe(struct device *dev,
+ int (*fn)(struct attribute_container *,
+ struct device *,
+ struct device *),
+ int (*undo)(struct attribute_container *,
+ struct device *,
+ struct device *));
void attribute_container_trigger(struct device *dev,
int (*fn)(struct attribute_container *,
struct device *));
diff --git a/include/linux/audit.h b/include/linux/audit.h
index aee3dc9eb378..f9ceae57ca8d 100644
--- a/include/linux/audit.h
+++ b/include/linux/audit.h
@@ -156,7 +156,8 @@ extern void audit_log_d_path(struct audit_buffer *ab,
const struct path *path);
extern void audit_log_key(struct audit_buffer *ab,
char *key);
-extern void audit_log_link_denied(const char *operation);
+extern void audit_log_path_denied(int type,
+ const char *operation);
extern void audit_log_lost(const char *message);
extern int audit_log_task_context(struct audit_buffer *ab);
@@ -217,7 +218,7 @@ static inline void audit_log_d_path(struct audit_buffer *ab,
{ }
static inline void audit_log_key(struct audit_buffer *ab, char *key)
{ }
-static inline void audit_log_link_denied(const char *string)
+static inline void audit_log_path_denied(int type, const char *operation)
{ }
static inline int audit_log_task_context(struct audit_buffer *ab)
{
diff --git a/include/linux/b1pcmcia.h b/include/linux/b1pcmcia.h
deleted file mode 100644
index 12a867c6061e..000000000000
--- a/include/linux/b1pcmcia.h
+++ /dev/null
@@ -1,21 +0,0 @@
-/* $Id: b1pcmcia.h,v 1.1.8.2 2001/09/23 22:25:05 kai Exp $
- *
- * Exported functions of module b1pcmcia to be called by
- * avm_cs card services module.
- *
- * Copyright 1999 by Carsten Paeth (calle@calle.in-berlin.de)
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- */
-
-#ifndef _B1PCMCIA_H_
-#define _B1PCMCIA_H_
-
-int b1pcmcia_addcard_b1(unsigned int port, unsigned irq);
-int b1pcmcia_addcard_m1(unsigned int port, unsigned irq);
-int b1pcmcia_addcard_m2(unsigned int port, unsigned irq);
-int b1pcmcia_delcard(unsigned int port, unsigned irq);
-
-#endif /* _B1PCMCIA_H_ */
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
index 97967ce06de3..f88197c1ffc2 100644
--- a/include/linux/backing-dev.h
+++ b/include/linux/backing-dev.h
@@ -13,6 +13,7 @@
#include <linux/fs.h>
#include <linux/sched.h>
#include <linux/blkdev.h>
+#include <linux/device.h>
#include <linux/writeback.h>
#include <linux/blk-cgroup.h>
#include <linux/backing-dev-defs.h>
@@ -504,4 +505,13 @@ static inline int bdi_rw_congested(struct backing_dev_info *bdi)
(1 << WB_async_congested));
}
+extern const char *bdi_unknown_name;
+
+static inline const char *bdi_dev_name(struct backing_dev_info *bdi)
+{
+ if (!bdi || !bdi->dev)
+ return bdi_unknown_name;
+ return dev_name(bdi->dev);
+}
+
#endif /* _LINUX_BACKING_DEV_H */
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 3cdb84cdc488..c1c0f9ea4e63 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -441,14 +441,6 @@ void __bio_add_page(struct bio *bio, struct page *page,
unsigned int len, unsigned int off);
int bio_iov_iter_get_pages(struct bio *bio, struct iov_iter *iter);
void bio_release_pages(struct bio *bio, bool mark_dirty);
-struct rq_map_data;
-extern struct bio *bio_map_user_iov(struct request_queue *,
- struct iov_iter *, gfp_t);
-extern void bio_unmap_user(struct bio *);
-extern struct bio *bio_map_kern(struct request_queue *, void *, unsigned int,
- gfp_t);
-extern struct bio *bio_copy_kern(struct request_queue *, void *, unsigned int,
- gfp_t, int);
extern void bio_set_pages_dirty(struct bio *bio);
extern void bio_check_pages_dirty(struct bio *bio);
@@ -463,13 +455,9 @@ extern void bio_copy_data_iter(struct bio *dst, struct bvec_iter *dst_iter,
extern void bio_copy_data(struct bio *dst, struct bio *src);
extern void bio_list_copy_data(struct bio *dst, struct bio *src);
extern void bio_free_pages(struct bio *bio);
-
-extern struct bio *bio_copy_user_iov(struct request_queue *,
- struct rq_map_data *,
- struct iov_iter *,
- gfp_t);
-extern int bio_uncopy_user(struct bio *);
void zero_fill_bio_iter(struct bio *bio, struct bvec_iter iter);
+void bio_truncate(struct bio *bio, unsigned new_size);
+void guard_bio_eod(struct bio *bio);
static inline void zero_fill_bio(struct bio *bio)
{
diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h
index 29fc933df3bf..e52ceb1a73d3 100644
--- a/include/linux/bitmap.h
+++ b/include/linux/bitmap.h
@@ -53,6 +53,8 @@
* bitmap_find_next_zero_area_off(buf, len, pos, n, mask) as above
* bitmap_shift_right(dst, src, n, nbits) *dst = *src >> n
* bitmap_shift_left(dst, src, n, nbits) *dst = *src << n
+ * bitmap_cut(dst, src, first, n, nbits) Cut n bits from first, copy rest
+ * bitmap_replace(dst, old, new, mask, nbits) *dst = (*old & ~(*mask)) | (*new & *mask)
* bitmap_remap(dst, src, old, new, nbits) *dst = map(old, new)(src)
* bitmap_bitremap(oldbit, old, new, nbits) newbit = map(old, new)(oldbit)
* bitmap_onto(dst, orig, relmap, nbits) *dst = orig relative to relmap
@@ -66,6 +68,8 @@
* bitmap_allocate_region(bitmap, pos, order) Allocate specified bit region
* bitmap_from_arr32(dst, buf, nbits) Copy nbits from u32[] buf to dst
* bitmap_to_arr32(buf, src, nbits) Copy nbits from buf to u32[] dst
+ * bitmap_get_value8(map, start) Get 8bit value from map at start
+ * bitmap_set_value8(map, value, start) Set 8bit value to map at start
*
* Note, bitmap_zero() and bitmap_fill() operate over the region of
* unsigned longs, that is, bits behind bitmap till the unsigned long
@@ -130,6 +134,9 @@ extern void __bitmap_shift_right(unsigned long *dst, const unsigned long *src,
unsigned int shift, unsigned int nbits);
extern void __bitmap_shift_left(unsigned long *dst, const unsigned long *src,
unsigned int shift, unsigned int nbits);
+extern void bitmap_cut(unsigned long *dst, const unsigned long *src,
+ unsigned int first, unsigned int cut,
+ unsigned int nbits);
extern int __bitmap_and(unsigned long *dst, const unsigned long *bitmap1,
const unsigned long *bitmap2, unsigned int nbits);
extern void __bitmap_or(unsigned long *dst, const unsigned long *bitmap1,
@@ -138,6 +145,9 @@ extern void __bitmap_xor(unsigned long *dst, const unsigned long *bitmap1,
const unsigned long *bitmap2, unsigned int nbits);
extern int __bitmap_andnot(unsigned long *dst, const unsigned long *bitmap1,
const unsigned long *bitmap2, unsigned int nbits);
+extern void __bitmap_replace(unsigned long *dst,
+ const unsigned long *old, const unsigned long *new,
+ const unsigned long *mask, unsigned int nbits);
extern int __bitmap_intersects(const unsigned long *bitmap1,
const unsigned long *bitmap2, unsigned int nbits);
extern int __bitmap_subset(const unsigned long *bitmap1,
@@ -176,7 +186,7 @@ bitmap_find_next_zero_area(unsigned long *map,
align_mask, 0);
}
-extern int __bitmap_parse(const char *buf, unsigned int buflen, int is_user,
+extern int bitmap_parse(const char *buf, unsigned int buflen,
unsigned long *dst, int nbits);
extern int bitmap_parse_user(const char __user *ubuf, unsigned int ulen,
unsigned long *dst, int nbits);
@@ -432,12 +442,53 @@ static inline void bitmap_shift_left(unsigned long *dst, const unsigned long *sr
__bitmap_shift_left(dst, src, shift, nbits);
}
-static inline int bitmap_parse(const char *buf, unsigned int buflen,
- unsigned long *maskp, int nmaskbits)
+static inline void bitmap_replace(unsigned long *dst,
+ const unsigned long *old,
+ const unsigned long *new,
+ const unsigned long *mask,
+ unsigned int nbits)
{
- return __bitmap_parse(buf, buflen, 0, maskp, nmaskbits);
+ if (small_const_nbits(nbits))
+ *dst = (*old & ~(*mask)) | (*new & *mask);
+ else
+ __bitmap_replace(dst, old, new, mask, nbits);
+}
+
+static inline void bitmap_next_clear_region(unsigned long *bitmap,
+ unsigned int *rs, unsigned int *re,
+ unsigned int end)
+{
+ *rs = find_next_zero_bit(bitmap, end, *rs);
+ *re = find_next_bit(bitmap, end, *rs + 1);
+}
+
+static inline void bitmap_next_set_region(unsigned long *bitmap,
+ unsigned int *rs, unsigned int *re,
+ unsigned int end)
+{
+ *rs = find_next_bit(bitmap, end, *rs);
+ *re = find_next_zero_bit(bitmap, end, *rs + 1);
}
+/*
+ * Bitmap region iterators. Iterates over the bitmap between [@start, @end).
+ * @rs and @re should be integer variables and will be set to start and end
+ * index of the current clear or set region.
+ */
+#define bitmap_for_each_clear_region(bitmap, rs, re, start, end) \
+ for ((rs) = (start), \
+ bitmap_next_clear_region((bitmap), &(rs), &(re), (end)); \
+ (rs) < (re); \
+ (rs) = (re) + 1, \
+ bitmap_next_clear_region((bitmap), &(rs), &(re), (end)))
+
+#define bitmap_for_each_set_region(bitmap, rs, re, start, end) \
+ for ((rs) = (start), \
+ bitmap_next_set_region((bitmap), &(rs), &(re), (end)); \
+ (rs) < (re); \
+ (rs) = (re) + 1, \
+ bitmap_next_set_region((bitmap), &(rs), &(re), (end)))
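
For illustration, iterating the set regions of a small bitmap with the helper above (assumes <linux/bitmap.h> and kernel printk; the bit pattern is arbitrary):

static void example_walk_regions(void)
{
	DECLARE_BITMAP(map, 64);
	unsigned int rs, re;

	bitmap_zero(map, 64);
	bitmap_set(map, 4, 8);			/* bits 4..11 */
	bitmap_set(map, 20, 2);			/* bits 20..21 */

	bitmap_for_each_set_region(map, rs, re, 0, 64)
		pr_info("set region [%u, %u)\n", rs, re);
	/* Expected: [4, 12) and [20, 22). */
}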
+
/**
* BITMAP_FROM_U64() - Represent u64 value in the format suitable for bitmap.
* @n: u64 value
@@ -489,6 +540,39 @@ static inline void bitmap_from_u64(unsigned long *dst, u64 mask)
dst[1] = mask >> 32;
}
+/**
+ * bitmap_get_value8 - get an 8-bit value within a memory region
+ * @map: address to the bitmap memory region
+ * @start: bit offset of the 8-bit value; must be a multiple of 8
+ *
+ * Returns the 8-bit value located at the @start bit offset within the @map
+ * memory region.
+ */
+static inline unsigned long bitmap_get_value8(const unsigned long *map,
+ unsigned long start)
+{
+ const size_t index = BIT_WORD(start);
+ const unsigned long offset = start % BITS_PER_LONG;
+
+ return (map[index] >> offset) & 0xFF;
+}
+
+/**
+ * bitmap_set_value8 - set an 8-bit value within a memory region
+ * @map: address to the bitmap memory region
+ * @value: the 8-bit value; values wider than 8 bits may clobber bitmap
+ * @start: bit offset of the 8-bit value; must be a multiple of 8
+ */
+static inline void bitmap_set_value8(unsigned long *map, unsigned long value,
+ unsigned long start)
+{
+ const size_t index = BIT_WORD(start);
+ const unsigned long offset = start % BITS_PER_LONG;
+
+ map[index] &= ~(0xFFUL << offset);
+ map[index] |= value << offset;
+}
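
A small sketch of the 8-bit accessors above (the values are arbitrary; assumes <linux/bitmap.h>):

static void example_byte_access(void)
{
	DECLARE_BITMAP(map, 16);
	unsigned long byte;

	bitmap_zero(map, 16);
	bitmap_set_value8(map, 0xA5, 8);	/* write bits 8..15 */
	byte = bitmap_get_value8(map, 8);	/* byte == 0xA5 */
	pr_debug("byte: 0x%02lx\n", byte);
}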
+
#endif /* __ASSEMBLY__ */
#endif /* __LINUX_BITMAP_H */
diff --git a/include/linux/bitops.h b/include/linux/bitops.h
index c94a9ff9f082..47f54b459c26 100644
--- a/include/linux/bitops.h
+++ b/include/linux/bitops.h
@@ -11,8 +11,11 @@
# define aligned_byte_mask(n) (~0xffUL << (BITS_PER_LONG - 8 - 8*(n)))
#endif
-#define BITS_PER_TYPE(type) (sizeof(type) * BITS_PER_BYTE)
+#define BITS_PER_TYPE(type) (sizeof(type) * BITS_PER_BYTE)
#define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, BITS_PER_TYPE(long))
+#define BITS_TO_U64(nr) DIV_ROUND_UP(nr, BITS_PER_TYPE(u64))
+#define BITS_TO_U32(nr) DIV_ROUND_UP(nr, BITS_PER_TYPE(u32))
+#define BITS_TO_BYTES(nr) DIV_ROUND_UP(nr, BITS_PER_TYPE(char))
extern unsigned int __sw_hweight8(unsigned int w);
extern unsigned int __sw_hweight16(unsigned int w);
@@ -47,6 +50,18 @@ extern unsigned long __sw_hweight64(__u64 w);
(bit) < (size); \
(bit) = find_next_zero_bit((addr), (size), (bit) + 1))
+/**
+ * for_each_set_clump8 - iterate over bitmap for each 8-bit clump with set bits
+ * @start: bit offset to start search and to store the current iteration offset
+ * @clump: location to store copy of current 8-bit clump
+ * @bits: bitmap address to base the search on
+ * @size: bitmap size in number of bits
+ */
+#define for_each_set_clump8(start, clump, bits, size) \
+ for ((start) = find_first_clump8(&(clump), (bits), (size)); \
+ (start) < (size); \
+ (start) = find_next_clump8(&(clump), (bits), (size), (start) + 8))
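
A sketch of the clump iterator in use (assumes <linux/bitmap.h> for the bitmap helpers; the bit pattern is arbitrary):

static void example_walk_clumps(void)
{
	DECLARE_BITMAP(bits, 32);
	unsigned long clump;
	unsigned int start;

	bitmap_zero(bits, 32);
	bitmap_set(bits, 8, 4);			/* bits 8..11 */

	for_each_set_clump8(start, clump, bits, 32)
		pr_info("clump at bit %u: 0x%02lx\n", start, clump);
	/* Expected: a single clump at bit 8 with value 0x0f. */
}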
+
static inline int get_bitmask_order(unsigned int count)
{
int order;
diff --git a/include/linux/bits.h b/include/linux/bits.h
index 669d69441a62..a740bbcf3cd2 100644
--- a/include/linux/bits.h
+++ b/include/linux/bits.h
@@ -3,9 +3,9 @@
#define __LINUX_BITS_H
#include <linux/const.h>
+#include <vdso/bits.h>
#include <asm/bitsperlong.h>
-#define BIT(nr) (UL(1) << (nr))
#define BIT_ULL(nr) (ULL(1) << (nr))
#define BIT_MASK(nr) (UL(1) << ((nr) % BITS_PER_LONG))
#define BIT_WORD(nr) ((nr) / BITS_PER_LONG)
diff --git a/include/linux/blk-cgroup.h b/include/linux/blk-cgroup.h
index bed9e43f9426..e4a6949fd171 100644
--- a/include/linux/blk-cgroup.h
+++ b/include/linux/blk-cgroup.h
@@ -15,7 +15,9 @@
*/
#include <linux/cgroup.h>
+#include <linux/percpu.h>
#include <linux/percpu_counter.h>
+#include <linux/u64_stats_sync.h>
#include <linux/seq_file.h>
#include <linux/radix-tree.h>
#include <linux/blkdev.h>
@@ -31,15 +33,12 @@
#ifdef CONFIG_BLK_CGROUP
-enum blkg_rwstat_type {
- BLKG_RWSTAT_READ,
- BLKG_RWSTAT_WRITE,
- BLKG_RWSTAT_SYNC,
- BLKG_RWSTAT_ASYNC,
- BLKG_RWSTAT_DISCARD,
+enum blkg_iostat_type {
+ BLKG_IOSTAT_READ,
+ BLKG_IOSTAT_WRITE,
+ BLKG_IOSTAT_DISCARD,
- BLKG_RWSTAT_NR,
- BLKG_RWSTAT_TOTAL = BLKG_RWSTAT_NR,
+ BLKG_IOSTAT_NR,
};
struct blkcg_gq;
@@ -61,17 +60,15 @@ struct blkcg {
#endif
};
-/*
- * blkg_[rw]stat->aux_cnt is excluded for local stats but included for
- * recursive. Used to carry stats of dead children.
- */
-struct blkg_rwstat {
- struct percpu_counter cpu_cnt[BLKG_RWSTAT_NR];
- atomic64_t aux_cnt[BLKG_RWSTAT_NR];
+struct blkg_iostat {
+ u64 bytes[BLKG_IOSTAT_NR];
+ u64 ios[BLKG_IOSTAT_NR];
};
-struct blkg_rwstat_sample {
- u64 cnt[BLKG_RWSTAT_NR];
+struct blkg_iostat_set {
+ struct u64_stats_sync sync;
+ struct blkg_iostat cur;
+ struct blkg_iostat last;
};
/*
@@ -127,8 +124,8 @@ struct blkcg_gq {
/* is this blkg online? protected by both blkcg and q locks */
bool online;
- struct blkg_rwstat stat_bytes;
- struct blkg_rwstat stat_ios;
+ struct blkg_iostat_set __percpu *iostat_cpu;
+ struct blkg_iostat_set iostat;
struct blkg_policy_data *pd[BLKCG_MAX_POLS];
@@ -191,7 +188,6 @@ struct blkcg_gq *__blkg_lookup_create(struct blkcg *blkcg,
struct blkcg_gq *blkg_lookup_create(struct blkcg *blkcg,
struct request_queue *q);
int blkcg_init_queue(struct request_queue *q);
-void blkcg_drain_queue(struct request_queue *q);
void blkcg_exit_queue(struct request_queue *q);
/* Blkio controller policy registration */
@@ -202,13 +198,6 @@ int blkcg_activate_policy(struct request_queue *q,
void blkcg_deactivate_policy(struct request_queue *q,
const struct blkcg_policy *pol);
-static inline u64 blkg_rwstat_read_counter(struct blkg_rwstat *rwstat,
- unsigned int idx)
-{
- return atomic64_read(&rwstat->aux_cnt[idx]) +
- percpu_counter_sum_positive(&rwstat->cpu_cnt[idx]);
-}
-
const char *blkg_dev_name(struct blkcg_gq *blkg);
void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
u64 (*prfill)(struct seq_file *,
@@ -216,17 +205,6 @@ void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
const struct blkcg_policy *pol, int data,
bool show_total);
u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v);
-u64 __blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
- const struct blkg_rwstat_sample *rwstat);
-u64 blkg_prfill_rwstat(struct seq_file *sf, struct blkg_policy_data *pd,
- int off);
-int blkg_print_stat_bytes(struct seq_file *sf, void *v);
-int blkg_print_stat_ios(struct seq_file *sf, void *v);
-int blkg_print_stat_bytes_recursive(struct seq_file *sf, void *v);
-int blkg_print_stat_ios_recursive(struct seq_file *sf, void *v);
-
-void blkg_rwstat_recursive_sum(struct blkcg_gq *blkg, struct blkcg_policy *pol,
- int off, struct blkg_rwstat_sample *sum);
struct blkg_conf_ctx {
struct gendisk *disk;
@@ -578,128 +556,6 @@ static inline void blkg_put(struct blkcg_gq *blkg)
if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css), \
(p_blkg)->q, false)))
-static inline int blkg_rwstat_init(struct blkg_rwstat *rwstat, gfp_t gfp)
-{
- int i, ret;
-
- for (i = 0; i < BLKG_RWSTAT_NR; i++) {
- ret = percpu_counter_init(&rwstat->cpu_cnt[i], 0, gfp);
- if (ret) {
- while (--i >= 0)
- percpu_counter_destroy(&rwstat->cpu_cnt[i]);
- return ret;
- }
- atomic64_set(&rwstat->aux_cnt[i], 0);
- }
- return 0;
-}
-
-static inline void blkg_rwstat_exit(struct blkg_rwstat *rwstat)
-{
- int i;
-
- for (i = 0; i < BLKG_RWSTAT_NR; i++)
- percpu_counter_destroy(&rwstat->cpu_cnt[i]);
-}
-
-/**
- * blkg_rwstat_add - add a value to a blkg_rwstat
- * @rwstat: target blkg_rwstat
- * @op: REQ_OP and flags
- * @val: value to add
- *
- * Add @val to @rwstat. The counters are chosen according to @rw. The
- * caller is responsible for synchronizing calls to this function.
- */
-static inline void blkg_rwstat_add(struct blkg_rwstat *rwstat,
- unsigned int op, uint64_t val)
-{
- struct percpu_counter *cnt;
-
- if (op_is_discard(op))
- cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_DISCARD];
- else if (op_is_write(op))
- cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_WRITE];
- else
- cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_READ];
-
- percpu_counter_add_batch(cnt, val, BLKG_STAT_CPU_BATCH);
-
- if (op_is_sync(op))
- cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_SYNC];
- else
- cnt = &rwstat->cpu_cnt[BLKG_RWSTAT_ASYNC];
-
- percpu_counter_add_batch(cnt, val, BLKG_STAT_CPU_BATCH);
-}
-
-/**
- * blkg_rwstat_read - read the current values of a blkg_rwstat
- * @rwstat: blkg_rwstat to read
- *
- * Read the current snapshot of @rwstat and return it in the aux counts.
- */
-static inline void blkg_rwstat_read(struct blkg_rwstat *rwstat,
- struct blkg_rwstat_sample *result)
-{
- int i;
-
- for (i = 0; i < BLKG_RWSTAT_NR; i++)
- result->cnt[i] =
- percpu_counter_sum_positive(&rwstat->cpu_cnt[i]);
-}
-
-/**
- * blkg_rwstat_total - read the total count of a blkg_rwstat
- * @rwstat: blkg_rwstat to read
- *
- * Return the total count of @rwstat regardless of the IO direction. This
- * function can be called without synchronization and takes care of u64
- * atomicity.
- */
-static inline uint64_t blkg_rwstat_total(struct blkg_rwstat *rwstat)
-{
- struct blkg_rwstat_sample tmp = { };
-
- blkg_rwstat_read(rwstat, &tmp);
- return tmp.cnt[BLKG_RWSTAT_READ] + tmp.cnt[BLKG_RWSTAT_WRITE];
-}
-
-/**
- * blkg_rwstat_reset - reset a blkg_rwstat
- * @rwstat: blkg_rwstat to reset
- */
-static inline void blkg_rwstat_reset(struct blkg_rwstat *rwstat)
-{
- int i;
-
- for (i = 0; i < BLKG_RWSTAT_NR; i++) {
- percpu_counter_set(&rwstat->cpu_cnt[i], 0);
- atomic64_set(&rwstat->aux_cnt[i], 0);
- }
-}
-
-/**
- * blkg_rwstat_add_aux - add a blkg_rwstat into another's aux count
- * @to: the destination blkg_rwstat
- * @from: the source
- *
- * Add @from's count including the aux one to @to's aux count.
- */
-static inline void blkg_rwstat_add_aux(struct blkg_rwstat *to,
- struct blkg_rwstat *from)
-{
- u64 sum[BLKG_RWSTAT_NR];
- int i;
-
- for (i = 0; i < BLKG_RWSTAT_NR; i++)
- sum[i] = percpu_counter_sum_positive(&from->cpu_cnt[i]);
-
- for (i = 0; i < BLKG_RWSTAT_NR; i++)
- atomic64_add(sum[i] + atomic64_read(&from->aux_cnt[i]),
- &to->aux_cnt[i]);
-}
-
#ifdef CONFIG_BLK_DEV_THROTTLING
extern bool blk_throtl_bio(struct request_queue *q, struct blkcg_gq *blkg,
struct bio *bio);
@@ -745,15 +601,33 @@ static inline bool blkcg_bio_issue_check(struct request_queue *q,
throtl = blk_throtl_bio(q, blkg, bio);
if (!throtl) {
+ struct blkg_iostat_set *bis;
+ int rwd, cpu;
+
+ if (op_is_discard(bio->bi_opf))
+ rwd = BLKG_IOSTAT_DISCARD;
+ else if (op_is_write(bio->bi_opf))
+ rwd = BLKG_IOSTAT_WRITE;
+ else
+ rwd = BLKG_IOSTAT_READ;
+
+ cpu = get_cpu();
+ bis = per_cpu_ptr(blkg->iostat_cpu, cpu);
+ u64_stats_update_begin(&bis->sync);
+
/*
* If the bio is flagged with BIO_QUEUE_ENTERED it means this
* is a split bio and we would have already accounted for the
* size of the bio.
*/
if (!bio_flagged(bio, BIO_QUEUE_ENTERED))
- blkg_rwstat_add(&blkg->stat_bytes, bio->bi_opf,
- bio->bi_iter.bi_size);
- blkg_rwstat_add(&blkg->stat_ios, bio->bi_opf, 1);
+ bis->cur.bytes[rwd] += bio->bi_iter.bi_size;
+ bis->cur.ios[rwd]++;
+
+ u64_stats_update_end(&bis->sync);
+ if (cgroup_subsys_on_dfl(io_cgrp_subsys))
+ cgroup_rstat_updated(blkg->blkcg->css.cgroup, cpu);
+ put_cpu();
}
blkcg_bio_issue_init(bio);
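
As a point of reference for the per-cpu accounting above, here is a minimal sketch of how a reader might fold the per-cpu blkg_iostat_set counters into a single total. It assumes a fully set-up blkg with iostat_cpu already allocated, and the name my_sum_iostat is purely illustrative; the u64_stats_fetch_begin()/u64_stats_fetch_retry() pairing mirrors the u64_stats_update_begin()/end() used in the issue path.

/* Illustrative only: sum one direction's per-cpu byte/io counters. */
static void my_sum_iostat(struct blkcg_gq *blkg, enum blkg_iostat_type rwd,
			  u64 *bytes, u64 *ios)
{
	int cpu;

	*bytes = 0;
	*ios = 0;
	for_each_possible_cpu(cpu) {
		struct blkg_iostat_set *bis = per_cpu_ptr(blkg->iostat_cpu, cpu);
		unsigned int seq;
		u64 b, i;

		do {
			seq = u64_stats_fetch_begin(&bis->sync);
			b = bis->cur.bytes[rwd];
			i = bis->cur.ios[rwd];
		} while (u64_stats_fetch_retry(&bis->sync, seq));

		*bytes += b;
		*ios += i;
	}
}
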
@@ -845,7 +719,6 @@ static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, void *key) { ret
static inline struct blkcg_gq *blk_queue_root_blkg(struct request_queue *q)
{ return NULL; }
static inline int blkcg_init_queue(struct request_queue *q) { return 0; }
-static inline void blkcg_drain_queue(struct request_queue *q) { }
static inline void blkcg_exit_queue(struct request_queue *q) { }
static inline int blkcg_policy_register(struct blkcg_policy *pol) { return 0; }
static inline void blkcg_policy_unregister(struct blkcg_policy *pol) { }
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 0bf056de5cc3..f389d7c724bd 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -10,103 +10,242 @@ struct blk_mq_tags;
struct blk_flush_queue;
/**
- * struct blk_mq_hw_ctx - State for a hardware queue facing the hardware block device
+ * struct blk_mq_hw_ctx - State for a hardware queue facing the hardware
+ * block device
*/
struct blk_mq_hw_ctx {
struct {
+ /** @lock: Protects the dispatch list. */
spinlock_t lock;
+ /**
+ * @dispatch: Used for requests that are ready to be
+ * dispatched to the hardware but for some reason (e.g. lack of
+ * resources) could not be sent to the hardware. As soon as the
+ * driver can send new requests, requests on this list will
+ * be sent first for a fairer dispatch.
+ */
struct list_head dispatch;
- unsigned long state; /* BLK_MQ_S_* flags */
+ /**
+ * @state: BLK_MQ_S_* flags. Defines the state of the hw
+ * queue (active, scheduled to restart, stopped).
+ */
+ unsigned long state;
} ____cacheline_aligned_in_smp;
+ /**
+ * @run_work: Used for scheduling a hardware queue run at a later time.
+ */
struct delayed_work run_work;
+ /** @cpumask: Map of available CPUs where this hctx can run. */
cpumask_var_t cpumask;
+ /**
+ * @next_cpu: Used by blk_mq_hctx_next_cpu() for round-robin CPU
+ * selection from @cpumask.
+ */
int next_cpu;
+ /**
+ * @next_cpu_batch: Counter of how many works are left in the batch before
+ * changing to the next CPU.
+ */
int next_cpu_batch;
- unsigned long flags; /* BLK_MQ_F_* flags */
+ /** @flags: BLK_MQ_F_* flags. Defines the behaviour of the queue. */
+ unsigned long flags;
+ /**
+ * @sched_data: Pointer owned by the IO scheduler attached to a request
+ * queue. It's up to the IO scheduler how to use this pointer.
+ */
void *sched_data;
+ /**
+ * @queue: Pointer to the request queue that owns this hardware context.
+ */
struct request_queue *queue;
+ /** @fq: Queue of requests that need to perform a flush operation. */
struct blk_flush_queue *fq;
+ /**
+ * @driver_data: Pointer to data owned by the block driver that created
+ * this hctx
+ */
void *driver_data;
+ /**
+ * @ctx_map: Bitmap for each software queue. If bit is on, there is a
+ * pending request in that software queue.
+ */
struct sbitmap ctx_map;
+ /**
+ * @dispatch_from: Software queue to be used when no scheduler was
+ * selected.
+ */
struct blk_mq_ctx *dispatch_from;
+ /**
+ * @dispatch_busy: Number used by blk_mq_update_dispatch_busy() to
+ * decide if the hw_queue is busy using Exponential Weighted Moving
+ * Average algorithm.
+ */
unsigned int dispatch_busy;
+ /** @type: HCTX_TYPE_* flags. Type of hardware queue. */
unsigned short type;
+ /** @nr_ctx: Number of software queues. */
unsigned short nr_ctx;
+ /** @ctxs: Array of software queues. */
struct blk_mq_ctx **ctxs;
+ /** @dispatch_wait_lock: Lock for dispatch_wait queue. */
spinlock_t dispatch_wait_lock;
+ /**
+ * @dispatch_wait: Waitqueue to put requests when there is no tag
+ * available at the moment, to wait for another try in the future.
+ */
wait_queue_entry_t dispatch_wait;
+
+ /**
+ * @wait_index: Index of next available dispatch_wait queue to insert
+ * requests.
+ */
atomic_t wait_index;
+ /**
+ * @tags: Tags owned by the block driver. A tag in this set is only
+ * assigned when a request is dispatched from a hardware queue.
+ */
struct blk_mq_tags *tags;
+ /**
+ * @sched_tags: Tags owned by I/O scheduler. If there is an I/O
+ * scheduler associated with a request queue, a tag is assigned when
+ * that request is allocated. Else, this member is not used.
+ */
struct blk_mq_tags *sched_tags;
+ /** @queued: Number of queued requests. */
unsigned long queued;
+ /** @run: Number of dispatched requests. */
unsigned long run;
#define BLK_MQ_MAX_DISPATCH_ORDER 7
+ /** @dispatched: Number of dispatch requests by queue. */
unsigned long dispatched[BLK_MQ_MAX_DISPATCH_ORDER];
+ /** @numa_node: NUMA node the storage adapter has been connected to. */
unsigned int numa_node;
+ /** @queue_num: Index of this hardware queue. */
unsigned int queue_num;
+ /**
+ * @nr_active: Number of active requests. Only used when a tag set is
+ * shared across request queues.
+ */
atomic_t nr_active;
+ /** @cpuhp_dead: List to store request if a CPU dies. */
struct hlist_node cpuhp_dead;
+ /** @kobj: Kernel object for sysfs. */
struct kobject kobj;
+ /** @poll_considered: Count times blk_poll() was called. */
unsigned long poll_considered;
+ /** @poll_invoked: Count how many requests blk_poll() polled. */
unsigned long poll_invoked;
+ /** @poll_success: Count how many polled requests were completed. */
unsigned long poll_success;
#ifdef CONFIG_BLK_DEBUG_FS
+ /**
+ * @debugfs_dir: debugfs directory for this hardware queue. Named
+ * as cpu<cpu_number>.
+ */
struct dentry *debugfs_dir;
+ /** @sched_debugfs_dir: debugfs directory for the scheduler. */
struct dentry *sched_debugfs_dir;
#endif
+ /**
+ * @hctx_list: if this hctx is not in use, this is an entry in
+ * q->unused_hctx_list.
+ */
struct list_head hctx_list;
- /* Must be the last member - see also blk_mq_hw_ctx_size(). */
+ /**
+ * @srcu: Sleepable RCU. Use as lock when type of the hardware queue is
+ * blocking (BLK_MQ_F_BLOCKING). Must be the last member - see also
+ * blk_mq_hw_ctx_size().
+ */
struct srcu_struct srcu[0];
};
+/**
+ * struct blk_mq_queue_map - Map software queues to hardware queues
+ * @mq_map: CPU ID to hardware queue index map. This is an array
+ * with nr_cpu_ids elements. Each element has a value in the range
+ * [@queue_offset, @queue_offset + @nr_queues).
+ * @nr_queues: Number of hardware queues to map CPU IDs onto.
+ * @queue_offset: First hardware queue to map onto. Used by the PCIe NVMe
+ * driver to map each hardware queue type (enum hctx_type) onto a distinct
+ * set of hardware queues.
+ */
struct blk_mq_queue_map {
unsigned int *mq_map;
unsigned int nr_queues;
unsigned int queue_offset;
};
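
To make the mapping above concrete, a driver with a single hardware queue type could fill @mq_map from its .map_queues callback using the generic spreading helper. This is a sketch only, assuming the blk_mq_map_queues() helper exported by the block layer; my_map_queues is a hypothetical name.

/* Hypothetical .map_queues callback: spread all CPUs over the default type. */
static int my_map_queues(struct blk_mq_tag_set *set)
{
	return blk_mq_map_queues(&set->map[HCTX_TYPE_DEFAULT]);
}
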
+/**
+ * enum hctx_type - Type of hardware queue
+ * @HCTX_TYPE_DEFAULT: All I/O not otherwise accounted for.
+ * @HCTX_TYPE_READ: Just for READ I/O.
+ * @HCTX_TYPE_POLL: Polled I/O of any kind.
+ * @HCTX_MAX_TYPES: Number of types of hctx.
+ */
enum hctx_type {
- HCTX_TYPE_DEFAULT, /* all I/O not otherwise accounted for */
- HCTX_TYPE_READ, /* just for READ I/O */
- HCTX_TYPE_POLL, /* polled I/O of any kind */
+ HCTX_TYPE_DEFAULT,
+ HCTX_TYPE_READ,
+ HCTX_TYPE_POLL,
HCTX_MAX_TYPES,
};
+/**
+ * struct blk_mq_tag_set - tag set that can be shared between request queues
+ * @map: One or more ctx -> hctx mappings. One map exists for each
+ * hardware queue type (enum hctx_type) that the driver wishes
+ * to support. There are no restrictions on maps being of the
+ * same size, and it's perfectly legal to share maps between
+ * types.
+ * @nr_maps: Number of elements in the @map array. A number in the range
+ * [1, HCTX_MAX_TYPES].
+ * @ops: Pointers to functions that implement block driver behavior.
+ * @nr_hw_queues: Number of hardware queues supported by the block driver that
+ * owns this data structure.
+ * @queue_depth: Number of tags per hardware queue, reserved tags included.
+ * @reserved_tags: Number of tags to set aside for BLK_MQ_REQ_RESERVED tag
+ * allocations.
+ * @cmd_size: Number of additional bytes to allocate per request. The block
+ * driver owns these additional bytes.
+ * @numa_node: NUMA node the storage adapter has been connected to.
+ * @timeout: Request processing timeout in jiffies.
+ * @flags: Zero or more BLK_MQ_F_* flags.
+ * @driver_data: Pointer to data owned by the block driver that created this
+ * tag set.
+ * @tags: Tag sets. One tag set per hardware queue. Has @nr_hw_queues
+ * elements.
+ * @tag_list_lock: Serializes tag_list accesses.
+ * @tag_list: List of the request queues that use this tag set. See also
+ * request_queue.tag_set_list.
+ */
struct blk_mq_tag_set {
- /*
- * map[] holds ctx -> hctx mappings, one map exists for each type
- * that the driver wishes to support. There are no restrictions
- * on maps being of the same size, and it's perfectly legal to
- * share maps between types.
- */
struct blk_mq_queue_map map[HCTX_MAX_TYPES];
- unsigned int nr_maps; /* nr entries in map[] */
+ unsigned int nr_maps;
const struct blk_mq_ops *ops;
- unsigned int nr_hw_queues; /* nr hw queues across maps */
- unsigned int queue_depth; /* max hw supported */
+ unsigned int nr_hw_queues;
+ unsigned int queue_depth;
unsigned int reserved_tags;
- unsigned int cmd_size; /* per-request extra data */
+ unsigned int cmd_size;
int numa_node;
unsigned int timeout;
- unsigned int flags; /* BLK_MQ_F_* */
+ unsigned int flags;
void *driver_data;
struct blk_mq_tags **tags;
@@ -115,6 +254,12 @@ struct blk_mq_tag_set {
struct list_head tag_list;
};
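
For orientation, a minimal sketch of a driver populating such a tag set and creating a request queue from it. It assumes the usual <linux/blk-mq.h> declarations (blk_mq_alloc_tag_set(), blk_mq_init_queue(), blk_mq_free_tag_set()); my_queue_rq, my_mq_ops and struct my_request_data are illustrative names, and error handling is trimmed.

struct my_request_data {
	int result;			/* per-request driver data (cmd_size) */
};

static blk_status_t my_queue_rq(struct blk_mq_hw_ctx *hctx,
				const struct blk_mq_queue_data *bd)
{
	blk_mq_start_request(bd->rq);
	/* ... hand bd->rq to the hardware here ... */
	blk_mq_end_request(bd->rq, BLK_STS_OK);
	return BLK_STS_OK;
}

static const struct blk_mq_ops my_mq_ops = {
	.queue_rq = my_queue_rq,
};

static struct blk_mq_tag_set my_tag_set;

static int my_init_queue(struct request_queue **qp)
{
	struct request_queue *q;
	int ret;

	memset(&my_tag_set, 0, sizeof(my_tag_set));
	my_tag_set.ops = &my_mq_ops;
	my_tag_set.nr_hw_queues = 1;
	my_tag_set.queue_depth = 64;
	my_tag_set.numa_node = NUMA_NO_NODE;
	my_tag_set.cmd_size = sizeof(struct my_request_data);
	my_tag_set.flags = BLK_MQ_F_SHOULD_MERGE;

	ret = blk_mq_alloc_tag_set(&my_tag_set);
	if (ret)
		return ret;

	q = blk_mq_init_queue(&my_tag_set);
	if (IS_ERR(q)) {
		blk_mq_free_tag_set(&my_tag_set);
		return PTR_ERR(q);
	}
	*qp = q;
	return 0;
}

The @cmd_size field is also what makes the blk_mq_rq_to_pdu()/blk_mq_rq_from_pdu() helpers further down usable for per-request driver data.
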
+/**
+ * struct blk_mq_queue_data - Data about a request inserted in a queue
+ *
+ * @rq: Request pointer.
+ * @last: If it is the last request in the queue.
+ */
struct blk_mq_queue_data {
struct request *rq;
bool last;
@@ -142,81 +287,101 @@ typedef bool (busy_fn)(struct request_queue *);
typedef void (complete_fn)(struct request *);
typedef void (cleanup_rq_fn)(struct request *);
-
+/**
+ * struct blk_mq_ops - Callback functions that implement block driver
+ * behaviour.
+ */
struct blk_mq_ops {
- /*
- * Queue request
+ /**
+ * @queue_rq: Queue a new request from block IO.
*/
queue_rq_fn *queue_rq;
- /*
- * If a driver uses bd->last to judge when to submit requests to
- * hardware, it must define this function. In case of errors that
- * make us stop issuing further requests, this hook serves the
+ /**
+ * @commit_rqs: If a driver uses bd->last to judge when to submit
+ * requests to hardware, it must define this function. In case of errors
+ * that make us stop issuing further requests, this hook serves the
* purpose of kicking the hardware (which the last request otherwise
* would have done).
*/
commit_rqs_fn *commit_rqs;
- /*
- * Reserve budget before queue request, once .queue_rq is
+ /**
+ * @get_budget: Reserve budget before queue request, once .queue_rq is
* run, it is driver's responsibility to release the
* reserved budget. Also we have to handle failure case
* of .get_budget for avoiding I/O deadlock.
*/
get_budget_fn *get_budget;
+ /**
+ * @put_budget: Release the reserved budget.
+ */
put_budget_fn *put_budget;
- /*
- * Called on request timeout
+ /**
+ * @timeout: Called on request timeout.
*/
timeout_fn *timeout;
- /*
- * Called to poll for completion of a specific tag.
+ /**
+ * @poll: Called to poll for completion of a specific tag.
*/
poll_fn *poll;
+ /**
+ * @complete: Mark the request as complete.
+ */
complete_fn *complete;
- /*
- * Called when the block layer side of a hardware queue has been
- * set up, allowing the driver to allocate/init matching structures.
- * Ditto for exit/teardown.
+ /**
+ * @init_hctx: Called when the block layer side of a hardware queue has
+ * been set up, allowing the driver to allocate/init matching
+ * structures.
*/
init_hctx_fn *init_hctx;
+ /**
+ * @exit_hctx: Ditto for exit/teardown.
+ */
exit_hctx_fn *exit_hctx;
- /*
- * Called for every command allocated by the block layer to allow
- * the driver to set up driver specific data.
+ /**
+ * @init_request: Called for every command allocated by the block layer
+ * to allow the driver to set up driver specific data.
*
* Tag greater than or equal to queue_depth is for setting up
* flush request.
- *
- * Ditto for exit/teardown.
*/
init_request_fn *init_request;
+ /**
+ * @exit_request: Ditto for exit/teardown.
+ */
exit_request_fn *exit_request;
- /* Called from inside blk_get_request() */
+
+ /**
+ * @initialize_rq_fn: Called from inside blk_get_request().
+ */
void (*initialize_rq_fn)(struct request *rq);
- /*
- * Called before freeing one request which isn't completed yet,
- * and usually for freeing the driver private data
+ /**
+ * @cleanup_rq: Called before freeing one request which isn't completed
+ * yet, and usually for freeing the driver private data.
*/
cleanup_rq_fn *cleanup_rq;
- /*
- * If set, returns whether or not this queue currently is busy
+ /**
+ * @busy: If set, returns whether or not this queue currently is busy.
*/
busy_fn *busy;
+ /**
+ * @map_queues: This allows drivers to specify their own queue mapping by
+ * overriding the setup-time function that builds the mq_map.
+ */
map_queues_fn *map_queues;
#ifdef CONFIG_BLK_DEBUG_FS
- /*
- * Used by the debugfs implementation to show driver-specific
+ /**
+ * @show_rq: Used by the debugfs implementation to show driver-specific
* information about a request.
*/
void (*show_rq)(struct seq_file *m, struct request *rq);
@@ -247,6 +412,8 @@ enum {
<< BLK_MQ_F_ALLOC_POLICY_START_BIT)
struct request_queue *blk_mq_init_queue(struct blk_mq_tag_set *);
+struct request_queue *blk_mq_init_queue_data(struct blk_mq_tag_set *set,
+ void *queuedata);
struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
struct request_queue *q,
bool elevator_init);
@@ -262,7 +429,6 @@ void blk_mq_free_tag_set(struct blk_mq_tag_set *set);
void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule);
void blk_mq_free_request(struct request *rq);
-bool blk_mq_can_queue(struct blk_mq_hw_ctx *);
bool blk_mq_queue_inflight(struct request_queue *q);
@@ -301,9 +467,25 @@ static inline u16 blk_mq_unique_tag_to_tag(u32 unique_tag)
return unique_tag & BLK_MQ_UNIQUE_TAG_MASK;
}
+/**
+ * blk_mq_rq_state() - read the current MQ_RQ_* state of a request
+ * @rq: target request.
+ */
+static inline enum mq_rq_state blk_mq_rq_state(struct request *rq)
+{
+ return READ_ONCE(rq->state);
+}
+
+static inline int blk_mq_request_started(struct request *rq)
+{
+ return blk_mq_rq_state(rq) != MQ_RQ_IDLE;
+}
+
+static inline int blk_mq_request_completed(struct request *rq)
+{
+ return blk_mq_rq_state(rq) == MQ_RQ_COMPLETE;
+}
-int blk_mq_request_started(struct request *rq);
-int blk_mq_request_completed(struct request *rq);
void blk_mq_start_request(struct request *rq);
void blk_mq_end_request(struct request *rq, blk_status_t error);
void __blk_mq_end_request(struct request *rq, blk_status_t error);
@@ -324,7 +506,7 @@ void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async);
void blk_mq_quiesce_queue(struct request_queue *q);
void blk_mq_unquiesce_queue(struct request_queue *q);
void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
-bool blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
+void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
void blk_mq_run_hw_queues(struct request_queue *q, bool async);
void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
busy_tag_iter_fn *fn, void *priv);
@@ -343,14 +525,29 @@ void blk_mq_quiesce_queue_nowait(struct request_queue *q);
unsigned int blk_mq_rq_cpu(struct request *rq);
-/*
+/**
+ * blk_mq_rq_from_pdu - cast a PDU to a request
+ * @pdu: the PDU (Protocol Data Unit) to be cast
+ *
+ * Return: request
+ *
* Driver command data is immediately after the request. So subtract request
- * size to get back to the original request, add request size to get the PDU.
+ * size to get back to the original request.
*/
static inline struct request *blk_mq_rq_from_pdu(void *pdu)
{
return pdu - sizeof(struct request);
}
+
+/**
+ * blk_mq_rq_to_pdu - cast a request to a PDU
+ * @rq: the request to be cast
+ *
+ * Return: pointer to the PDU
+ *
+ * Driver command data is immediately after the request. So add request to get
+ * the PDU.
+ */
static inline void *blk_mq_rq_to_pdu(struct request *rq)
{
return rq + 1;
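
Tying this back to @cmd_size in the tag set above, a sketch of how a driver might reach its per-request data from a completion handler; struct my_pdu and my_complete are illustrative names.

/* Illustrative: per-request driver data laid out right after the request. */
struct my_pdu {
	int status;
};

static void my_complete(struct request *rq)
{
	struct my_pdu *pdu = blk_mq_rq_to_pdu(rq);

	/* pdu sits immediately after *rq because cmd_size == sizeof(*pdu). */
	blk_mq_end_request(rq, pdu->status ? BLK_STS_IOERR : BLK_STS_OK);
}
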
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index d688b96d1d63..70254ae11769 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -153,10 +153,10 @@ struct bio {
unsigned short bi_write_hint;
blk_status_t bi_status;
u8 bi_partno;
+ atomic_t __bi_remaining;
struct bvec_iter bi_iter;
- atomic_t __bi_remaining;
bio_end_io_t *bi_end_io;
void *bi_private;
@@ -290,6 +290,12 @@ enum req_opf {
REQ_OP_ZONE_RESET_ALL = 8,
/* write the zero filled sector many times */
REQ_OP_WRITE_ZEROES = 9,
+ /* Open a zone */
+ REQ_OP_ZONE_OPEN = 10,
+ /* Close a zone */
+ REQ_OP_ZONE_CLOSE = 11,
+ /* Transition a zone to full */
+ REQ_OP_ZONE_FINISH = 12,
/* SCSI passthrough using struct scsi_request */
REQ_OP_SCSI_IN = 32,
@@ -371,6 +377,7 @@ enum stat_group {
STAT_READ,
STAT_WRITE,
STAT_DISCARD,
+ STAT_FLUSH,
NR_STAT_GROUPS
};
@@ -417,6 +424,25 @@ static inline bool op_is_discard(unsigned int op)
return (op & REQ_OP_MASK) == REQ_OP_DISCARD;
}
+/*
+ * Check if a bio or request operation is a zone management operation, with
+ * the exception of REQ_OP_ZONE_RESET_ALL which is treated as a special case
+ * due to its different handling in the block layer and device response in
+ * case of command failure.
+ */
+static inline bool op_is_zone_mgmt(enum req_opf op)
+{
+ switch (op & REQ_OP_MASK) {
+ case REQ_OP_ZONE_RESET:
+ case REQ_OP_ZONE_OPEN:
+ case REQ_OP_ZONE_CLOSE:
+ case REQ_OP_ZONE_FINISH:
+ return true;
+ default:
+ return false;
+ }
+}
+
static inline int op_stat_group(unsigned int op)
{
if (op_is_discard(op))
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index f3ea78b0c91c..32868fbedc9e 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -328,6 +328,7 @@ struct queue_limits {
unsigned int max_sectors;
unsigned int max_segment_size;
unsigned int physical_block_size;
+ unsigned int logical_block_size;
unsigned int alignment_offset;
unsigned int io_min;
unsigned int io_opt;
@@ -338,7 +339,6 @@ struct queue_limits {
unsigned int discard_granularity;
unsigned int discard_alignment;
- unsigned short logical_block_size;
unsigned short max_segments;
unsigned short max_integrity_segments;
unsigned short max_discard_segments;
@@ -349,34 +349,28 @@ struct queue_limits {
enum blk_zoned_model zoned;
};
+typedef int (*report_zones_cb)(struct blk_zone *zone, unsigned int idx,
+ void *data);
+
#ifdef CONFIG_BLK_DEV_ZONED
-/*
- * Maximum number of zones to report with a single report zones command.
- */
-#define BLK_ZONED_REPORT_MAX_ZONES 8192U
-
-extern unsigned int blkdev_nr_zones(struct block_device *bdev);
-extern int blkdev_report_zones(struct block_device *bdev,
- sector_t sector, struct blk_zone *zones,
- unsigned int *nr_zones);
-extern int blkdev_reset_zones(struct block_device *bdev, sector_t sectors,
- sector_t nr_sectors, gfp_t gfp_mask);
+#define BLK_ALL_ZONES ((unsigned int)-1)
+int blkdev_report_zones(struct block_device *bdev, sector_t sector,
+ unsigned int nr_zones, report_zones_cb cb, void *data);
+unsigned int blkdev_nr_zones(struct gendisk *disk);
+extern int blkdev_zone_mgmt(struct block_device *bdev, enum req_opf op,
+ sector_t sectors, sector_t nr_sectors,
+ gfp_t gfp_mask);
extern int blk_revalidate_disk_zones(struct gendisk *disk);
extern int blkdev_report_zones_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long arg);
-extern int blkdev_reset_zones_ioctl(struct block_device *bdev, fmode_t mode,
- unsigned int cmd, unsigned long arg);
+extern int blkdev_zone_mgmt_ioctl(struct block_device *bdev, fmode_t mode,
+ unsigned int cmd, unsigned long arg);
#else /* CONFIG_BLK_DEV_ZONED */
-static inline unsigned int blkdev_nr_zones(struct block_device *bdev)
-{
- return 0;
-}
-
-static inline int blk_revalidate_disk_zones(struct gendisk *disk)
+static inline unsigned int blkdev_nr_zones(struct gendisk *disk)
{
return 0;
}
@@ -388,9 +382,9 @@ static inline int blkdev_report_zones_ioctl(struct block_device *bdev,
return -ENOTTY;
}
-static inline int blkdev_reset_zones_ioctl(struct block_device *bdev,
- fmode_t mode, unsigned int cmd,
- unsigned long arg)
+static inline int blkdev_zone_mgmt_ioctl(struct block_device *bdev,
+ fmode_t mode, unsigned int cmd,
+ unsigned long arg)
{
return -ENOTTY;
}
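
As an illustration of the callback-based reporting interface declared above, a hypothetical helper that counts sequential-write-required zones; my_count_seq_cb and my_count_seq_zones are made-up names, and the assumption is that blkdev_report_zones() returns the number of zones reported or a negative error code.

static int my_count_seq_cb(struct blk_zone *zone, unsigned int idx, void *data)
{
	unsigned int *nr_seq = data;

	if (zone->type == BLK_ZONE_TYPE_SEQWRITE_REQ)
		(*nr_seq)++;
	return 0;
}

static int my_count_seq_zones(struct block_device *bdev, unsigned int *nr_seq)
{
	int ret;

	*nr_seq = 0;
	ret = blkdev_report_zones(bdev, 0, BLK_ALL_ZONES, my_count_seq_cb,
				  nr_seq);
	return ret < 0 ? ret : 0;
}
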
@@ -411,7 +405,6 @@ struct request_queue {
/* sw queues */
struct blk_mq_ctx __percpu *queue_ctx;
- unsigned int nr_queues;
unsigned int queue_depth;
@@ -505,9 +498,9 @@ struct request_queue {
/*
* Zoned block device information for request dispatch control.
* nr_zones is the total number of zones of the device. This is always
- * 0 for regular block devices. seq_zones_bitmap is a bitmap of nr_zones
- * bits which indicates if a zone is conventional (bit clear) or
- * sequential (bit set). seq_zones_wlock is a bitmap of nr_zones
+ * 0 for regular block devices. conv_zones_bitmap is a bitmap of nr_zones
+ * bits which indicates if a zone is conventional (bit set) or
+ * sequential (bit clear). seq_zones_wlock is a bitmap of nr_zones
* bits which indicates if a zone is write locked, that is, if a write
* request targeting the zone was dispatched. All three fields are
* initialized by the low level device driver (e.g. scsi/sd.c).
@@ -520,7 +513,7 @@ struct request_queue {
* blk_mq_unfreeze_queue().
*/
unsigned int nr_zones;
- unsigned long *seq_zones_bitmap;
+ unsigned long *conv_zones_bitmap;
unsigned long *seq_zones_wlock;
#endif /* CONFIG_BLK_DEV_ZONED */
@@ -531,7 +524,7 @@ struct request_queue {
unsigned int sg_reserved_size;
int node;
#ifdef CONFIG_BLK_DEV_IO_TRACE
- struct blk_trace *blk_trace;
+ struct blk_trace __rcu *blk_trace;
struct mutex blk_trace_mutex;
#endif
/*
@@ -725,9 +718,11 @@ static inline unsigned int blk_queue_zone_no(struct request_queue *q,
static inline bool blk_queue_zone_is_seq(struct request_queue *q,
sector_t sector)
{
- if (!blk_queue_is_zoned(q) || !q->seq_zones_bitmap)
+ if (!blk_queue_is_zoned(q))
return false;
- return test_bit(blk_queue_zone_no(q, sector), q->seq_zones_bitmap);
+ if (!q->conv_zones_bitmap)
+ return true;
+ return !test_bit(blk_queue_zone_no(q, sector), q->conv_zones_bitmap);
}
#else /* CONFIG_BLK_DEV_ZONED */
static inline unsigned int blk_queue_nr_zones(struct request_queue *q)
@@ -870,6 +865,8 @@ extern int scsi_cmd_ioctl(struct request_queue *, struct gendisk *, fmode_t,
unsigned int, void __user *);
extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t,
struct scsi_ioctl_command __user *);
+extern int get_sg_io_hdr(struct sg_io_hdr *hdr, const void __user *argp);
+extern int put_sg_io_hdr(const struct sg_io_hdr *hdr, void __user *argp);
extern int blk_queue_enter(struct request_queue *q, blk_mq_req_flags_t flags);
extern void blk_queue_exit(struct request_queue *q);
@@ -955,6 +952,10 @@ static inline unsigned int blk_rq_stats_sectors(const struct request *rq)
}
#ifdef CONFIG_BLK_DEV_ZONED
+
+/* Helper to convert BLK_ZONE_COND_XXX to its string format XXX */
+const char *blk_zone_cond_str(enum blk_zone_cond zone_cond);
+
static inline unsigned int blk_rq_zone_no(struct request *rq)
{
return blk_queue_zone_no(rq->q, blk_rq_pos(rq));
@@ -1066,7 +1067,6 @@ extern void blk_abort_request(struct request *);
* Access functions for manipulating queue properties
*/
extern void blk_cleanup_queue(struct request_queue *);
-extern void blk_queue_make_request(struct request_queue *, make_request_fn *);
extern void blk_queue_bounce_limit(struct request_queue *, u64);
extern void blk_queue_max_hw_sectors(struct request_queue *, unsigned int);
extern void blk_queue_chunk_sectors(struct request_queue *, unsigned int);
@@ -1080,7 +1080,7 @@ extern void blk_queue_max_write_same_sectors(struct request_queue *q,
unsigned int max_write_same_sectors);
extern void blk_queue_max_write_zeroes_sectors(struct request_queue *q,
unsigned int max_write_same_sectors);
-extern void blk_queue_logical_block_size(struct request_queue *, unsigned short);
+extern void blk_queue_logical_block_size(struct request_queue *, unsigned int);
extern void blk_queue_physical_block_size(struct request_queue *, unsigned int);
extern void blk_queue_alignment_offset(struct request_queue *q,
unsigned int alignment);
@@ -1143,8 +1143,7 @@ extern void blk_dump_rq_flags(struct request *, char *);
extern long nr_blockdev_pages(void);
bool __must_check blk_get_queue(struct request_queue *);
-struct request_queue *blk_alloc_queue(gfp_t);
-struct request_queue *blk_alloc_queue_node(gfp_t gfp_mask, int node_id);
+struct request_queue *blk_alloc_queue(make_request_fn make_request, int node_id);
extern void blk_put_queue(struct request_queue *);
extern void blk_set_queue_dying(struct request_queue *);
@@ -1294,7 +1293,7 @@ static inline unsigned int queue_max_segment_size(const struct request_queue *q)
return q->limits.max_segment_size;
}
-static inline unsigned short queue_logical_block_size(const struct request_queue *q)
+static inline unsigned queue_logical_block_size(const struct request_queue *q)
{
int retval = 512;
@@ -1304,7 +1303,7 @@ static inline unsigned short queue_logical_block_size(const struct request_queue
return retval;
}
-static inline unsigned short bdev_logical_block_size(struct block_device *bdev)
+static inline unsigned int bdev_logical_block_size(struct block_device *bdev)
{
return queue_logical_block_size(bdev_get_queue(bdev));
}
@@ -1487,17 +1486,7 @@ static inline unsigned int block_size(struct block_device *bdev)
return bdev->bd_block_size;
}
-typedef struct {struct page *v;} Sector;
-
-unsigned char *read_dev_sector(struct block_device *, sector_t, Sector *);
-
-static inline void put_dev_sector(Sector p)
-{
- put_page(p.v);
-}
-
int kblockd_schedule_work(struct work_struct *work);
-int kblockd_schedule_work_on(int cpu, struct work_struct *work);
int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork, unsigned long delay);
#define MODULE_ALIAS_BLOCKDEV(major,minor) \
@@ -1709,11 +1698,19 @@ struct block_device_operations {
/* this callback is with swap_lock and sometimes page table lock held */
void (*swap_slot_free_notify) (struct block_device *, unsigned long);
int (*report_zones)(struct gendisk *, sector_t sector,
- struct blk_zone *zones, unsigned int *nr_zones);
+ unsigned int nr_zones, report_zones_cb cb, void *data);
+ char *(*devnode)(struct gendisk *disk, umode_t *mode);
struct module *owner;
const struct pr_ops *pr_ops;
};
+#ifdef CONFIG_COMPAT
+extern int blkdev_compat_ptr_ioctl(struct block_device *, fmode_t,
+ unsigned int, unsigned long);
+#else
+#define blkdev_compat_ptr_ioctl NULL
+#endif
+
extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
unsigned long);
extern int bdev_read_page(struct block_device *, sector_t, struct page *);
diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
index 7bb2d8de9f30..3b6ff5902edc 100644
--- a/include/linux/blktrace_api.h
+++ b/include/linux/blktrace_api.h
@@ -51,9 +51,13 @@ void __trace_note_message(struct blk_trace *, struct blkcg *blkcg, const char *f
**/
#define blk_add_cgroup_trace_msg(q, cg, fmt, ...) \
do { \
- struct blk_trace *bt = (q)->blk_trace; \
+ struct blk_trace *bt; \
+ \
+ rcu_read_lock(); \
+ bt = rcu_dereference((q)->blk_trace); \
if (unlikely(bt)) \
__trace_note_message(bt, cg, fmt, ##__VA_ARGS__);\
+ rcu_read_unlock(); \
} while (0)
#define blk_add_trace_msg(q, fmt, ...) \
blk_add_cgroup_trace_msg(q, NULL, fmt, ##__VA_ARGS__)
@@ -61,10 +65,14 @@ void __trace_note_message(struct blk_trace *, struct blkcg *blkcg, const char *f
static inline bool blk_trace_note_message_enabled(struct request_queue *q)
{
- struct blk_trace *bt = q->blk_trace;
- if (likely(!bt))
- return false;
- return bt->act_mask & BLK_TC_NOTIFY;
+ struct blk_trace *bt;
+ bool ret;
+
+ rcu_read_lock();
+ bt = rcu_dereference(q->blk_trace);
+ ret = bt && (bt->act_mask & BLK_TC_NOTIFY);
+ rcu_read_unlock();
+ return ret;
}
extern void blk_add_driver_data(struct request_queue *q, struct request *rq,
diff --git a/include/linux/bootconfig.h b/include/linux/bootconfig.h
new file mode 100644
index 000000000000..d11e183fcb54
--- /dev/null
+++ b/include/linux/bootconfig.h
@@ -0,0 +1,227 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_XBC_H
+#define _LINUX_XBC_H
+/*
+ * Extra Boot Config
+ * Copyright (C) 2019 Linaro Ltd.
+ * Author: Masami Hiramatsu <mhiramat@kernel.org>
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+
+#define BOOTCONFIG_MAGIC "#BOOTCONFIG\n"
+#define BOOTCONFIG_MAGIC_LEN 12
+
+/* XBC tree node */
+struct xbc_node {
+ u16 next;
+ u16 child;
+ u16 parent;
+ u16 data;
+} __attribute__ ((__packed__));
+
+#define XBC_KEY 0
+#define XBC_VALUE (1 << 15)
+/* Maximum size of boot config is 32KB - 1 */
+#define XBC_DATA_MAX (XBC_VALUE - 1)
+
+#define XBC_NODE_MAX 1024
+#define XBC_KEYLEN_MAX 256
+#define XBC_DEPTH_MAX 16
+
+/* Node tree access raw APIs */
+struct xbc_node * __init xbc_root_node(void);
+int __init xbc_node_index(struct xbc_node *node);
+struct xbc_node * __init xbc_node_get_parent(struct xbc_node *node);
+struct xbc_node * __init xbc_node_get_child(struct xbc_node *node);
+struct xbc_node * __init xbc_node_get_next(struct xbc_node *node);
+const char * __init xbc_node_get_data(struct xbc_node *node);
+
+/**
+ * xbc_node_is_value() - Test the node is a value node
+ * @node: An XBC node.
+ *
+ * Test whether @node is a value node. Returns true if it is a value node, false if not.
+ */
+static inline __init bool xbc_node_is_value(struct xbc_node *node)
+{
+ return node->data & XBC_VALUE;
+}
+
+/**
+ * xbc_node_is_key() - Test the node is a key node
+ * @node: An XBC node.
+ *
+ * Test whether @node is a key node. Returns true if it is a key node, false if not.
+ */
+static inline __init bool xbc_node_is_key(struct xbc_node *node)
+{
+ return !xbc_node_is_value(node);
+}
+
+/**
+ * xbc_node_is_array() - Test the node is an arrayed value node
+ * @node: An XBC node.
+ *
+ * Test whether @node is an arrayed value node.
+ */
+static inline __init bool xbc_node_is_array(struct xbc_node *node)
+{
+ return xbc_node_is_value(node) && node->next != 0;
+}
+
+/**
+ * xbc_node_is_leaf() - Test the node is a leaf key node
+ * @node: An XBC node.
+ *
+ * Test whether @node is a leaf key node, i.e. a key node which has a value node
+ * or no child. Returns true if it is a leaf node, false if not.
+ */
+static inline __init bool xbc_node_is_leaf(struct xbc_node *node)
+{
+ return xbc_node_is_key(node) &&
+ (!node->child || xbc_node_is_value(xbc_node_get_child(node)));
+}
+
+/* Tree-based key-value access APIs */
+struct xbc_node * __init xbc_node_find_child(struct xbc_node *parent,
+ const char *key);
+
+const char * __init xbc_node_find_value(struct xbc_node *parent,
+ const char *key,
+ struct xbc_node **vnode);
+
+struct xbc_node * __init xbc_node_find_next_leaf(struct xbc_node *root,
+ struct xbc_node *leaf);
+
+const char * __init xbc_node_find_next_key_value(struct xbc_node *root,
+ struct xbc_node **leaf);
+
+/**
+ * xbc_find_value() - Find a value which matches the key
+ * @key: Search key
+ * @vnode: A container pointer of XBC value node.
+ *
+ * Search for a value whose key matches @key in the whole XBC tree and return
+ * the value if found. The found value node is stored in *@vnode.
+ * Note that this can return a 0-length string and store NULL in *@vnode for
+ * a key-only (non-value) entry.
+ */
+static inline const char * __init
+xbc_find_value(const char *key, struct xbc_node **vnode)
+{
+ return xbc_node_find_value(NULL, key, vnode);
+}
+
+/**
+ * xbc_find_node() - Find a node which matches the key
+ * @key: Search key
+ *
+ * Search for a (key) node whose key matches @key in the whole XBC tree and
+ * return the node if found. If not found, returns NULL.
+ */
+static inline struct xbc_node * __init xbc_find_node(const char *key)
+{
+ return xbc_node_find_child(NULL, key);
+}
+
+/**
+ * xbc_array_for_each_value() - Iterate value nodes on an array
+ * @anode: An XBC arrayed value node
+ * @value: A value
+ *
+ * Iterate over the array value nodes and their values, starting from @anode.
+ * This is expected to be used with xbc_find_value() and xbc_node_find_value(),
+ * so that the user can process each array entry node.
+ */
+#define xbc_array_for_each_value(anode, value) \
+ for (value = xbc_node_get_data(anode); anode != NULL ; \
+ anode = xbc_node_get_next(anode), \
+ value = anode ? xbc_node_get_data(anode) : NULL)
+
+/**
+ * xbc_node_for_each_child() - Iterate child nodes
+ * @parent: An XBC node.
+ * @child: Iterated XBC node.
+ *
+ * Iterate over the child nodes of @parent. Each child node is stored in @child.
+ */
+#define xbc_node_for_each_child(parent, child) \
+ for (child = xbc_node_get_child(parent); child != NULL ; \
+ child = xbc_node_get_next(child))
+
+/**
+ * xbc_node_for_each_array_value() - Iterate array entries of a given key
+ * @node: An XBC node.
+ * @key: A key string searched under @node
+ * @anode: Iterated XBC node of array entry.
+ * @value: Iterated value of array entry.
+ *
+ * Iterate over the array entries of the given @key under @node. Each array
+ * entry node and its value are stored in @anode and @value. If @node does not
+ * have a @key node, this does nothing.
+ * Note that even if the found key node has only one value (not an array),
+ * this executes the block once. However, if the found key node has no value
+ * (key-only node), this does nothing. So don't use this to test for the
+ * existence of a key-value pair.
+ */
+#define xbc_node_for_each_array_value(node, key, anode, value) \
+ for (value = xbc_node_find_value(node, key, &anode); value != NULL; \
+ anode = xbc_node_get_next(anode), \
+ value = anode ? xbc_node_get_data(anode) : NULL)
+
+/**
+ * xbc_node_for_each_key_value() - Iterate key-value pairs under a node
+ * @node: An XBC node.
+ * @knode: Iterated key node
+ * @value: Iterated value string
+ *
+ * Iterate key-value pairs under @node. Each key node and value string are
+ * stored in @knode and @value respectively.
+ */
+#define xbc_node_for_each_key_value(node, knode, value) \
+ for (knode = NULL, value = xbc_node_find_next_key_value(node, &knode);\
+ knode != NULL; value = xbc_node_find_next_key_value(node, &knode))
+
+/**
+ * xbc_for_each_key_value() - Iterate key-value pairs
+ * @knode: Iterated key node
+ * @value: Iterated value string
+ *
+ * Iterate key-value pairs in whole XBC tree. Each key node and value string
+ * are stored in @knode and @value respectively.
+ */
+#define xbc_for_each_key_value(knode, value) \
+ xbc_node_for_each_key_value(NULL, knode, value)
+
+/* Compose partial key */
+int __init xbc_node_compose_key_after(struct xbc_node *root,
+ struct xbc_node *node, char *buf, size_t size);
+
+/**
+ * xbc_node_compose_key() - Compose full key string of the XBC node
+ * @node: An XBC node.
+ * @buf: A buffer to store the key.
+ * @size: The size of the @buf.
+ *
+ * Compose the full-length key of @node into @buf. Returns the total length
+ * of the key stored in @buf, -EINVAL if @node is NULL, or -ERANGE if the
+ * key depth exceeds the maximum depth.
+ */
+static inline int __init xbc_node_compose_key(struct xbc_node *node,
+ char *buf, size_t size)
+{
+ return xbc_node_compose_key_after(NULL, node, buf, size);
+}
+
+/* XBC node initializer */
+int __init xbc_init(char *buf);
+
+/* XBC cleanup data structures */
+void __init xbc_destroy_all(void);
+
+/* Debug dump functions */
+void __init xbc_debug_dump(void);
+
+#endif
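
Putting the APIs above together, a sketch of how early init code might consume a parsed bootconfig tree once xbc_init() has run; the key "ftrace.event" and my_bootconfig_setup are illustrative only.

static int __init my_bootconfig_setup(void)
{
	struct xbc_node *anode;
	const char *val;

	/* Look the key up anywhere in the tree. */
	val = xbc_find_value("ftrace.event", &anode);
	if (!val)
		return -ENOENT;

	/* If the key holds an array, visit every entry in turn. */
	xbc_array_for_each_value(anode, val)
		pr_info("bootconfig event: %s\n", val);

	return 0;
}
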
diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h
index 169fd25f6bc2..a11d5b7dbbf3 100644
--- a/include/linux/bpf-cgroup.h
+++ b/include/linux/bpf-cgroup.h
@@ -85,6 +85,7 @@ int cgroup_bpf_inherit(struct cgroup *cgrp);
void cgroup_bpf_offline(struct cgroup *cgrp);
int __cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog,
+ struct bpf_prog *replace_prog,
enum bpf_attach_type type, u32 flags);
int __cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
enum bpf_attach_type type);
@@ -93,7 +94,8 @@ int __cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
/* Wrapper for __cgroup_bpf_*() protected by cgroup_mutex */
int cgroup_bpf_attach(struct cgroup *cgrp, struct bpf_prog *prog,
- enum bpf_attach_type type, u32 flags);
+ struct bpf_prog *replace_prog, enum bpf_attach_type type,
+ u32 flags);
int cgroup_bpf_detach(struct cgroup *cgrp, struct bpf_prog *prog,
enum bpf_attach_type type, u32 flags);
int cgroup_bpf_query(struct cgroup *cgrp, const union bpf_attr *attr,
@@ -157,8 +159,8 @@ void bpf_cgroup_storage_link(struct bpf_cgroup_storage *storage,
struct cgroup *cgroup,
enum bpf_attach_type type);
void bpf_cgroup_storage_unlink(struct bpf_cgroup_storage *storage);
-int bpf_cgroup_storage_assign(struct bpf_prog *prog, struct bpf_map *map);
-void bpf_cgroup_storage_release(struct bpf_prog *prog, struct bpf_map *map);
+int bpf_cgroup_storage_assign(struct bpf_prog_aux *aux, struct bpf_map *map);
+void bpf_cgroup_storage_release(struct bpf_prog_aux *aux, struct bpf_map *map);
int bpf_percpu_cgroup_storage_copy(struct bpf_map *map, void *key, void *value);
int bpf_percpu_cgroup_storage_update(struct bpf_map *map, void *key,
@@ -360,9 +362,9 @@ static inline int cgroup_bpf_prog_query(const union bpf_attr *attr,
static inline void bpf_cgroup_storage_set(
struct bpf_cgroup_storage *storage[MAX_BPF_CGROUP_STORAGE_TYPE]) {}
-static inline int bpf_cgroup_storage_assign(struct bpf_prog *prog,
+static inline int bpf_cgroup_storage_assign(struct bpf_prog_aux *aux,
struct bpf_map *map) { return 0; }
-static inline void bpf_cgroup_storage_release(struct bpf_prog *prog,
+static inline void bpf_cgroup_storage_release(struct bpf_prog_aux *aux,
struct bpf_map *map) {}
static inline struct bpf_cgroup_storage *bpf_cgroup_storage_alloc(
struct bpf_prog *prog, enum bpf_cgroup_storage_type stype) { return NULL; }
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 3bf3835d0e86..212991f6f2a5 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -12,17 +12,24 @@
#include <linux/err.h>
#include <linux/rbtree_latch.h>
#include <linux/numa.h>
+#include <linux/mm_types.h>
#include <linux/wait.h>
#include <linux/u64_stats_sync.h>
+#include <linux/refcount.h>
+#include <linux/mutex.h>
+#include <linux/module.h>
struct bpf_verifier_env;
+struct bpf_verifier_log;
struct perf_event;
struct bpf_prog;
+struct bpf_prog_aux;
struct bpf_map;
struct sock;
struct seq_file;
struct btf;
struct btf_type;
+struct exception_table_entry;
extern struct idr btf_idr;
extern spinlock_t btf_idr_lock;
@@ -37,6 +44,15 @@ struct bpf_map_ops {
int (*map_get_next_key)(struct bpf_map *map, void *key, void *next_key);
void (*map_release_uref)(struct bpf_map *map);
void *(*map_lookup_elem_sys_only)(struct bpf_map *map, void *key);
+ int (*map_lookup_batch)(struct bpf_map *map, const union bpf_attr *attr,
+ union bpf_attr __user *uattr);
+ int (*map_lookup_and_delete_batch)(struct bpf_map *map,
+ const union bpf_attr *attr,
+ union bpf_attr __user *uattr);
+ int (*map_update_batch)(struct bpf_map *map, const union bpf_attr *attr,
+ union bpf_attr __user *uattr);
+ int (*map_delete_batch)(struct bpf_map *map, const union bpf_attr *attr,
+ union bpf_attr __user *uattr);
/* funcs callable from userspace and from eBPF programs */
void *(*map_lookup_elem)(struct bpf_map *map, void *key);
@@ -59,11 +75,18 @@ struct bpf_map_ops {
const struct btf_type *key_type,
const struct btf_type *value_type);
+ /* Prog poke tracking helpers. */
+ int (*map_poke_track)(struct bpf_map *map, struct bpf_prog_aux *aux);
+ void (*map_poke_untrack)(struct bpf_map *map, struct bpf_prog_aux *aux);
+ void (*map_poke_run)(struct bpf_map *map, u32 key, struct bpf_prog *old,
+ struct bpf_prog *new);
+
/* Direct value access helpers. */
int (*map_direct_value_addr)(const struct bpf_map *map,
u64 *imm, u32 off);
int (*map_direct_value_meta)(const struct bpf_map *map,
u64 imm, u32 *off);
+ int (*map_mmap)(struct bpf_map *map, struct vm_area_struct *vma);
};
struct bpf_map_memory {
@@ -92,17 +115,20 @@ struct bpf_map {
u32 btf_value_type_id;
struct btf *btf;
struct bpf_map_memory memory;
+ char name[BPF_OBJ_NAME_LEN];
+ u32 btf_vmlinux_value_type_id;
bool unpriv_array;
- bool frozen; /* write-once */
- /* 48 bytes hole */
+ bool frozen; /* write-once; write-protected by freeze_mutex */
+ /* 22 bytes hole */
/* The 3rd and 4th cacheline with misc members to avoid false sharing
* particularly with refcounting.
*/
- atomic_t refcnt ____cacheline_aligned;
- atomic_t usercnt;
+ atomic64_t refcnt ____cacheline_aligned;
+ atomic64_t usercnt;
struct work_struct work;
- char name[BPF_OBJ_NAME_LEN];
+ struct mutex freeze_mutex;
+ u64 writecnt; /* writable mmap cnt; protected by freeze_mutex */
};
static inline bool map_value_has_spin_lock(const struct bpf_map *map)
@@ -134,6 +160,7 @@ static inline void copy_map_value(struct bpf_map *map, void *dst, void *src)
}
void copy_map_value_locked(struct bpf_map *map, void *dst, void *src,
bool lock_src);
+int bpf_obj_name_cpy(char *dst, const char *src, unsigned int size);
struct bpf_offload_dev;
struct bpf_offloaded_map;
@@ -168,7 +195,8 @@ static inline bool bpf_map_offload_neutral(const struct bpf_map *map)
static inline bool bpf_map_support_seq_show(const struct bpf_map *map)
{
- return map->btf && map->ops->map_seq_show_elem;
+ return (map->btf_value_type_id || map->btf_vmlinux_value_type_id) &&
+ map->ops->map_seq_show_elem;
}
int map_check_no_btf(const struct bpf_map *map,
@@ -211,6 +239,7 @@ enum bpf_arg_type {
ARG_PTR_TO_INT, /* pointer to int */
ARG_PTR_TO_LONG, /* pointer to long */
ARG_PTR_TO_SOCKET, /* pointer to bpf_sock (fullsock) */
+ ARG_PTR_TO_BTF_ID, /* pointer to in-kernel struct */
};
/* type of values returned from helper functions */
@@ -233,11 +262,17 @@ struct bpf_func_proto {
bool gpl_only;
bool pkt_access;
enum bpf_return_type ret_type;
- enum bpf_arg_type arg1_type;
- enum bpf_arg_type arg2_type;
- enum bpf_arg_type arg3_type;
- enum bpf_arg_type arg4_type;
- enum bpf_arg_type arg5_type;
+ union {
+ struct {
+ enum bpf_arg_type arg1_type;
+ enum bpf_arg_type arg2_type;
+ enum bpf_arg_type arg3_type;
+ enum bpf_arg_type arg4_type;
+ enum bpf_arg_type arg5_type;
+ };
+ enum bpf_arg_type arg_type[5];
+ };
+ int *btf_id; /* BTF ids of arguments */
};
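
A short sketch of what the anonymous union buys: a helper proto can keep using the named arg1_type..arg5_type initializers while the verifier indexes the same storage through arg_type[]. BPF_CALL_2 from <linux/filter.h> is assumed, and my_helper is a hypothetical helper.

BPF_CALL_2(my_helper, void *, ctx, u64, flags)
{
	return 0;
}

static const struct bpf_func_proto my_helper_proto = {
	.func		= my_helper,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_PTR_TO_CTX,
	.arg2_type	= ARG_ANYTHING,
};

/* Because of the union, my_helper_proto.arg_type[0] reads back as
 * ARG_PTR_TO_CTX and arg_type[1] as ARG_ANYTHING.
 */
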
/* bpf_context is intentionally undefined structure. Pointer to bpf_context is
@@ -281,6 +316,7 @@ enum bpf_reg_type {
PTR_TO_TCP_SOCK_OR_NULL, /* reg points to struct tcp_sock or NULL */
PTR_TO_TP_BUFFER, /* reg points to a writable raw tp's buffer */
PTR_TO_XDP_SOCK, /* reg points to struct xdp_sock */
+ PTR_TO_BTF_ID, /* reg points to kernel struct */
};
/* The information passed from prog-specific *_is_valid_access
@@ -288,7 +324,11 @@ enum bpf_reg_type {
*/
struct bpf_insn_access_aux {
enum bpf_reg_type reg_type;
- int ctx_field_size;
+ union {
+ int ctx_field_size;
+ u32 btf_id;
+ };
+ struct bpf_verifier_log *log; /* for verbose logs */
};
static inline void
@@ -322,6 +362,10 @@ struct bpf_verifier_ops {
const struct bpf_insn *src,
struct bpf_insn *dst,
struct bpf_prog *prog, u32 *target_size);
+ int (*btf_struct_access)(struct bpf_verifier_log *log,
+ const struct btf_type *t, int off, int size,
+ enum bpf_access_type atype,
+ u32 *next_btf_id);
};
struct bpf_prog_offload_ops {
@@ -359,14 +403,219 @@ enum bpf_cgroup_storage_type {
#define MAX_BPF_CGROUP_STORAGE_TYPE __BPF_CGROUP_STORAGE_MAX
+/* The longest tracepoint has 12 args.
+ * See include/trace/bpf_probe.h
+ */
+#define MAX_BPF_FUNC_ARGS 12
+
struct bpf_prog_stats {
u64 cnt;
u64 nsecs;
struct u64_stats_sync syncp;
+} __aligned(2 * sizeof(u64));
+
+struct btf_func_model {
+ u8 ret_size;
+ u8 nr_args;
+ u8 arg_size[MAX_BPF_FUNC_ARGS];
+};
+
+/* Restore arguments before returning from trampoline to let original function
+ * continue executing. This flag is used for fentry progs when there are no
+ * fexit progs.
+ */
+#define BPF_TRAMP_F_RESTORE_REGS BIT(0)
+/* Call original function after fentry progs, but before fexit progs.
+ * Makes sense for fentry/fexit, normal calls and indirect calls.
+ */
+#define BPF_TRAMP_F_CALL_ORIG BIT(1)
+/* Skip current frame and return to parent. Makes sense for fentry/fexit
+ * programs only. Should not be used with normal calls and indirect calls.
+ */
+#define BPF_TRAMP_F_SKIP_FRAME BIT(2)
+
+/* Different use cases for BPF trampoline:
+ * 1. replace nop at the function entry (kprobe equivalent)
+ * flags = BPF_TRAMP_F_RESTORE_REGS
+ * fentry = a set of programs to run before returning from trampoline
+ *
+ * 2. replace nop at the function entry (kprobe + kretprobe equivalent)
+ * flags = BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_SKIP_FRAME
+ * orig_call = fentry_ip + MCOUNT_INSN_SIZE
+ * fentry = a set of programs to run before calling original function
+ * fexit = a set of programs to run after original function
+ *
+ * 3. replace direct call instruction anywhere in the function body
+ * or assign a function pointer for indirect call (like tcp_congestion_ops->cong_avoid)
+ * With flags = 0
+ * fentry = a set of programs to run before returning from trampoline
+ * With flags = BPF_TRAMP_F_CALL_ORIG
+ * orig_call = original callback addr or direct function addr
+ * fentry = a set of programs to run before calling original function
+ * fexit = a set of programs to run after original function
+ */
+int arch_prepare_bpf_trampoline(void *image, void *image_end,
+ const struct btf_func_model *m, u32 flags,
+ struct bpf_prog **fentry_progs, int fentry_cnt,
+ struct bpf_prog **fexit_progs, int fexit_cnt,
+ void *orig_call);
+/* these two functions are called from generated trampoline */
+u64 notrace __bpf_prog_enter(void);
+void notrace __bpf_prog_exit(struct bpf_prog *prog, u64 start);
+
+enum bpf_tramp_prog_type {
+ BPF_TRAMP_FENTRY,
+ BPF_TRAMP_FEXIT,
+ BPF_TRAMP_MAX,
+ BPF_TRAMP_REPLACE, /* more than MAX */
+};
+
+struct bpf_trampoline {
+ /* hlist for trampoline_table */
+ struct hlist_node hlist;
+ /* serializes access to fields of this trampoline */
+ struct mutex mutex;
+ refcount_t refcnt;
+ u64 key;
+ struct {
+ struct btf_func_model model;
+ void *addr;
+ bool ftrace_managed;
+ } func;
+ /* if !NULL this is BPF_PROG_TYPE_EXT program that extends another BPF
+ * program by replacing one of its functions. func.addr is the address
+ * of the function it replaced.
+ */
+ struct bpf_prog *extension_prog;
+ /* list of BPF programs using this trampoline */
+ struct hlist_head progs_hlist[BPF_TRAMP_MAX];
+ /* Number of attached programs. A counter per kind. */
+ int progs_cnt[BPF_TRAMP_MAX];
+ /* Executable image of trampoline */
+ void *image;
+ u64 selector;
+};
+
+#define BPF_DISPATCHER_MAX 48 /* Fits in 2048B */
+
+struct bpf_dispatcher_prog {
+ struct bpf_prog *prog;
+ refcount_t users;
+};
+
+struct bpf_dispatcher {
+ /* dispatcher mutex */
+ struct mutex mutex;
+ void *func;
+ struct bpf_dispatcher_prog progs[BPF_DISPATCHER_MAX];
+ int num_progs;
+ void *image;
+ u32 image_off;
+};
+
+static __always_inline unsigned int bpf_dispatcher_nopfunc(
+ const void *ctx,
+ const struct bpf_insn *insnsi,
+ unsigned int (*bpf_func)(const void *,
+ const struct bpf_insn *))
+{
+ return bpf_func(ctx, insnsi);
+}
+#ifdef CONFIG_BPF_JIT
+struct bpf_trampoline *bpf_trampoline_lookup(u64 key);
+int bpf_trampoline_link_prog(struct bpf_prog *prog);
+int bpf_trampoline_unlink_prog(struct bpf_prog *prog);
+void bpf_trampoline_put(struct bpf_trampoline *tr);
+#define BPF_DISPATCHER_INIT(name) { \
+ .mutex = __MUTEX_INITIALIZER(name.mutex), \
+ .func = &name##func, \
+ .progs = {}, \
+ .num_progs = 0, \
+ .image = NULL, \
+ .image_off = 0 \
+}
+
+#define DEFINE_BPF_DISPATCHER(name) \
+ noinline unsigned int name##func( \
+ const void *ctx, \
+ const struct bpf_insn *insnsi, \
+ unsigned int (*bpf_func)(const void *, \
+ const struct bpf_insn *)) \
+ { \
+ return bpf_func(ctx, insnsi); \
+ } \
+ EXPORT_SYMBOL(name##func); \
+ struct bpf_dispatcher name = BPF_DISPATCHER_INIT(name);
+#define DECLARE_BPF_DISPATCHER(name) \
+ unsigned int name##func( \
+ const void *ctx, \
+ const struct bpf_insn *insnsi, \
+ unsigned int (*bpf_func)(const void *, \
+ const struct bpf_insn *)); \
+ extern struct bpf_dispatcher name;
+#define BPF_DISPATCHER_FUNC(name) name##func
+#define BPF_DISPATCHER_PTR(name) (&name)
+void bpf_dispatcher_change_prog(struct bpf_dispatcher *d, struct bpf_prog *from,
+ struct bpf_prog *to);
+struct bpf_image {
+ struct latch_tree_node tnode;
+ unsigned char data[];
+};
+#define BPF_IMAGE_SIZE (PAGE_SIZE - sizeof(struct bpf_image))
+bool is_bpf_image_address(unsigned long address);
+void *bpf_image_alloc(void);
+#else
+static inline struct bpf_trampoline *bpf_trampoline_lookup(u64 key)
+{
+ return NULL;
+}
+static inline int bpf_trampoline_link_prog(struct bpf_prog *prog)
+{
+ return -ENOTSUPP;
+}
+static inline int bpf_trampoline_unlink_prog(struct bpf_prog *prog)
+{
+ return -ENOTSUPP;
+}
+static inline void bpf_trampoline_put(struct bpf_trampoline *tr) {}
+#define DEFINE_BPF_DISPATCHER(name)
+#define DECLARE_BPF_DISPATCHER(name)
+#define BPF_DISPATCHER_FUNC(name) bpf_dispatcher_nopfunc
+#define BPF_DISPATCHER_PTR(name) NULL
+static inline void bpf_dispatcher_change_prog(struct bpf_dispatcher *d,
+ struct bpf_prog *from,
+ struct bpf_prog *to) {}
+static inline bool is_bpf_image_address(unsigned long address)
+{
+ return false;
+}
+#endif
+
+struct bpf_func_info_aux {
+ u16 linkage;
+ bool unreliable;
+};
+
+enum bpf_jit_poke_reason {
+ BPF_POKE_REASON_TAIL_CALL,
+};
+
+/* Descriptor of pokes pointing /into/ the JITed image. */
+struct bpf_jit_poke_descriptor {
+ void *ip;
+ union {
+ struct {
+ struct bpf_map *map;
+ u32 key;
+ } tail_call;
+ };
+ bool ip_stable;
+ u8 adj_off;
+ u16 reason;
};
struct bpf_prog_aux {
- atomic_t refcnt;
+ atomic64_t refcnt;
u32 used_map_cnt;
u32 max_ctx_offset;
u32 max_pkt_offset;
@@ -375,10 +624,23 @@ struct bpf_prog_aux {
u32 id;
u32 func_cnt; /* used by non-func prog as the number of func progs */
u32 func_idx; /* 0 for non-func prog, the index in func array for func prog */
+ u32 attach_btf_id; /* in-kernel BTF type id to attach to */
+ struct bpf_prog *linked_prog;
bool verifier_zext; /* Zero extensions has been inserted by verifier. */
bool offload_requested;
+ bool attach_btf_trace; /* true if attaching to BTF-enabled raw tp */
+ bool func_proto_unreliable;
+ enum bpf_tramp_prog_type trampoline_prog_type;
+ struct bpf_trampoline *trampoline;
+ struct hlist_node tramp_hlist;
+ /* BTF_KIND_FUNC_PROTO for valid attach_btf_id */
+ const struct btf_type *attach_func_proto;
+ /* function name for valid attach_btf_id */
+ const char *attach_func_name;
struct bpf_prog **func;
void *jit_data; /* JIT specific data. arch dependent */
+ struct bpf_jit_poke_descriptor *poke_tab;
+ u32 size_poke_tab;
struct latch_tree_node ksym_tnode;
struct list_head ksym_lnode;
const struct bpf_prog_ops *ops;
@@ -394,6 +656,7 @@ struct bpf_prog_aux {
struct bpf_prog_offload *offload;
struct btf *btf;
struct bpf_func_info *func_info;
+ struct bpf_func_info_aux *func_info_aux;
/* bpf_line_info loaded from userspace. linfo->insn_off
* has the xlated insn offset.
* Both the main and sub prog share the same linfo.
@@ -416,6 +679,8 @@ struct bpf_prog_aux {
* main prog always has linfo_idx == 0
*/
u32 linfo_idx;
+ u32 num_exentries;
+ struct exception_table_entry *extable;
struct bpf_prog_stats __percpu *stats;
union {
struct work_struct work;
@@ -423,17 +688,96 @@ struct bpf_prog_aux {
};
};
+struct bpf_array_aux {
+ /* 'Ownership' of prog array is claimed by the first program that
+ * is going to use this map or by the first program whose FD is
+ * stored in the map to make sure that all callers and callees have
+ * the same prog type and JITed flag.
+ */
+ enum bpf_prog_type type;
+ bool jited;
+ /* Programs with direct jumps into programs part of this array. */
+ struct list_head poke_progs;
+ struct bpf_map *map;
+ struct mutex poke_mutex;
+ struct work_struct work;
+};
+
+struct bpf_struct_ops_value;
+struct btf_type;
+struct btf_member;
+
+#define BPF_STRUCT_OPS_MAX_NR_MEMBERS 64
+struct bpf_struct_ops {
+ const struct bpf_verifier_ops *verifier_ops;
+ int (*init)(struct btf *btf);
+ int (*check_member)(const struct btf_type *t,
+ const struct btf_member *member);
+ int (*init_member)(const struct btf_type *t,
+ const struct btf_member *member,
+ void *kdata, const void *udata);
+ int (*reg)(void *kdata);
+ void (*unreg)(void *kdata);
+ const struct btf_type *type;
+ const struct btf_type *value_type;
+ const char *name;
+ struct btf_func_model func_models[BPF_STRUCT_OPS_MAX_NR_MEMBERS];
+ u32 type_id;
+ u32 value_id;
+};
+
+#if defined(CONFIG_BPF_JIT) && defined(CONFIG_BPF_SYSCALL)
+#define BPF_MODULE_OWNER ((void *)((0xeB9FUL << 2) + POISON_POINTER_DELTA))
+const struct bpf_struct_ops *bpf_struct_ops_find(u32 type_id);
+void bpf_struct_ops_init(struct btf *btf, struct bpf_verifier_log *log);
+bool bpf_struct_ops_get(const void *kdata);
+void bpf_struct_ops_put(const void *kdata);
+int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map, void *key,
+ void *value);
+static inline bool bpf_try_module_get(const void *data, struct module *owner)
+{
+ if (owner == BPF_MODULE_OWNER)
+ return bpf_struct_ops_get(data);
+ else
+ return try_module_get(owner);
+}
+static inline void bpf_module_put(const void *data, struct module *owner)
+{
+ if (owner == BPF_MODULE_OWNER)
+ bpf_struct_ops_put(data);
+ else
+ module_put(owner);
+}
+#else
+static inline const struct bpf_struct_ops *bpf_struct_ops_find(u32 type_id)
+{
+ return NULL;
+}
+static inline void bpf_struct_ops_init(struct btf *btf,
+ struct bpf_verifier_log *log)
+{
+}
+static inline bool bpf_try_module_get(const void *data, struct module *owner)
+{
+ return try_module_get(owner);
+}
+static inline void bpf_module_put(const void *data, struct module *owner)
+{
+ module_put(owner);
+}
+static inline int bpf_struct_ops_map_sys_lookup_elem(struct bpf_map *map,
+ void *key,
+ void *value)
+{
+ return -EINVAL;
+}
+#endif
+
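A hedged usage sketch (not part of the patch): callers that need the callbacks behind a struct_ops value to stay alive pin either the owning module or, when owner is the BPF_MODULE_OWNER sentinel, the struct_ops map itself. The function name below is invented for illustration.

	static int example_call_ops(const void *data, struct module *owner)
	{
		if (!bpf_try_module_get(data, owner))
			return -ENODEV;	/* backing module or struct_ops map is going away */

		/* ... safely invoke the callbacks referenced by 'data' ... */

		bpf_module_put(data, owner);
		return 0;
	}
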
struct bpf_array {
struct bpf_map map;
u32 elem_size;
u32 index_mask;
- /* 'ownership' of prog_array is claimed by the first program that
- * is going to use this map or by the first program which FD is stored
- * in the map to make sure that all callers and callees have the same
- * prog_type and JITed flag
- */
- enum bpf_prog_type owner_prog_type;
- bool owner_jited;
+ struct bpf_array_aux *aux;
union {
char value[0] __aligned(8);
void *ptrs[0] __aligned(8);
@@ -482,6 +826,7 @@ struct bpf_event_entry {
bool bpf_prog_array_compatible(struct bpf_array *array, const struct bpf_prog *fp);
int bpf_prog_calc_tag(struct bpf_prog *fp);
+const char *kernel_type_name(u32 btf_type_id);
const struct bpf_func_proto *bpf_get_trace_printk_proto(void);
@@ -620,7 +965,7 @@ DECLARE_PER_CPU(int, bpf_prog_active);
extern const struct file_operations bpf_map_fops;
extern const struct file_operations bpf_prog_fops;
-#define BPF_PROG_TYPE(_id, _name) \
+#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
extern const struct bpf_prog_ops _name ## _prog_ops; \
extern const struct bpf_verifier_ops _name ## _verifier_ops;
#define BPF_MAP_TYPE(_id, _ops) \
@@ -636,22 +981,24 @@ extern const struct bpf_verifier_ops xdp_analyzer_ops;
struct bpf_prog *bpf_prog_get(u32 ufd);
struct bpf_prog *bpf_prog_get_type_dev(u32 ufd, enum bpf_prog_type type,
bool attach_drv);
-struct bpf_prog * __must_check bpf_prog_add(struct bpf_prog *prog, int i);
+void bpf_prog_add(struct bpf_prog *prog, int i);
void bpf_prog_sub(struct bpf_prog *prog, int i);
-struct bpf_prog * __must_check bpf_prog_inc(struct bpf_prog *prog);
+void bpf_prog_inc(struct bpf_prog *prog);
struct bpf_prog * __must_check bpf_prog_inc_not_zero(struct bpf_prog *prog);
void bpf_prog_put(struct bpf_prog *prog);
int __bpf_prog_charge(struct user_struct *user, u32 pages);
void __bpf_prog_uncharge(struct user_struct *user, u32 pages);
+void __bpf_free_used_maps(struct bpf_prog_aux *aux,
+ struct bpf_map **used_maps, u32 len);
void bpf_prog_free_id(struct bpf_prog *prog, bool do_idr_lock);
void bpf_map_free_id(struct bpf_map *map, bool do_idr_lock);
struct bpf_map *bpf_map_get_with_uref(u32 ufd);
struct bpf_map *__bpf_map_get(struct fd f);
-struct bpf_map * __must_check bpf_map_inc(struct bpf_map *map, bool uref);
-struct bpf_map * __must_check bpf_map_inc_not_zero(struct bpf_map *map,
- bool uref);
+void bpf_map_inc(struct bpf_map *map);
+void bpf_map_inc_with_uref(struct bpf_map *map);
+struct bpf_map * __must_check bpf_map_inc_not_zero(struct bpf_map *map);
void bpf_map_put_with_uref(struct bpf_map *map);
void bpf_map_put(struct bpf_map *map);
int bpf_map_charge_memlock(struct bpf_map *map, u32 pages);
@@ -661,8 +1008,18 @@ void bpf_map_charge_finish(struct bpf_map_memory *mem);
void bpf_map_charge_move(struct bpf_map_memory *dst,
struct bpf_map_memory *src);
void *bpf_map_area_alloc(u64 size, int numa_node);
+void *bpf_map_area_mmapable_alloc(u64 size, int numa_node);
void bpf_map_area_free(void *base);
void bpf_map_init_from_attr(struct bpf_map *map, union bpf_attr *attr);
+int generic_map_lookup_batch(struct bpf_map *map,
+ const union bpf_attr *attr,
+ union bpf_attr __user *uattr);
+int generic_map_update_batch(struct bpf_map *map,
+ const union bpf_attr *attr,
+ union bpf_attr __user *uattr);
+int generic_map_delete_batch(struct bpf_map *map,
+ const union bpf_attr *attr,
+ union bpf_attr __user *uattr);
extern int sysctl_unprivileged_bpf_disabled;
@@ -719,14 +1076,16 @@ struct sk_buff;
struct bpf_dtab_netdev *__dev_map_lookup_elem(struct bpf_map *map, u32 key);
struct bpf_dtab_netdev *__dev_map_hash_lookup_elem(struct bpf_map *map, u32 key);
-void __dev_map_flush(struct bpf_map *map);
+void __dev_flush(void);
+int dev_xdp_enqueue(struct net_device *dev, struct xdp_buff *xdp,
+ struct net_device *dev_rx);
int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
struct net_device *dev_rx);
int dev_map_generic_redirect(struct bpf_dtab_netdev *dst, struct sk_buff *skb,
struct bpf_prog *xdp_prog);
struct bpf_cpu_map_entry *__cpu_map_lookup_elem(struct bpf_map *map, u32 key);
-void __cpu_map_flush(struct bpf_map *map);
+void __cpu_map_flush(void);
int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_buff *xdp,
struct net_device *dev_rx);
@@ -747,6 +1106,32 @@ int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
const union bpf_attr *kattr,
union bpf_attr __user *uattr);
+bool btf_ctx_access(int off, int size, enum bpf_access_type type,
+ const struct bpf_prog *prog,
+ struct bpf_insn_access_aux *info);
+int btf_struct_access(struct bpf_verifier_log *log,
+ const struct btf_type *t, int off, int size,
+ enum bpf_access_type atype,
+ u32 *next_btf_id);
+int btf_resolve_helper_id(struct bpf_verifier_log *log,
+ const struct bpf_func_proto *fn, int);
+
+int btf_distill_func_proto(struct bpf_verifier_log *log,
+ struct btf *btf,
+ const struct btf_type *func_proto,
+ const char *func_name,
+ struct btf_func_model *m);
+
+struct bpf_reg_state;
+int btf_check_func_arg_match(struct bpf_verifier_env *env, int subprog,
+ struct bpf_reg_state *regs);
+int btf_prepare_func_args(struct bpf_verifier_env *env, int subprog,
+ struct bpf_reg_state *reg);
+int btf_check_type_match(struct bpf_verifier_env *env, struct bpf_prog *prog,
+ struct btf *btf, const struct btf_type *t);
+
+struct bpf_prog *bpf_prog_by_id(u32 id);
+
#else /* !CONFIG_BPF_SYSCALL */
static inline struct bpf_prog *bpf_prog_get(u32 ufd)
{
@@ -760,10 +1145,8 @@ static inline struct bpf_prog *bpf_prog_get_type_dev(u32 ufd,
return ERR_PTR(-EOPNOTSUPP);
}
-static inline struct bpf_prog * __must_check bpf_prog_add(struct bpf_prog *prog,
- int i)
+static inline void bpf_prog_add(struct bpf_prog *prog, int i)
{
- return ERR_PTR(-EOPNOTSUPP);
}
static inline void bpf_prog_sub(struct bpf_prog *prog, int i)
@@ -774,9 +1157,8 @@ static inline void bpf_prog_put(struct bpf_prog *prog)
{
}
-static inline struct bpf_prog * __must_check bpf_prog_inc(struct bpf_prog *prog)
+static inline void bpf_prog_inc(struct bpf_prog *prog)
{
- return ERR_PTR(-EOPNOTSUPP);
}
static inline struct bpf_prog *__must_check
@@ -811,7 +1193,7 @@ static inline struct net_device *__dev_map_hash_lookup_elem(struct bpf_map *map
return NULL;
}
-static inline void __dev_map_flush(struct bpf_map *map)
+static inline void __dev_flush(void)
{
}
@@ -819,6 +1201,13 @@ struct xdp_buff;
struct bpf_dtab_netdev;
static inline
+int dev_xdp_enqueue(struct net_device *dev, struct xdp_buff *xdp,
+ struct net_device *dev_rx)
+{
+ return 0;
+}
+
+static inline
int dev_map_enqueue(struct bpf_dtab_netdev *dst, struct xdp_buff *xdp,
struct net_device *dev_rx)
{
@@ -840,7 +1229,7 @@ struct bpf_cpu_map_entry *__cpu_map_lookup_elem(struct bpf_map *map, u32 key)
return NULL;
}
-static inline void __cpu_map_flush(struct bpf_map *map)
+static inline void __cpu_map_flush(void)
{
}
@@ -877,6 +1266,15 @@ static inline int bpf_prog_test_run_flow_dissector(struct bpf_prog *prog,
{
return -ENOTSUPP;
}
+
+static inline void bpf_map_put(struct bpf_map *map)
+{
+}
+
+static inline struct bpf_prog *bpf_prog_by_id(u32 id)
+{
+ return ERR_PTR(-ENOTSUPP);
+}
#endif /* CONFIG_BPF_SYSCALL */
static inline struct bpf_prog *bpf_prog_get_type(u32 ufd,
@@ -972,31 +1370,6 @@ static inline int sock_map_get_from_fd(const union bpf_attr *attr,
}
#endif
-#if defined(CONFIG_XDP_SOCKETS)
-struct xdp_sock;
-struct xdp_sock *__xsk_map_lookup_elem(struct bpf_map *map, u32 key);
-int __xsk_map_redirect(struct bpf_map *map, struct xdp_buff *xdp,
- struct xdp_sock *xs);
-void __xsk_map_flush(struct bpf_map *map);
-#else
-struct xdp_sock;
-static inline struct xdp_sock *__xsk_map_lookup_elem(struct bpf_map *map,
- u32 key)
-{
- return NULL;
-}
-
-static inline int __xsk_map_redirect(struct bpf_map *map, struct xdp_buff *xdp,
- struct xdp_sock *xs)
-{
- return -EOPNOTSUPP;
-}
-
-static inline void __xsk_map_flush(struct bpf_map *map)
-{
-}
-#endif
-
#if defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL)
void bpf_sk_reuseport_detach(struct sock *sk);
int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map, void *key,
@@ -1055,6 +1428,7 @@ extern const struct bpf_func_proto bpf_get_local_storage_proto;
extern const struct bpf_func_proto bpf_strtol_proto;
extern const struct bpf_func_proto bpf_strtoul_proto;
extern const struct bpf_func_proto bpf_tcp_sock_proto;
+extern const struct bpf_func_proto bpf_jiffies64_proto;
/* Shared helpers among cBPF and eBPF. */
void bpf_user_rnd_init_once(void);
@@ -1095,6 +1469,15 @@ static inline u32 bpf_sock_convert_ctx_access(enum bpf_access_type type,
#endif
#ifdef CONFIG_INET
+struct sk_reuseport_kern {
+ struct sk_buff *skb;
+ struct sock *sk;
+ struct sock *selected_sk;
+ void *data_end;
+ u32 hash;
+ u32 reuseport_id;
+ bool bind_inany;
+};
bool bpf_tcp_sock_is_valid_access(int off, int size, enum bpf_access_type type,
struct bpf_insn_access_aux *info);
@@ -1145,4 +1528,12 @@ static inline u32 bpf_xdp_sock_convert_ctx_access(enum bpf_access_type type,
}
#endif /* CONFIG_INET */
+enum bpf_text_poke_type {
+ BPF_MOD_CALL,
+ BPF_MOD_JUMP,
+};
+
+int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
+ void *addr1, void *addr2);
+
#endif /* _LINUX_BPF_H */
diff --git a/include/linux/bpf_types.h b/include/linux/bpf_types.h
index 36a9c2325176..c81d4ece79a4 100644
--- a/include/linux/bpf_types.h
+++ b/include/linux/bpf_types.h
@@ -2,41 +2,74 @@
/* internal file - do not include directly */
#ifdef CONFIG_NET
-BPF_PROG_TYPE(BPF_PROG_TYPE_SOCKET_FILTER, sk_filter)
-BPF_PROG_TYPE(BPF_PROG_TYPE_SCHED_CLS, tc_cls_act)
-BPF_PROG_TYPE(BPF_PROG_TYPE_SCHED_ACT, tc_cls_act)
-BPF_PROG_TYPE(BPF_PROG_TYPE_XDP, xdp)
+BPF_PROG_TYPE(BPF_PROG_TYPE_SOCKET_FILTER, sk_filter,
+ struct __sk_buff, struct sk_buff)
+BPF_PROG_TYPE(BPF_PROG_TYPE_SCHED_CLS, tc_cls_act,
+ struct __sk_buff, struct sk_buff)
+BPF_PROG_TYPE(BPF_PROG_TYPE_SCHED_ACT, tc_cls_act,
+ struct __sk_buff, struct sk_buff)
+BPF_PROG_TYPE(BPF_PROG_TYPE_XDP, xdp,
+ struct xdp_md, struct xdp_buff)
#ifdef CONFIG_CGROUP_BPF
-BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_SKB, cg_skb)
-BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_SOCK, cg_sock)
-BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_SOCK_ADDR, cg_sock_addr)
+BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_SKB, cg_skb,
+ struct __sk_buff, struct sk_buff)
+BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_SOCK, cg_sock,
+ struct bpf_sock, struct sock)
+BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_SOCK_ADDR, cg_sock_addr,
+ struct bpf_sock_addr, struct bpf_sock_addr_kern)
#endif
-BPF_PROG_TYPE(BPF_PROG_TYPE_LWT_IN, lwt_in)
-BPF_PROG_TYPE(BPF_PROG_TYPE_LWT_OUT, lwt_out)
-BPF_PROG_TYPE(BPF_PROG_TYPE_LWT_XMIT, lwt_xmit)
-BPF_PROG_TYPE(BPF_PROG_TYPE_LWT_SEG6LOCAL, lwt_seg6local)
-BPF_PROG_TYPE(BPF_PROG_TYPE_SOCK_OPS, sock_ops)
-BPF_PROG_TYPE(BPF_PROG_TYPE_SK_SKB, sk_skb)
-BPF_PROG_TYPE(BPF_PROG_TYPE_SK_MSG, sk_msg)
-BPF_PROG_TYPE(BPF_PROG_TYPE_FLOW_DISSECTOR, flow_dissector)
+BPF_PROG_TYPE(BPF_PROG_TYPE_LWT_IN, lwt_in,
+ struct __sk_buff, struct sk_buff)
+BPF_PROG_TYPE(BPF_PROG_TYPE_LWT_OUT, lwt_out,
+ struct __sk_buff, struct sk_buff)
+BPF_PROG_TYPE(BPF_PROG_TYPE_LWT_XMIT, lwt_xmit,
+ struct __sk_buff, struct sk_buff)
+BPF_PROG_TYPE(BPF_PROG_TYPE_LWT_SEG6LOCAL, lwt_seg6local,
+ struct __sk_buff, struct sk_buff)
+BPF_PROG_TYPE(BPF_PROG_TYPE_SOCK_OPS, sock_ops,
+ struct bpf_sock_ops, struct bpf_sock_ops_kern)
+BPF_PROG_TYPE(BPF_PROG_TYPE_SK_SKB, sk_skb,
+ struct __sk_buff, struct sk_buff)
+BPF_PROG_TYPE(BPF_PROG_TYPE_SK_MSG, sk_msg,
+ struct sk_msg_md, struct sk_msg)
+BPF_PROG_TYPE(BPF_PROG_TYPE_FLOW_DISSECTOR, flow_dissector,
+ struct __sk_buff, struct bpf_flow_dissector)
#endif
#ifdef CONFIG_BPF_EVENTS
-BPF_PROG_TYPE(BPF_PROG_TYPE_KPROBE, kprobe)
-BPF_PROG_TYPE(BPF_PROG_TYPE_TRACEPOINT, tracepoint)
-BPF_PROG_TYPE(BPF_PROG_TYPE_PERF_EVENT, perf_event)
-BPF_PROG_TYPE(BPF_PROG_TYPE_RAW_TRACEPOINT, raw_tracepoint)
-BPF_PROG_TYPE(BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE, raw_tracepoint_writable)
+BPF_PROG_TYPE(BPF_PROG_TYPE_KPROBE, kprobe,
+ bpf_user_pt_regs_t, struct pt_regs)
+BPF_PROG_TYPE(BPF_PROG_TYPE_TRACEPOINT, tracepoint,
+ __u64, u64)
+BPF_PROG_TYPE(BPF_PROG_TYPE_PERF_EVENT, perf_event,
+ struct bpf_perf_event_data, struct bpf_perf_event_data_kern)
+BPF_PROG_TYPE(BPF_PROG_TYPE_RAW_TRACEPOINT, raw_tracepoint,
+ struct bpf_raw_tracepoint_args, u64)
+BPF_PROG_TYPE(BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE, raw_tracepoint_writable,
+ struct bpf_raw_tracepoint_args, u64)
+BPF_PROG_TYPE(BPF_PROG_TYPE_TRACING, tracing,
+ void *, void *)
#endif
#ifdef CONFIG_CGROUP_BPF
-BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_DEVICE, cg_dev)
-BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_SYSCTL, cg_sysctl)
-BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_SOCKOPT, cg_sockopt)
+BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_DEVICE, cg_dev,
+ struct bpf_cgroup_dev_ctx, struct bpf_cgroup_dev_ctx)
+BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_SYSCTL, cg_sysctl,
+ struct bpf_sysctl, struct bpf_sysctl_kern)
+BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_SOCKOPT, cg_sockopt,
+ struct bpf_sockopt, struct bpf_sockopt_kern)
#endif
#ifdef CONFIG_BPF_LIRC_MODE2
-BPF_PROG_TYPE(BPF_PROG_TYPE_LIRC_MODE2, lirc_mode2)
+BPF_PROG_TYPE(BPF_PROG_TYPE_LIRC_MODE2, lirc_mode2,
+ __u32, u32)
#endif
#ifdef CONFIG_INET
-BPF_PROG_TYPE(BPF_PROG_TYPE_SK_REUSEPORT, sk_reuseport)
+BPF_PROG_TYPE(BPF_PROG_TYPE_SK_REUSEPORT, sk_reuseport,
+ struct sk_reuseport_md, struct sk_reuseport_kern)
+#endif
+#if defined(CONFIG_BPF_JIT)
+BPF_PROG_TYPE(BPF_PROG_TYPE_STRUCT_OPS, bpf_struct_ops,
+ void *, void *)
+BPF_PROG_TYPE(BPF_PROG_TYPE_EXT, bpf_extension,
+ void *, void *)
#endif
BPF_MAP_TYPE(BPF_MAP_TYPE_ARRAY, array_map_ops)
@@ -78,3 +111,6 @@ BPF_MAP_TYPE(BPF_MAP_TYPE_REUSEPORT_SOCKARRAY, reuseport_array_ops)
#endif
BPF_MAP_TYPE(BPF_MAP_TYPE_QUEUE, queue_map_ops)
BPF_MAP_TYPE(BPF_MAP_TYPE_STACK, stack_map_ops)
+#if defined(CONFIG_BPF_JIT)
+BPF_MAP_TYPE(BPF_MAP_TYPE_STRUCT_OPS, bpf_struct_ops_map_ops)
+#endif
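
A sketch of how an includer might use the two new x-macro arguments (illustrative only; the ctx_size_entry table is invented, only the four-argument BPF_PROG_TYPE() shape comes from this header):

	struct ctx_size_entry {
		enum bpf_prog_type type;
		size_t user_ctx_size;	/* sizeof(prog_ctx_type) */
		size_t kern_ctx_size;	/* sizeof(kern_ctx_type) */
	};

	static const struct ctx_size_entry ctx_sizes[] = {
	#define BPF_PROG_TYPE(_id, _name, prog_ctx_type, kern_ctx_type) \
		{ _id, sizeof(prog_ctx_type), sizeof(kern_ctx_type) },
	#define BPF_MAP_TYPE(_id, _ops)
	#include <linux/bpf_types.h>
	#undef BPF_PROG_TYPE
	#undef BPF_MAP_TYPE
	};
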
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index 26a6d58ca78c..5406e6e96585 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -52,6 +52,8 @@ struct bpf_reg_state {
*/
struct bpf_map *map_ptr;
+ u32 btf_id; /* for PTR_TO_BTF_ID */
+
/* Max size from any of the above. */
unsigned long raw;
};
@@ -291,7 +293,7 @@ struct bpf_verifier_state_list {
struct bpf_insn_aux_data {
union {
enum bpf_reg_type ptr_type; /* pointer type for load/store insns */
- unsigned long map_state; /* pointer/poison value for maps */
+ unsigned long map_ptr_state; /* pointer/poison value for maps */
s32 call_imm; /* saved imm field of call insn */
u32 alu_limit; /* limit for add/sub register with pointer */
struct {
@@ -299,13 +301,16 @@ struct bpf_insn_aux_data {
u32 map_off; /* offset from value base address */
};
};
+ u64 map_key_state; /* constant (32 bit) key tracking for maps */
int ctx_field_size; /* the ctx field size for load insn, maybe 0 */
int sanitize_stack_off; /* stack slot to be cleared */
- bool seen; /* this insn was processed by the verifier */
+ u32 seen; /* this insn was processed by the verifier at env->pass_cnt */
bool zext_dst; /* this insn zero extends dst reg */
u8 alu_state; /* used in combination with alu_limit */
- bool prune_point;
+
+ /* below fields are initialized once */
unsigned int orig_idx; /* original instruction index */
+ bool prune_point;
};
#define MAX_USED_MAPS 64 /* max number of maps accessed by one eBPF program */
@@ -330,15 +335,18 @@ static inline bool bpf_verifier_log_full(const struct bpf_verifier_log *log)
#define BPF_LOG_STATS 4
#define BPF_LOG_LEVEL (BPF_LOG_LEVEL1 | BPF_LOG_LEVEL2)
#define BPF_LOG_MASK (BPF_LOG_LEVEL | BPF_LOG_STATS)
+#define BPF_LOG_KERNEL (BPF_LOG_MASK + 1) /* kernel internal flag */
static inline bool bpf_verifier_log_needed(const struct bpf_verifier_log *log)
{
- return log->level && log->ubuf && !bpf_verifier_log_full(log);
+ return (log->level && log->ubuf && !bpf_verifier_log_full(log)) ||
+ log->level == BPF_LOG_KERNEL;
}
#define BPF_MAX_SUBPROGS 256
struct bpf_subprog_info {
+ /* 'start' has to be the first field otherwise find_subprog() won't work */
u32 start; /* insn idx of function entry point */
u32 linfo_idx; /* The idx to the main_prog->aux->linfo */
u16 stack_depth; /* max. stack depth used by this function */
@@ -373,6 +381,7 @@ struct bpf_verifier_env {
int *insn_stack;
int cur_stack;
} cfg;
+ u32 pass_cnt; /* number of times do_check() was called */
u32 subprog_cnt;
/* number of instructions analyzed by the verifier */
u32 prev_insn_processed, insn_processed;
@@ -397,6 +406,8 @@ __printf(2, 0) void bpf_verifier_vlog(struct bpf_verifier_log *log,
const char *fmt, va_list args);
__printf(2, 3) void bpf_verifier_log_write(struct bpf_verifier_env *env,
const char *fmt, ...);
+__printf(2, 3) void bpf_log(struct bpf_verifier_log *log,
+ const char *fmt, ...);
static inline struct bpf_func_state *cur_func(struct bpf_verifier_env *env)
{
@@ -420,4 +431,7 @@ bpf_prog_offload_replace_insn(struct bpf_verifier_env *env, u32 off,
void
bpf_prog_offload_remove_insns(struct bpf_verifier_env *env, u32 off, u32 cnt);
+int check_ctx_reg(struct bpf_verifier_env *env,
+ const struct bpf_reg_state *reg, int regno);
+
#endif /* _LINUX_BPF_VERIFIER_H */
diff --git a/include/linux/brcmphy.h b/include/linux/brcmphy.h
index 6db2d9a6e503..b475e7f20d28 100644
--- a/include/linux/brcmphy.h
+++ b/include/linux/brcmphy.h
@@ -200,9 +200,15 @@
#define BCM5482_SHD_SSD 0x14 /* 10100: Secondary SerDes control */
#define BCM5482_SHD_SSD_LEDM 0x0008 /* SSD LED Mode enable */
#define BCM5482_SHD_SSD_EN 0x0001 /* SSD enable */
-#define BCM5482_SHD_MODE 0x1f /* 11111: Mode Control Register */
-#define BCM5482_SHD_MODE_1000BX 0x0001 /* Enable 1000BASE-X registers */
+/* 10011: SerDes 100-FX Control Register */
+#define BCM54616S_SHD_100FX_CTRL 0x13
+#define BCM54616S_100FX_MODE BIT(0) /* 100-FX SerDes Enable */
+
+/* 11111: Mode Control Register */
+#define BCM54XX_SHD_MODE 0x1f
+#define BCM54XX_SHD_INTF_SEL_MASK GENMASK(2, 1) /* INTERF_SEL[1:0] */
+#define BCM54XX_SHD_MODE_1000BX BIT(0) /* Enable 1000-X registers */
/*
* EXPANSION SHADOW ACCESS REGISTERS. (PHY REG 0x15, 0x16, and 0x17)
diff --git a/include/linux/bsearch.h b/include/linux/bsearch.h
index 62b1eb348858..8ed53d7524ea 100644
--- a/include/linux/bsearch.h
+++ b/include/linux/bsearch.h
@@ -5,6 +5,6 @@
#include <linux/types.h>
void *bsearch(const void *key, const void *base, size_t num, size_t size,
- int (*cmp)(const void *key, const void *elt));
+ cmp_func_t cmp);
#endif /* _LINUX_BSEARCH_H */
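
A minimal usage sketch for the cmp_func_t-based prototype (the array and comparator below are invented):

	static int cmp_u32(const void *key, const void *elt)
	{
		u32 k = *(const u32 *)key, e = *(const u32 *)elt;

		return k < e ? -1 : (k > e ? 1 : 0);
	}

	static bool contains(const u32 *sorted, size_t n, u32 value)
	{
		return bsearch(&value, sorted, n, sizeof(*sorted), cmp_u32) != NULL;
	}
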
diff --git a/include/linux/btf.h b/include/linux/btf.h
index 64cdf2a23d42..5c1ea99b480f 100644
--- a/include/linux/btf.h
+++ b/include/linux/btf.h
@@ -5,6 +5,9 @@
#define _LINUX_BTF_H 1
#include <linux/types.h>
+#include <uapi/linux/btf.h>
+
+#define BTF_TYPE_EMIT(type) ((void)(type *)0)
struct btf;
struct btf_member;
@@ -52,10 +55,92 @@ bool btf_member_is_reg_int(const struct btf *btf, const struct btf_type *s,
u32 expected_offset, u32 expected_size);
int btf_find_spin_lock(const struct btf *btf, const struct btf_type *t);
bool btf_type_is_void(const struct btf_type *t);
+s32 btf_find_by_name_kind(const struct btf *btf, const char *name, u8 kind);
+const struct btf_type *btf_type_skip_modifiers(const struct btf *btf,
+ u32 id, u32 *res_id);
+const struct btf_type *btf_type_resolve_ptr(const struct btf *btf,
+ u32 id, u32 *res_id);
+const struct btf_type *btf_type_resolve_func_ptr(const struct btf *btf,
+ u32 id, u32 *res_id);
+const struct btf_type *
+btf_resolve_size(const struct btf *btf, const struct btf_type *type,
+ u32 *type_size, const struct btf_type **elem_type,
+ u32 *total_nelems);
+
+#define for_each_member(i, struct_type, member) \
+ for (i = 0, member = btf_type_member(struct_type); \
+ i < btf_type_vlen(struct_type); \
+ i++, member++)
+
+static inline bool btf_type_is_ptr(const struct btf_type *t)
+{
+ return BTF_INFO_KIND(t->info) == BTF_KIND_PTR;
+}
+
+static inline bool btf_type_is_int(const struct btf_type *t)
+{
+ return BTF_INFO_KIND(t->info) == BTF_KIND_INT;
+}
+
+static inline bool btf_type_is_enum(const struct btf_type *t)
+{
+ return BTF_INFO_KIND(t->info) == BTF_KIND_ENUM;
+}
+
+static inline bool btf_type_is_typedef(const struct btf_type *t)
+{
+ return BTF_INFO_KIND(t->info) == BTF_KIND_TYPEDEF;
+}
+
+static inline bool btf_type_is_func(const struct btf_type *t)
+{
+ return BTF_INFO_KIND(t->info) == BTF_KIND_FUNC;
+}
+
+static inline bool btf_type_is_func_proto(const struct btf_type *t)
+{
+ return BTF_INFO_KIND(t->info) == BTF_KIND_FUNC_PROTO;
+}
+
+static inline u16 btf_type_vlen(const struct btf_type *t)
+{
+ return BTF_INFO_VLEN(t->info);
+}
+
+static inline u16 btf_func_linkage(const struct btf_type *t)
+{
+ return BTF_INFO_VLEN(t->info);
+}
+
+static inline bool btf_type_kflag(const struct btf_type *t)
+{
+ return BTF_INFO_KFLAG(t->info);
+}
+
+static inline u32 btf_member_bit_offset(const struct btf_type *struct_type,
+ const struct btf_member *member)
+{
+ return btf_type_kflag(struct_type) ? BTF_MEMBER_BIT_OFFSET(member->offset)
+ : member->offset;
+}
+
+static inline u32 btf_member_bitfield_size(const struct btf_type *struct_type,
+ const struct btf_member *member)
+{
+ return btf_type_kflag(struct_type) ? BTF_MEMBER_BITFIELD_SIZE(member->offset)
+ : 0;
+}
+
+static inline const struct btf_member *btf_type_member(const struct btf_type *t)
+{
+ return (const struct btf_member *)(t + 1);
+}
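
An illustrative walk over a BTF struct using the new helpers; the function is invented and assumes 'btf' and a BTF_KIND_STRUCT 't' come from the caller:

	static void dump_struct_members(const struct btf *btf, const struct btf_type *t)
	{
		const struct btf_member *member;
		u32 i;

		for_each_member(i, t, member) {
			pr_debug("member %s at bit %u (bitfield width %u)\n",
				 btf_name_by_offset(btf, member->name_off),
				 btf_member_bit_offset(t, member),
				 btf_member_bitfield_size(t, member));
		}
	}
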
#ifdef CONFIG_BPF_SYSCALL
const struct btf_type *btf_type_by_id(const struct btf *btf, u32 type_id);
const char *btf_name_by_offset(const struct btf *btf, u32 offset);
+struct btf *btf_parse_vmlinux(void);
+struct btf *bpf_prog_get_target_btf(const struct bpf_prog *prog);
#else
static inline const struct btf_type *btf_type_by_id(const struct btf *btf,
u32 type_id)
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index 7b73ef7f902d..e0b020eaf32e 100644
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -22,9 +22,6 @@ enum bh_state_bits {
BH_Dirty, /* Is dirty */
BH_Lock, /* Is locked */
BH_Req, /* Has been submitted for I/O */
- BH_Uptodate_Lock,/* Used by the first bh in a page, to serialise
- * IO completion of other buffers in the page
- */
BH_Mapped, /* Has a disk mapping */
BH_New, /* Disk mapping was newly created by get_block */
@@ -76,6 +73,9 @@ struct buffer_head {
struct address_space *b_assoc_map; /* mapping this buffer is
associated with */
atomic_t b_count; /* users using this buffer_head */
+ spinlock_t b_uptodate_lock; /* Used by the first bh in a page, to
+ * serialise IO completion of other
+ * buffers in the page */
};
/*
diff --git a/include/linux/build_bug.h b/include/linux/build_bug.h
index 0fe5426f2bdc..e3a0be2c90ad 100644
--- a/include/linux/build_bug.h
+++ b/include/linux/build_bug.h
@@ -9,11 +9,11 @@
#else /* __CHECKER__ */
/*
* Force a compilation error if condition is true, but also produce a
- * result (of value 0 and type size_t), so the expression can be used
+ * result (of value 0 and type int), so the expression can be used
* e.g. in a structure initializer (or where-ever else comma expressions
* aren't permitted).
*/
-#define BUILD_BUG_ON_ZERO(e) (sizeof(struct { int:(-!!(e)); }))
+#define BUILD_BUG_ON_ZERO(e) ((int)(sizeof(struct { int:(-!!(e)); })))
#endif /* __CHECKER__ */
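
Since the macro now yields an int 0, it composes into ordinary integer expressions (for constant arguments). A made-up example; the in-tree analogue is the __must_be_array() check folded into ARRAY_SIZE():

	#define ASSERT_U8_FITS(x)	BUILD_BUG_ON_ZERO((x) > 255)
	#define TO_U8(x)		((u8)(x) + ASSERT_U8_FITS(x))	/* fails to build if x > 255 */
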
/* Force a compilation error if a constant expression is not a power of 2 */
diff --git a/include/linux/bvec.h b/include/linux/bvec.h
index a032f01e928c..a81c13ac1972 100644
--- a/include/linux/bvec.h
+++ b/include/linux/bvec.h
@@ -87,26 +87,24 @@ struct bvec_iter_all {
static inline bool bvec_iter_advance(const struct bio_vec *bv,
struct bvec_iter *iter, unsigned bytes)
{
+ unsigned int idx = iter->bi_idx;
+
if (WARN_ONCE(bytes > iter->bi_size,
"Attempted to advance past end of bvec iter\n")) {
iter->bi_size = 0;
return false;
}
- while (bytes) {
- const struct bio_vec *cur = bv + iter->bi_idx;
- unsigned len = min3(bytes, iter->bi_size,
- cur->bv_len - iter->bi_bvec_done);
-
- bytes -= len;
- iter->bi_size -= len;
- iter->bi_bvec_done += len;
+ iter->bi_size -= bytes;
+ bytes += iter->bi_bvec_done;
- if (iter->bi_bvec_done == cur->bv_len) {
- iter->bi_bvec_done = 0;
- iter->bi_idx++;
- }
+ while (bytes && bytes >= bv[idx].bv_len) {
+ bytes -= bv[idx].bv_len;
+ idx++;
}
+
+ iter->bi_idx = idx;
+ iter->bi_bvec_done = bytes;
return true;
}
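
Worked example of the rewritten advance (illustrative): two bvecs of 512 and 1024 bytes, iterator 200 bytes into the first, advancing by 900:

	/* bytes = 900 + 200 = 1100; 1100 >= bv[0].bv_len (512)  -> bytes = 588, idx = 1
	 * 588 < bv[1].bv_len (1024)                             -> loop stops
	 * result: bi_idx = 1, bi_bvec_done = 588, bi_size reduced by 900
	 */
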
@@ -155,26 +153,4 @@ static inline void bvec_advance(const struct bio_vec *bvec,
}
}
-/*
- * Get the last single-page segment from the multi-page bvec and store it
- * in @seg
- */
-static inline void mp_bvec_last_segment(const struct bio_vec *bvec,
- struct bio_vec *seg)
-{
- unsigned total = bvec->bv_offset + bvec->bv_len;
- unsigned last_page = (total - 1) / PAGE_SIZE;
-
- seg->bv_page = bvec->bv_page + last_page;
-
- /* the whole segment is inside the last page */
- if (bvec->bv_offset >= last_page * PAGE_SIZE) {
- seg->bv_offset = bvec->bv_offset % PAGE_SIZE;
- seg->bv_len = bvec->bv_len;
- } else {
- seg->bv_offset = 0;
- seg->bv_len = total - last_page * PAGE_SIZE;
- }
-}
-
#endif /* __LINUX_BVEC_ITER_H */
diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h
index 9b3c720a31b1..5e3d45525bd3 100644
--- a/include/linux/can/dev.h
+++ b/include/linux/can/dev.h
@@ -18,6 +18,7 @@
#include <linux/can/error.h>
#include <linux/can/led.h>
#include <linux/can/netlink.h>
+#include <linux/can/skb.h>
#include <linux/netdevice.h>
/*
@@ -91,6 +92,36 @@ struct can_priv {
#define get_can_dlc(i) (min_t(__u8, (i), CAN_MAX_DLC))
#define get_canfd_dlc(i) (min_t(__u8, (i), CANFD_MAX_DLC))
+/* Check for outgoing skbs that have not been created by the CAN subsystem */
+static inline bool can_skb_headroom_valid(struct net_device *dev,
+ struct sk_buff *skb)
+{
+ /* af_packet creates a headroom of HH_DATA_MOD bytes which is fine */
+ if (WARN_ON_ONCE(skb_headroom(skb) < sizeof(struct can_skb_priv)))
+ return false;
+
+ /* af_packet does not apply CAN skb specific settings */
+ if (skb->ip_summed == CHECKSUM_NONE) {
+ /* init headroom */
+ can_skb_prv(skb)->ifindex = dev->ifindex;
+ can_skb_prv(skb)->skbcnt = 0;
+
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+ /* perform proper loopback on capable devices */
+ if (dev->flags & IFF_ECHO)
+ skb->pkt_type = PACKET_LOOPBACK;
+ else
+ skb->pkt_type = PACKET_HOST;
+
+ skb_reset_mac_header(skb);
+ skb_reset_network_header(skb);
+ skb_reset_transport_header(skb);
+ }
+
+ return true;
+}
+
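An illustrative driver transmit path (all names invented): the headroom fix-up above is reached through can_dropped_invalid_skb(), so a driver keeps a single check at the top of its xmit handler:

	static netdev_tx_t example_can_xmit(struct sk_buff *skb, struct net_device *dev)
	{
		if (can_dropped_invalid_skb(dev, skb))
			return NETDEV_TX_OK;	/* skb was already freed */

		/* ... hand the frame to the controller ... */
		return NETDEV_TX_OK;
	}
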
/* Drop a given socketbuffer if it does not contain a valid CAN frame. */
static inline bool can_dropped_invalid_skb(struct net_device *dev,
struct sk_buff *skb)
@@ -108,6 +139,9 @@ static inline bool can_dropped_invalid_skb(struct net_device *dev,
} else
goto inval_skb;
+ if (!can_skb_headroom_valid(dev, skb))
+ goto inval_skb;
+
return false;
inval_skb:
diff --git a/include/linux/can/platform/mcp251x.h b/include/linux/can/platform/mcp251x.h
deleted file mode 100644
index 9e5ac27fb6c1..000000000000
--- a/include/linux/can/platform/mcp251x.h
+++ /dev/null
@@ -1,22 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _CAN_PLATFORM_MCP251X_H
-#define _CAN_PLATFORM_MCP251X_H
-
-/*
- *
- * CAN bus driver for Microchip 251x CAN Controller with SPI Interface
- *
- */
-
-#include <linux/spi/spi.h>
-
-/*
- * struct mcp251x_platform_data - MCP251X SPI CAN controller platform data
- * @oscillator_frequency: - oscillator frequency in Hz
- */
-
-struct mcp251x_platform_data {
- unsigned long oscillator_frequency;
-};
-
-#endif /* !_CAN_PLATFORM_MCP251X_H */
diff --git a/include/linux/can/rx-offload.h b/include/linux/can/rx-offload.h
index 01219f2902bf..1b78a0cfb615 100644
--- a/include/linux/can/rx-offload.h
+++ b/include/linux/can/rx-offload.h
@@ -15,9 +15,9 @@
struct can_rx_offload {
struct net_device *dev;
- unsigned int (*mailbox_read)(struct can_rx_offload *offload,
- struct can_frame *cf,
- u32 *timestamp, unsigned int mb);
+ struct sk_buff *(*mailbox_read)(struct can_rx_offload *offload,
+ unsigned int mb, u32 *timestamp,
+ bool drop);
struct sk_buff_head skb_queue;
u32 skb_queue_len_max;
@@ -44,7 +44,6 @@ unsigned int can_rx_offload_get_echo_skb(struct can_rx_offload *offload,
unsigned int idx, u32 timestamp);
int can_rx_offload_queue_tail(struct can_rx_offload *offload,
struct sk_buff *skb);
-void can_rx_offload_reset(struct can_rx_offload *offload);
void can_rx_offload_del(struct can_rx_offload *offload);
void can_rx_offload_enable(struct can_rx_offload *offload);
diff --git a/include/linux/ceph/libceph.h b/include/linux/ceph/libceph.h
index b9dbda1c26aa..ec73ebc4827d 100644
--- a/include/linux/ceph/libceph.h
+++ b/include/linux/ceph/libceph.h
@@ -280,10 +280,13 @@ extern const char *ceph_msg_type_name(int type);
extern int ceph_check_fsid(struct ceph_client *client, struct ceph_fsid *fsid);
extern void *ceph_kvmalloc(size_t size, gfp_t flags);
-extern struct ceph_options *ceph_parse_options(char *options,
- const char *dev_name, const char *dev_name_end,
- int (*parse_extra_token)(char *c, void *private),
- void *private);
+struct fs_parameter;
+struct fc_log;
+struct ceph_options *ceph_alloc_options(void);
+int ceph_parse_mon_ips(const char *buf, size_t len, struct ceph_options *opt,
+ struct fc_log *l);
+int ceph_parse_param(struct fs_parameter *param, struct ceph_options *opt,
+ struct fc_log *l);
int ceph_print_client_options(struct seq_file *m, struct ceph_client *client,
bool show_all);
extern void ceph_destroy_options(struct ceph_options *opt);
diff --git a/include/linux/ceph/mdsmap.h b/include/linux/ceph/mdsmap.h
index 0067d767c9ae..35d385296fbb 100644
--- a/include/linux/ceph/mdsmap.h
+++ b/include/linux/ceph/mdsmap.h
@@ -25,8 +25,9 @@ struct ceph_mdsmap {
u32 m_session_timeout; /* seconds */
u32 m_session_autoclose; /* seconds */
u64 m_max_file_size;
- u32 m_max_mds; /* size of m_addr, m_state arrays */
- int m_num_mds;
+ u32 m_max_mds; /* expected up:active mds number */
+ u32 m_num_active_mds; /* actual up:active mds number */
+ u32 possible_max_rank; /* possible max rank index */
struct ceph_mds_info *m_info;
/* which object pools file data can be stored in */
@@ -42,7 +43,7 @@ struct ceph_mdsmap {
static inline struct ceph_entity_addr *
ceph_mdsmap_get_addr(struct ceph_mdsmap *m, int w)
{
- if (w >= m->m_num_mds)
+ if (w >= m->possible_max_rank)
return NULL;
return &m->m_info[w].addr;
}
@@ -50,14 +51,14 @@ ceph_mdsmap_get_addr(struct ceph_mdsmap *m, int w)
static inline int ceph_mdsmap_get_state(struct ceph_mdsmap *m, int w)
{
BUG_ON(w < 0);
- if (w >= m->m_num_mds)
+ if (w >= m->possible_max_rank)
return CEPH_MDS_STATE_DNE;
return m->m_info[w].state;
}
static inline bool ceph_mdsmap_is_laggy(struct ceph_mdsmap *m, int w)
{
- if (w >= 0 && w < m->m_num_mds)
+ if (w >= 0 && w < m->possible_max_rank)
return m->m_info[w].laggy;
return false;
}
diff --git a/include/linux/ceph/messenger.h b/include/linux/ceph/messenger.h
index c4458dc6a757..76371aaae2d1 100644
--- a/include/linux/ceph/messenger.h
+++ b/include/linux/ceph/messenger.h
@@ -175,9 +175,10 @@ struct ceph_msg_data {
#endif /* CONFIG_BLOCK */
struct ceph_bvec_iter bvec_pos;
struct {
- struct page **pages; /* NOT OWNER. */
+ struct page **pages;
size_t length; /* total # bytes */
unsigned int alignment; /* first page */
+ bool own_pages;
};
struct ceph_pagelist *pagelist;
};
@@ -356,8 +357,8 @@ extern void ceph_con_keepalive(struct ceph_connection *con);
extern bool ceph_con_keepalive_expired(struct ceph_connection *con,
unsigned long interval);
-extern void ceph_msg_data_add_pages(struct ceph_msg *msg, struct page **pages,
- size_t length, size_t alignment);
+void ceph_msg_data_add_pages(struct ceph_msg *msg, struct page **pages,
+ size_t length, size_t alignment, bool own_pages);
extern void ceph_msg_data_add_pagelist(struct ceph_msg *msg,
struct ceph_pagelist *pagelist);
#ifdef CONFIG_BLOCK
diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h
index eaffbdddf89a..5a62dbd3f4c2 100644
--- a/include/linux/ceph/osd_client.h
+++ b/include/linux/ceph/osd_client.h
@@ -534,6 +534,7 @@ int ceph_osdc_copy_from(struct ceph_osd_client *osdc,
struct ceph_object_id *dst_oid,
struct ceph_object_locator *dst_oloc,
u32 dst_fadvise_flags,
+ u32 truncate_seq, u64 truncate_size,
u8 copy_from_flags);
/* watch/notify */
diff --git a/include/linux/ceph/osdmap.h b/include/linux/ceph/osdmap.h
index e081b56f1c1d..5e601975745f 100644
--- a/include/linux/ceph/osdmap.h
+++ b/include/linux/ceph/osdmap.h
@@ -37,6 +37,9 @@ int ceph_spg_compare(const struct ceph_spg *lhs, const struct ceph_spg *rhs);
#define CEPH_POOL_FLAG_HASHPSPOOL (1ULL << 0) /* hash pg seed and pool id
together */
#define CEPH_POOL_FLAG_FULL (1ULL << 1) /* pool is full */
+#define CEPH_POOL_FLAG_FULL_QUOTA (1ULL << 10) /* pool ran out of quota,
+ will set FULL too */
+#define CEPH_POOL_FLAG_NEARFULL (1ULL << 11) /* pool is nearfull */
struct ceph_pg_pool_info {
struct rb_node node;
@@ -304,5 +307,6 @@ extern struct ceph_pg_pool_info *ceph_pg_pool_by_id(struct ceph_osdmap *map,
extern const char *ceph_pg_pool_name_by_id(struct ceph_osdmap *map, u64 id);
extern int ceph_pg_poolid_by_name(struct ceph_osdmap *map, const char *name);
+u64 ceph_pg_pool_flags(struct ceph_osdmap *map, u64 id);
#endif
diff --git a/include/linux/ceph/rados.h b/include/linux/ceph/rados.h
index 3eb0e55665b4..88ed3c5c04c5 100644
--- a/include/linux/ceph/rados.h
+++ b/include/linux/ceph/rados.h
@@ -143,8 +143,10 @@ extern const char *ceph_osd_state_name(int s);
/*
* osd map flag bits
*/
-#define CEPH_OSDMAP_NEARFULL (1<<0) /* sync writes (near ENOSPC) */
-#define CEPH_OSDMAP_FULL (1<<1) /* no data writes (ENOSPC) */
+#define CEPH_OSDMAP_NEARFULL (1<<0) /* sync writes (near ENOSPC),
+ not set since ~luminous */
+#define CEPH_OSDMAP_FULL (1<<1) /* no data writes (ENOSPC),
+ not set since ~luminous */
#define CEPH_OSDMAP_PAUSERD (1<<2) /* pause all reads */
#define CEPH_OSDMAP_PAUSEWR (1<<3) /* pause all writes */
#define CEPH_OSDMAP_PAUSEREC (1<<4) /* pause recovery */
@@ -256,6 +258,7 @@ extern const char *ceph_osd_state_name(int s);
\
/* tiering */ \
f(COPY_FROM, __CEPH_OSD_OP(WR, DATA, 26), "copy-from") \
+ f(COPY_FROM2, __CEPH_OSD_OP(WR, DATA, 45), "copy-from2") \
f(COPY_GET_CLASSIC, __CEPH_OSD_OP(RD, DATA, 27), "copy-get-classic") \
f(UNDIRTY, __CEPH_OSD_OP(WR, DATA, 28), "undirty") \
f(ISDIRTY, __CEPH_OSD_OP(RD, DATA, 29), "isdirty") \
@@ -446,6 +449,7 @@ enum {
CEPH_OSD_COPY_FROM_FLAG_MAP_SNAP_CLONE = 8, /* map snap direct to
* cloneid */
CEPH_OSD_COPY_FROM_FLAG_RWORDERED = 16, /* order with write */
+ CEPH_OSD_COPY_FROM_FLAG_TRUNCATE_SEQ = 32, /* send truncate_{seq,size} */
};
enum {
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
index 430e219e3aba..63097cb243cb 100644
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -355,16 +355,6 @@ struct cgroup {
unsigned long flags; /* "unsigned long" so bitops work */
/*
- * idr allocated in-hierarchy ID.
- *
- * ID 0 is not used, the ID of the root cgroup is always 1, and a
- * new cgroup will be assigned with a smallest available ID.
- *
- * Allocating/Removing ID must be protected by cgroup_mutex.
- */
- int id;
-
- /*
* The depth this cgroup is at. The root is at depth zero and each
* step down the hierarchy increments the level. This along with
* ancestor_ids[] can determine whether a given cgroup is a
@@ -458,7 +448,7 @@ struct cgroup {
struct list_head rstat_css_list;
/* cgroup basic resource statistics */
- struct cgroup_base_stat pending_bstat; /* pending from children */
+ struct cgroup_base_stat last_bstat;
struct cgroup_base_stat bstat;
struct prev_cputime prev_cputime; /* for printing out cputime */
@@ -488,7 +478,7 @@ struct cgroup {
struct cgroup_freezer_state freezer;
/* ids of the ancestors at each level including self */
- int ancestor_ids[];
+ u64 ancestor_ids[];
};
/*
@@ -509,7 +499,7 @@ struct cgroup_root {
struct cgroup cgrp;
/* for cgrp->ancestor_ids[0] */
- int cgrp_ancestor_id_storage;
+ u64 cgrp_ancestor_id_storage;
/* Number of cgroups in the hierarchy, used only for /proc/cgroups */
atomic_t nr_cgrps;
@@ -520,9 +510,6 @@ struct cgroup_root {
/* Hierarchy-specific flags */
unsigned int flags;
- /* IDs for cgroups in this hierarchy */
- struct idr cgroup_idr;
-
/* The path to use for release notifications. */
char release_agent_path[PATH_MAX];
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index 3ba3e6da13a6..e75d2191226b 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -62,6 +62,7 @@ struct css_task_iter {
struct list_head *mg_tasks_head;
struct list_head *dying_tasks_head;
+ struct list_head *cur_tasks_head;
struct css_set *cur_cset;
struct css_set *cur_dcset;
struct task_struct *cur_task;
@@ -150,7 +151,6 @@ struct task_struct *cgroup_taskset_first(struct cgroup_taskset *tset,
struct task_struct *cgroup_taskset_next(struct cgroup_taskset *tset,
struct cgroup_subsys_state **dst_cssp);
-void cgroup_enable_task_cg_lists(void);
void css_task_iter_start(struct cgroup_subsys_state *css, unsigned int flags,
struct css_task_iter *it);
struct task_struct *css_task_iter_next(struct css_task_iter *it);
@@ -305,6 +305,11 @@ void css_task_iter_end(struct css_task_iter *it);
* Inline functions.
*/
+static inline u64 cgroup_id(struct cgroup *cgrp)
+{
+ return cgrp->kn->id;
+}
+
/**
* css_get - obtain a reference on the specified css
* @css: target css
@@ -566,7 +571,7 @@ static inline bool cgroup_is_descendant(struct cgroup *cgrp,
{
if (cgrp->root != ancestor->root || cgrp->level < ancestor->level)
return false;
- return cgrp->ancestor_ids[ancestor->level] == ancestor->id;
+ return cgrp->ancestor_ids[ancestor->level] == cgroup_id(ancestor);
}
/**
@@ -617,7 +622,7 @@ static inline bool cgroup_is_populated(struct cgroup *cgrp)
/* returns ino associated with a cgroup */
static inline ino_t cgroup_ino(struct cgroup *cgrp)
{
- return cgrp->kn->id.ino;
+ return kernfs_ino(cgrp->kn);
}
/* cft/css accessors for cftype->write() operation */
@@ -688,18 +693,13 @@ static inline void cgroup_kthread_ready(void)
current->no_cgroup_migration = 0;
}
-static inline union kernfs_node_id *cgroup_get_kernfs_id(struct cgroup *cgrp)
-{
- return &cgrp->kn->id;
-}
-
-void cgroup_path_from_kernfs_id(const union kernfs_node_id *id,
- char *buf, size_t buflen);
+void cgroup_path_from_kernfs_id(u64 id, char *buf, size_t buflen);
#else /* !CONFIG_CGROUPS */
struct cgroup_subsys_state;
struct cgroup;
+static inline u64 cgroup_id(struct cgroup *cgrp) { return 1; }
static inline void css_get(struct cgroup_subsys_state *css) {}
static inline void css_put(struct cgroup_subsys_state *css) {}
static inline int cgroup_attach_task_all(struct task_struct *from,
@@ -719,10 +719,6 @@ static inline int cgroup_init_early(void) { return 0; }
static inline int cgroup_init(void) { return 0; }
static inline void cgroup_init_kthreadd(void) {}
static inline void cgroup_kthread_ready(void) {}
-static inline union kernfs_node_id *cgroup_get_kernfs_id(struct cgroup *cgrp)
-{
- return NULL;
-}
static inline struct cgroup *cgroup_parent(struct cgroup *cgrp)
{
@@ -740,8 +736,8 @@ static inline bool task_under_cgroup_hierarchy(struct task_struct *task,
return true;
}
-static inline void cgroup_path_from_kernfs_id(const union kernfs_node_id *id,
- char *buf, size_t buflen) {}
+static inline void cgroup_path_from_kernfs_id(u64 id, char *buf, size_t buflen)
+{}
#endif /* !CONFIG_CGROUPS */
#ifdef CONFIG_CGROUPS
diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
index 2fdfe8061363..bd1ee9039558 100644
--- a/include/linux/clk-provider.h
+++ b/include/linux/clk-provider.h
@@ -190,8 +190,14 @@ struct clk_duty {
*
* @init: Perform platform-specific initialization magic.
* This is not used by any of the basic clock types.
- * Please consider other ways of solving initialization problems
- * before using this callback, as its use is discouraged.
+ * This callback exists for HW which needs to perform some
+ * initialization magic for CCF to get an accurate view of the
+ * clock. It may also be used when dynamic resource allocation is
+ * required. It shall not be used to deal with clock parameters,
+ * such as rate or parents.
+ * Returns 0 on success, a negative error code otherwise.
+ *
+ * @terminate: Free any resource allocated by init.
*
* @debug_init: Set up type-specific debugfs entries for this clock. This
* is called once, after the debugfs directory entry for this
@@ -243,7 +249,8 @@ struct clk_ops {
struct clk_duty *duty);
int (*set_duty_cycle)(struct clk_hw *hw,
struct clk_duty *duty);
- void (*init)(struct clk_hw *hw);
+ int (*init)(struct clk_hw *hw);
+ void (*terminate)(struct clk_hw *hw);
void (*debug_init)(struct clk_hw *hw, struct dentry *dentry);
};
@@ -321,29 +328,119 @@ struct clk_hw {
* struct clk_fixed_rate - fixed-rate clock
* @hw: handle between common and hardware-specific interfaces
* @fixed_rate: constant frequency of clock
+ * @fixed_accuracy: constant accuracy of clock in ppb (parts per billion)
+ * @flags: hardware specific flags
+ *
+ * Flags:
+ * * CLK_FIXED_RATE_PARENT_ACCURACY - Use the accuracy of the parent clk
+ * instead of what's set in @fixed_accuracy.
*/
struct clk_fixed_rate {
struct clk_hw hw;
unsigned long fixed_rate;
unsigned long fixed_accuracy;
+ unsigned long flags;
};
-#define to_clk_fixed_rate(_hw) container_of(_hw, struct clk_fixed_rate, hw)
+#define CLK_FIXED_RATE_PARENT_ACCURACY BIT(0)
extern const struct clk_ops clk_fixed_rate_ops;
+struct clk_hw *__clk_hw_register_fixed_rate(struct device *dev,
+ struct device_node *np, const char *name,
+ const char *parent_name, const struct clk_hw *parent_hw,
+ const struct clk_parent_data *parent_data, unsigned long flags,
+ unsigned long fixed_rate, unsigned long fixed_accuracy,
+ unsigned long clk_fixed_flags);
struct clk *clk_register_fixed_rate(struct device *dev, const char *name,
const char *parent_name, unsigned long flags,
unsigned long fixed_rate);
-struct clk_hw *clk_hw_register_fixed_rate(struct device *dev, const char *name,
- const char *parent_name, unsigned long flags,
- unsigned long fixed_rate);
-struct clk *clk_register_fixed_rate_with_accuracy(struct device *dev,
- const char *name, const char *parent_name, unsigned long flags,
- unsigned long fixed_rate, unsigned long fixed_accuracy);
+/**
+ * clk_hw_register_fixed_rate - register fixed-rate clock with the clock
+ * framework
+ * @dev: device that is registering this clock
+ * @name: name of this clock
+ * @parent_name: name of clock's parent
+ * @flags: framework-specific flags
+ * @fixed_rate: non-adjustable clock rate
+ */
+#define clk_hw_register_fixed_rate(dev, name, parent_name, flags, fixed_rate) \
+ __clk_hw_register_fixed_rate((dev), NULL, (name), (parent_name), NULL, \
+ NULL, (flags), (fixed_rate), 0, 0)
+/**
+ * clk_hw_register_fixed_rate_parent_hw - register fixed-rate clock with
+ * the clock framework
+ * @dev: device that is registering this clock
+ * @name: name of this clock
+ * @parent_hw: pointer to parent clk
+ * @flags: framework-specific flags
+ * @fixed_rate: non-adjustable clock rate
+ */
+#define clk_hw_register_fixed_rate_parent_hw(dev, name, parent_hw, flags, \
+ fixed_rate) \
+ __clk_hw_register_fixed_rate((dev), NULL, (name), NULL, (parent_hw), \
+ NULL, (flags), (fixed_rate), 0, 0)
+/**
+ * clk_hw_register_fixed_rate_parent_data - register fixed-rate clock with
+ * the clock framework
+ * @dev: device that is registering this clock
+ * @name: name of this clock
+ * @parent_data: parent clk data
+ * @flags: framework-specific flags
+ * @fixed_rate: non-adjustable clock rate
+ */
+#define clk_hw_register_fixed_rate_parent_data(dev, name, parent_data, flags, \
+ fixed_rate) \
+ __clk_hw_register_fixed_rate((dev), NULL, (name), NULL, NULL, \
+ (parent_data), (flags), (fixed_rate), 0, \
+ 0)
+/**
+ * clk_hw_register_fixed_rate_with_accuracy - register fixed-rate clock with
+ * the clock framework
+ * @dev: device that is registering this clock
+ * @name: name of this clock
+ * @parent_name: name of clock's parent
+ * @flags: framework-specific flags
+ * @fixed_rate: non-adjustable clock rate
+ * @fixed_accuracy: non-adjustable clock accuracy
+ */
+#define clk_hw_register_fixed_rate_with_accuracy(dev, name, parent_name, \
+ flags, fixed_rate, \
+ fixed_accuracy) \
+ __clk_hw_register_fixed_rate((dev), NULL, (name), (parent_name), \
+ NULL, NULL, (flags), (fixed_rate), \
+ (fixed_accuracy), 0)
+/**
+ * clk_hw_register_fixed_rate_with_accuracy_parent_hw - register fixed-rate
+ * clock with the clock framework
+ * @dev: device that is registering this clock
+ * @name: name of this clock
+ * @parent_hw: pointer to parent clk
+ * @flags: framework-specific flags
+ * @fixed_rate: non-adjustable clock rate
+ * @fixed_accuracy: non-adjustable clock accuracy
+ */
+#define clk_hw_register_fixed_rate_with_accuracy_parent_hw(dev, name, \
+ parent_hw, flags, fixed_rate, fixed_accuracy) \
+ __clk_hw_register_fixed_rate((dev), NULL, (name), NULL, (parent_hw), \
+ NULL, (flags), (fixed_rate), \
+ (fixed_accuracy), 0)
+/**
+ * clk_hw_register_fixed_rate_with_accuracy_parent_data - register fixed-rate
+ * clock with the clock framework
+ * @dev: device that is registering this clock
+ * @name: name of this clock
+ * @parent_data: parent clk data
+ * @flags: framework-specific flags
+ * @fixed_rate: non-adjustable clock rate
+ * @fixed_accuracy: non-adjustable clock accuracy
+ */
+#define clk_hw_register_fixed_rate_with_accuracy_parent_data(dev, name, \
+ parent_data, flags, fixed_rate, fixed_accuracy) \
+ __clk_hw_register_fixed_rate((dev), NULL, (name), NULL, NULL, \
+ (parent_data), NULL, (flags), \
+ (fixed_rate), (fixed_accuracy), 0)
+
void clk_unregister_fixed_rate(struct clk *clk);
-struct clk_hw *clk_hw_register_fixed_rate_with_accuracy(struct device *dev,
- const char *name, const char *parent_name, unsigned long flags,
- unsigned long fixed_rate, unsigned long fixed_accuracy);
void clk_hw_unregister_fixed_rate(struct clk_hw *hw);
void of_fixed_clk_setup(struct device_node *np);
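
A hedged example of the new macro front-end (device and clock names invented); it expands to __clk_hw_register_fixed_rate() with no parent:

	static struct clk_hw *example_register_osc(struct device *dev)
	{
		/* 24 MHz crystal, no parent, no framework flags */
		return clk_hw_register_fixed_rate(dev, "osc24m", NULL, 0, 24000000);
	}
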
@@ -386,14 +483,67 @@ struct clk_gate {
#define CLK_GATE_BIG_ENDIAN BIT(2)
extern const struct clk_ops clk_gate_ops;
-struct clk *clk_register_gate(struct device *dev, const char *name,
- const char *parent_name, unsigned long flags,
+struct clk_hw *__clk_hw_register_gate(struct device *dev,
+ struct device_node *np, const char *name,
+ const char *parent_name, const struct clk_hw *parent_hw,
+ const struct clk_parent_data *parent_data,
+ unsigned long flags,
void __iomem *reg, u8 bit_idx,
u8 clk_gate_flags, spinlock_t *lock);
-struct clk_hw *clk_hw_register_gate(struct device *dev, const char *name,
+struct clk *clk_register_gate(struct device *dev, const char *name,
const char *parent_name, unsigned long flags,
void __iomem *reg, u8 bit_idx,
u8 clk_gate_flags, spinlock_t *lock);
+/**
+ * clk_hw_register_gate - register a gate clock with the clock framework
+ * @dev: device that is registering this clock
+ * @name: name of this clock
+ * @parent_name: name of this clock's parent
+ * @flags: framework-specific flags for this clock
+ * @reg: register address to control gating of this clock
+ * @bit_idx: which bit in the register controls gating of this clock
+ * @clk_gate_flags: gate-specific flags for this clock
+ * @lock: shared register lock for this clock
+ */
+#define clk_hw_register_gate(dev, name, parent_name, flags, reg, bit_idx, \
+ clk_gate_flags, lock) \
+ __clk_hw_register_gate((dev), NULL, (name), (parent_name), NULL, \
+ NULL, (flags), (reg), (bit_idx), \
+ (clk_gate_flags), (lock))
+/**
+ * clk_hw_register_gate_parent_hw - register a gate clock with the clock
+ * framework
+ * @dev: device that is registering this clock
+ * @name: name of this clock
+ * @parent_hw: pointer to parent clk
+ * @flags: framework-specific flags for this clock
+ * @reg: register address to control gating of this clock
+ * @bit_idx: which bit in the register controls gating of this clock
+ * @clk_gate_flags: gate-specific flags for this clock
+ * @lock: shared register lock for this clock
+ */
+#define clk_hw_register_gate_parent_hw(dev, name, parent_hw, flags, reg, \
+ bit_idx, clk_gate_flags, lock) \
+ __clk_hw_register_gate((dev), NULL, (name), NULL, (parent_hw), \
+ NULL, (flags), (reg), (bit_idx), \
+ (clk_gate_flags), (lock))
+/**
+ * clk_hw_register_gate_parent_data - register a gate clock with the clock
+ * framework
+ * @dev: device that is registering this clock
+ * @name: name of this clock
+ * @parent_data: parent clk data
+ * @flags: framework-specific flags for this clock
+ * @reg: register address to control gating of this clock
+ * @bit_idx: which bit in the register controls gating of this clock
+ * @clk_gate_flags: gate-specific flags for this clock
+ * @lock: shared register lock for this clock
+ */
+#define clk_hw_register_gate_parent_data(dev, name, parent_data, flags, reg, \
+ bit_idx, clk_gate_flags, lock) \
+ __clk_hw_register_gate((dev), NULL, (name), NULL, NULL, (parent_data), \
+ (flags), (reg), (bit_idx), \
+ (clk_gate_flags), (lock))
void clk_unregister_gate(struct clk *clk);
void clk_hw_unregister_gate(struct clk_hw *hw);
int clk_gate_is_enabled(struct clk_hw *hw);
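
An illustrative gate registration under a shared register spinlock (register layout and names invented):

	static DEFINE_SPINLOCK(example_gate_lock);

	static struct clk_hw *example_register_gate(struct device *dev, void __iomem *base)
	{
		/* bit 3 of the register at base + 0x10 gates the clock */
		return clk_hw_register_gate(dev, "uart_gate", "periph_clk", 0,
					    base + 0x10, 3, 0, &example_gate_lock);
	}
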
@@ -483,24 +633,153 @@ int divider_get_val(unsigned long rate, unsigned long parent_rate,
const struct clk_div_table *table, u8 width,
unsigned long flags);
-struct clk *clk_register_divider(struct device *dev, const char *name,
- const char *parent_name, unsigned long flags,
- void __iomem *reg, u8 shift, u8 width,
- u8 clk_divider_flags, spinlock_t *lock);
-struct clk_hw *clk_hw_register_divider(struct device *dev, const char *name,
- const char *parent_name, unsigned long flags,
- void __iomem *reg, u8 shift, u8 width,
- u8 clk_divider_flags, spinlock_t *lock);
+struct clk_hw *__clk_hw_register_divider(struct device *dev,
+ struct device_node *np, const char *name,
+ const char *parent_name, const struct clk_hw *parent_hw,
+ const struct clk_parent_data *parent_data, unsigned long flags,
+ void __iomem *reg, u8 shift, u8 width, u8 clk_divider_flags,
+ const struct clk_div_table *table, spinlock_t *lock);
struct clk *clk_register_divider_table(struct device *dev, const char *name,
const char *parent_name, unsigned long flags,
void __iomem *reg, u8 shift, u8 width,
u8 clk_divider_flags, const struct clk_div_table *table,
spinlock_t *lock);
-struct clk_hw *clk_hw_register_divider_table(struct device *dev,
- const char *name, const char *parent_name, unsigned long flags,
- void __iomem *reg, u8 shift, u8 width,
- u8 clk_divider_flags, const struct clk_div_table *table,
- spinlock_t *lock);
+/**
+ * clk_register_divider - register a divider clock with the clock framework
+ * @dev: device registering this clock
+ * @name: name of this clock
+ * @parent_name: name of clock's parent
+ * @flags: framework-specific flags
+ * @reg: register address to adjust divider
+ * @shift: number of bits to shift the bitfield
+ * @width: width of the bitfield
+ * @clk_divider_flags: divider-specific flags for this clock
+ * @lock: shared register lock for this clock
+ */
+#define clk_register_divider(dev, name, parent_name, flags, reg, shift, width, \
+ clk_divider_flags, lock) \
+ clk_register_divider_table((dev), (name), (parent_name), (flags), \
+ (reg), (shift), (width), \
+ (clk_divider_flags), NULL, (lock))
+/**
+ * clk_hw_register_divider - register a divider clock with the clock framework
+ * @dev: device registering this clock
+ * @name: name of this clock
+ * @parent_name: name of clock's parent
+ * @flags: framework-specific flags
+ * @reg: register address to adjust divider
+ * @shift: number of bits to shift the bitfield
+ * @width: width of the bitfield
+ * @clk_divider_flags: divider-specific flags for this clock
+ * @lock: shared register lock for this clock
+ */
+#define clk_hw_register_divider(dev, name, parent_name, flags, reg, shift, \
+ width, clk_divider_flags, lock) \
+ __clk_hw_register_divider((dev), NULL, (name), (parent_name), NULL, \
+ NULL, (flags), (reg), (shift), (width), \
+ (clk_divider_flags), NULL, (lock))
+/**
+ * clk_hw_register_divider_parent_hw - register a divider clock with the clock
+ * framework
+ * @dev: device registering this clock
+ * @name: name of this clock
+ * @parent_hw: pointer to parent clk
+ * @flags: framework-specific flags
+ * @reg: register address to adjust divider
+ * @shift: number of bits to shift the bitfield
+ * @width: width of the bitfield
+ * @clk_divider_flags: divider-specific flags for this clock
+ * @lock: shared register lock for this clock
+ */
+#define clk_hw_register_divider_parent_hw(dev, name, parent_hw, flags, reg, \
+ shift, width, clk_divider_flags, \
+ lock) \
+ __clk_hw_register_divider((dev), NULL, (name), NULL, (parent_hw), \
+ NULL, (flags), (reg), (shift), (width), \
+ (clk_divider_flags), NULL, (lock))
+/**
+ * clk_hw_register_divider_parent_data - register a divider clock with the clock
+ * framework
+ * @dev: device registering this clock
+ * @name: name of this clock
+ * @parent_data: parent clk data
+ * @flags: framework-specific flags
+ * @reg: register address to adjust divider
+ * @shift: number of bits to shift the bitfield
+ * @width: width of the bitfield
+ * @clk_divider_flags: divider-specific flags for this clock
+ * @lock: shared register lock for this clock
+ */
+#define clk_hw_register_divider_parent_data(dev, name, parent_data, flags, \
+ reg, shift, width, \
+ clk_divider_flags, lock) \
+ __clk_hw_register_divider((dev), NULL, (name), NULL, NULL, \
+ (parent_data), (flags), (reg), (shift), \
+ (width), (clk_divider_flags), NULL, (lock))
+/**
+ * clk_hw_register_divider_table - register a table based divider clock with
+ * the clock framework
+ * @dev: device registering this clock
+ * @name: name of this clock
+ * @parent_name: name of clock's parent
+ * @flags: framework-specific flags
+ * @reg: register address to adjust divider
+ * @shift: number of bits to shift the bitfield
+ * @width: width of the bitfield
+ * @clk_divider_flags: divider-specific flags for this clock
+ * @table: array of divider/value pairs ending with a div set to 0
+ * @lock: shared register lock for this clock
+ */
+#define clk_hw_register_divider_table(dev, name, parent_name, flags, reg, \
+ shift, width, clk_divider_flags, table, \
+ lock) \
+ __clk_hw_register_divider((dev), NULL, (name), (parent_name), NULL, \
+ NULL, (flags), (reg), (shift), (width), \
+ (clk_divider_flags), (table), (lock))
+/**
+ * clk_hw_register_divider_table_parent_hw - register a table based divider
+ * clock with the clock framework
+ * @dev: device registering this clock
+ * @name: name of this clock
+ * @parent_hw: pointer to parent clk
+ * @flags: framework-specific flags
+ * @reg: register address to adjust divider
+ * @shift: number of bits to shift the bitfield
+ * @width: width of the bitfield
+ * @clk_divider_flags: divider-specific flags for this clock
+ * @table: array of divider/value pairs ending with a div set to 0
+ * @lock: shared register lock for this clock
+ */
+#define clk_hw_register_divider_table_parent_hw(dev, name, parent_hw, flags, \
+ reg, shift, width, \
+ clk_divider_flags, table, \
+ lock) \
+ __clk_hw_register_divider((dev), NULL, (name), NULL, (parent_hw), \
+ NULL, (flags), (reg), (shift), (width), \
+ (clk_divider_flags), (table), (lock))
+/**
+ * clk_hw_register_divider_table_parent_data - register a table based divider
+ * clock with the clock framework
+ * @dev: device registering this clock
+ * @name: name of this clock
+ * @parent_data: parent clk data
+ * @flags: framework-specific flags
+ * @reg: register address to adjust divider
+ * @shift: number of bits to shift the bitfield
+ * @width: width of the bitfield
+ * @clk_divider_flags: divider-specific flags for this clock
+ * @table: array of divider/value pairs ending with a div set to 0
+ * @lock: shared register lock for this clock
+ */
+#define clk_hw_register_divider_table_parent_data(dev, name, parent_data, \
+ flags, reg, shift, width, \
+ clk_divider_flags, table, \
+ lock) \
+ __clk_hw_register_divider((dev), NULL, (name), NULL, NULL, \
+ (parent_data), (flags), (reg), (shift), \
+ (width), (clk_divider_flags), (table), \
+ (lock))
+
void clk_unregister_divider(struct clk *clk);
void clk_hw_unregister_divider(struct clk_hw *hw);
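For illustration only (not part of this patch), a consumer of the new parent_data divider variant might look like the sketch below; the clock name "my_div", the register offset and the bitfield layout are hypothetical:

	static const struct clk_parent_data my_div_parent = {
		.fw_name = "baud",	/* matches clock-names in DT */
		.name = "osc24m",	/* legacy global-name fallback */
	};

	static DEFINE_SPINLOCK(my_div_lock);

	static int my_probe_divider(struct device *dev, void __iomem *base)
	{
		struct clk_hw *hw;

		/* 4-bit divider field at bits [7:4] of the control register */
		hw = clk_hw_register_divider_parent_data(dev, "my_div",
							 &my_div_parent, 0,
							 base + 0x10, 4, 4,
							 CLK_DIVIDER_ONE_BASED,
							 &my_div_lock);
		return PTR_ERR_OR_ZERO(hw);
	}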
@@ -555,28 +834,48 @@ struct clk_mux {
extern const struct clk_ops clk_mux_ops;
extern const struct clk_ops clk_mux_ro_ops;
-struct clk *clk_register_mux(struct device *dev, const char *name,
- const char * const *parent_names, u8 num_parents,
- unsigned long flags,
- void __iomem *reg, u8 shift, u8 width,
- u8 clk_mux_flags, spinlock_t *lock);
-struct clk_hw *clk_hw_register_mux(struct device *dev, const char *name,
- const char * const *parent_names, u8 num_parents,
- unsigned long flags,
- void __iomem *reg, u8 shift, u8 width,
- u8 clk_mux_flags, spinlock_t *lock);
-
-struct clk *clk_register_mux_table(struct device *dev, const char *name,
- const char * const *parent_names, u8 num_parents,
- unsigned long flags,
- void __iomem *reg, u8 shift, u32 mask,
+struct clk_hw *__clk_hw_register_mux(struct device *dev, struct device_node *np,
+ const char *name, u8 num_parents,
+ const char * const *parent_names,
+ const struct clk_hw **parent_hws,
+ const struct clk_parent_data *parent_data,
+ unsigned long flags, void __iomem *reg, u8 shift, u32 mask,
u8 clk_mux_flags, u32 *table, spinlock_t *lock);
-struct clk_hw *clk_hw_register_mux_table(struct device *dev, const char *name,
+struct clk *clk_register_mux_table(struct device *dev, const char *name,
const char * const *parent_names, u8 num_parents,
- unsigned long flags,
- void __iomem *reg, u8 shift, u32 mask,
+ unsigned long flags, void __iomem *reg, u8 shift, u32 mask,
u8 clk_mux_flags, u32 *table, spinlock_t *lock);
+#define clk_register_mux(dev, name, parent_names, num_parents, flags, reg, \
+ shift, width, clk_mux_flags, lock) \
+ clk_register_mux_table((dev), (name), (parent_names), (num_parents), \
+ (flags), (reg), (shift), BIT((width)) - 1, \
+ (clk_mux_flags), NULL, (lock))
+#define clk_hw_register_mux_table(dev, name, parent_names, num_parents, \
+ flags, reg, shift, mask, clk_mux_flags, \
+ table, lock) \
+ __clk_hw_register_mux((dev), NULL, (name), (num_parents), \
+ (parent_names), NULL, NULL, (flags), (reg), \
+ (shift), (mask), (clk_mux_flags), (table), \
+ (lock))
+#define clk_hw_register_mux(dev, name, parent_names, num_parents, flags, reg, \
+ shift, width, clk_mux_flags, lock) \
+ __clk_hw_register_mux((dev), NULL, (name), (num_parents), \
+ (parent_names), NULL, NULL, (flags), (reg), \
+ (shift), BIT((width)) - 1, (clk_mux_flags), \
+ NULL, (lock))
+#define clk_hw_register_mux_hws(dev, name, parent_hws, num_parents, flags, \
+ reg, shift, width, clk_mux_flags, lock) \
+ __clk_hw_register_mux((dev), NULL, (name), (num_parents), NULL, \
+ (parent_hws), NULL, (flags), (reg), (shift), \
+ BIT((width)) - 1, (clk_mux_flags), NULL, (lock))
+#define clk_hw_register_mux_parent_data(dev, name, parent_data, num_parents, \
+ flags, reg, shift, width, \
+ clk_mux_flags, lock) \
+ __clk_hw_register_mux((dev), NULL, (name), (num_parents), NULL, NULL, \
+ (parent_data), (flags), (reg), (shift), \
+ BIT((width)) - 1, (clk_mux_flags), NULL, (lock))
+
int clk_mux_val_to_index(struct clk_hw *hw, u32 *table, unsigned int flags,
unsigned int val);
unsigned int clk_mux_index_to_val(u32 *table, unsigned int flags, u8 index);
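A minimal sketch of the corresponding mux helper, again with hypothetical names and register layout, could register a two-parent mux described via clk_parent_data:

	static const struct clk_parent_data my_mux_parents[] = {
		{ .fw_name = "pll", .name = "pll_periph" },
		{ .fw_name = "osc", .name = "osc24m" },
	};

	static DEFINE_SPINLOCK(my_mux_lock);

	static struct clk_hw *my_register_mux(struct device *dev, void __iomem *base)
	{
		/* 1-bit mux field at bit 0 selects between the two parents */
		return clk_hw_register_mux_parent_data(dev, "my_mux",
						       my_mux_parents,
						       ARRAY_SIZE(my_mux_parents),
						       0, base + 0x04, 0, 1,
						       0, &my_mux_lock);
	}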
@@ -743,6 +1042,12 @@ struct clk *clk_register_composite(struct device *dev, const char *name,
struct clk_hw *rate_hw, const struct clk_ops *rate_ops,
struct clk_hw *gate_hw, const struct clk_ops *gate_ops,
unsigned long flags);
+struct clk *clk_register_composite_pdata(struct device *dev, const char *name,
+ const struct clk_parent_data *parent_data, int num_parents,
+ struct clk_hw *mux_hw, const struct clk_ops *mux_ops,
+ struct clk_hw *rate_hw, const struct clk_ops *rate_ops,
+ struct clk_hw *gate_hw, const struct clk_ops *gate_ops,
+ unsigned long flags);
void clk_unregister_composite(struct clk *clk);
struct clk_hw *clk_hw_register_composite(struct device *dev, const char *name,
const char * const *parent_names, int num_parents,
@@ -750,45 +1055,14 @@ struct clk_hw *clk_hw_register_composite(struct device *dev, const char *name,
struct clk_hw *rate_hw, const struct clk_ops *rate_ops,
struct clk_hw *gate_hw, const struct clk_ops *gate_ops,
unsigned long flags);
-void clk_hw_unregister_composite(struct clk_hw *hw);
-
-/**
- * struct clk_gpio - gpio gated clock
- *
- * @hw: handle between common and hardware-specific interfaces
- * @gpiod: gpio descriptor
- *
- * Clock with a gpio control for enabling and disabling the parent clock
- * or switching between two parents by asserting or deasserting the gpio.
- *
- * Implements .enable, .disable and .is_enabled or
- * .get_parent, .set_parent and .determine_rate depending on which clk_ops
- * is used.
- */
-struct clk_gpio {
- struct clk_hw hw;
- struct gpio_desc *gpiod;
-};
-
-#define to_clk_gpio(_hw) container_of(_hw, struct clk_gpio, hw)
-
-extern const struct clk_ops clk_gpio_gate_ops;
-struct clk *clk_register_gpio_gate(struct device *dev, const char *name,
- const char *parent_name, struct gpio_desc *gpiod,
- unsigned long flags);
-struct clk_hw *clk_hw_register_gpio_gate(struct device *dev, const char *name,
- const char *parent_name, struct gpio_desc *gpiod,
- unsigned long flags);
-void clk_hw_unregister_gpio_gate(struct clk_hw *hw);
-
-extern const struct clk_ops clk_gpio_mux_ops;
-struct clk *clk_register_gpio_mux(struct device *dev, const char *name,
- const char * const *parent_names, u8 num_parents, struct gpio_desc *gpiod,
- unsigned long flags);
-struct clk_hw *clk_hw_register_gpio_mux(struct device *dev, const char *name,
- const char * const *parent_names, u8 num_parents, struct gpio_desc *gpiod,
+struct clk_hw *clk_hw_register_composite_pdata(struct device *dev,
+ const char *name,
+ const struct clk_parent_data *parent_data, int num_parents,
+ struct clk_hw *mux_hw, const struct clk_ops *mux_ops,
+ struct clk_hw *rate_hw, const struct clk_ops *rate_ops,
+ struct clk_hw *gate_hw, const struct clk_ops *gate_ops,
unsigned long flags);
-void clk_hw_unregister_gpio_mux(struct clk_hw *hw);
+void clk_hw_unregister_composite(struct clk_hw *hw);
struct clk *clk_register(struct device *dev, struct clk_hw *hw);
struct clk *devm_clk_register(struct device *dev, struct clk_hw *hw);
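As a sketch of how the new _pdata composite helper might be used (driver names and sub-clock instances are illustrative, not from this patch):

	static const struct clk_parent_data my_comp_parents[] = {
		{ .fw_name = "pll" },
		{ .fw_name = "osc" },
	};

	static struct clk_hw *my_register_composite(struct device *dev,
						    struct clk_mux *mux,
						    struct clk_divider *div)
	{
		/* mux selects the parent, divider sets the rate, no gate */
		return clk_hw_register_composite_pdata(dev, "my_periph",
						       my_comp_parents,
						       ARRAY_SIZE(my_comp_parents),
						       &mux->hw, &clk_mux_ops,
						       &div->hw, &clk_divider_ops,
						       NULL, NULL, 0);
	}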
@@ -818,6 +1092,7 @@ unsigned int clk_hw_get_num_parents(const struct clk_hw *hw);
struct clk_hw *clk_hw_get_parent(const struct clk_hw *hw);
struct clk_hw *clk_hw_get_parent_by_index(const struct clk_hw *hw,
unsigned int index);
+int clk_hw_get_parent_index(struct clk_hw *hw);
int clk_hw_set_parent(struct clk_hw *hw, struct clk_hw *new_parent);
unsigned int __clk_get_enable_count(struct clk *clk);
unsigned long clk_hw_get_rate(const struct clk_hw *hw);
diff --git a/include/linux/clk.h b/include/linux/clk.h
index 18b7b95a8253..7fd6a1febcf4 100644
--- a/include/linux/clk.h
+++ b/include/linux/clk.h
@@ -627,6 +627,9 @@ long clk_round_rate(struct clk *clk, unsigned long rate);
* @clk: clock source
* @rate: desired clock rate in Hz
*
+ * Updating the rate starts at the top-most affected clock and then
+ * walks the tree down to the bottom-most clock that needs updating.
+ *
* Returns success (0) or negative errno.
*/
int clk_set_rate(struct clk *clk, unsigned long rate);
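A short consumer-side sketch of the call documented above; the "baud" clock name and the 48 MHz target are hypothetical:

	static int my_set_baud_base(struct device *dev)
	{
		struct clk *clk;
		int ret;

		clk = devm_clk_get(dev, "baud");
		if (IS_ERR(clk))
			return PTR_ERR(clk);

		ret = clk_prepare_enable(clk);
		if (ret)
			return ret;

		/* May re-rate parents above and propagate down to children */
		return clk_set_rate(clk, 48 * 1000 * 1000);
	}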
diff --git a/include/linux/clk/tegra.h b/include/linux/clk/tegra.h
index b8aef62cc3f5..2b1b35240074 100644
--- a/include/linux/clk/tegra.h
+++ b/include/linux/clk/tegra.h
@@ -108,6 +108,19 @@ static inline void tegra_cpu_clock_resume(void)
tegra_cpu_car_ops->resume();
}
+#else
+static inline bool tegra_cpu_rail_off_ready(void)
+{
+ return false;
+}
+
+static inline void tegra_cpu_clock_suspend(void)
+{
+}
+
+static inline void tegra_cpu_clock_resume(void)
+{
+}
#endif
extern void tegra210_xusb_pll_hw_control_enable(void);
@@ -119,4 +132,15 @@ extern void tegra210_put_utmipll_in_iddq(void);
extern void tegra210_put_utmipll_out_iddq(void);
extern int tegra210_clk_handle_mbist_war(unsigned int id);
+struct clk;
+
+typedef long (tegra20_clk_emc_round_cb)(unsigned long rate,
+ unsigned long min_rate,
+ unsigned long max_rate,
+ void *arg);
+
+void tegra20_clk_set_emc_round_callback(tegra20_clk_emc_round_cb *round_cb,
+ void *cb_arg);
+int tegra20_clk_prepare_emc_mc_same_freq(struct clk *emc_clk, bool same);
+
#endif /* __LINUX_CLK_TEGRA_H_ */
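A sketch of how an EMC driver might hook into the new callback; the function names and clamping policy here are illustrative assumptions, not taken from the Tegra EMC driver:

	static long my_emc_round_rate(unsigned long rate, unsigned long min_rate,
				      unsigned long max_rate, void *arg)
	{
		/* Clamp to the window the memory controller can actually run at */
		if (rate < min_rate)
			rate = min_rate;
		if (rate > max_rate)
			rate = max_rate;
		return rate;
	}

	static void my_emc_probe_hook(void *emc)
	{
		tegra20_clk_set_emc_round_callback(my_emc_round_rate, emc);
	}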
diff --git a/include/linux/clk/ti.h b/include/linux/clk/ti.h
index 1e8ef96555ce..c62f6fa6763d 100644
--- a/include/linux/clk/ti.h
+++ b/include/linux/clk/ti.h
@@ -153,7 +153,7 @@ struct clk_hw_omap {
u8 fixed_div;
struct clk_omap_reg enable_reg;
u8 enable_bit;
- u8 flags;
+ unsigned long flags;
struct clk_omap_reg clksel_reg;
struct dpll_data *dpll_data;
const char *clkdm_name;
@@ -298,6 +298,7 @@ struct ti_clk_features {
void ti_clk_setup_features(struct ti_clk_features *features);
const struct ti_clk_features *ti_clk_get_features(void);
+bool ti_clk_is_in_standby(struct clk *clk);
int omap3_noncore_dpll_save_context(struct clk_hw *hw);
void omap3_noncore_dpll_restore_context(struct clk_hw *hw);
diff --git a/include/linux/clock_cooling.h b/include/linux/clock_cooling.h
index b5cebf766e02..4b0a69863656 100644
--- a/include/linux/clock_cooling.h
+++ b/include/linux/clock_cooling.h
@@ -7,7 +7,7 @@
* Copyright (C) 2013 Texas Instruments Inc.
* Contact: Eduardo Valentin <eduardo.valentin@ti.com>
*
- * Highly based on cpu_cooling.c.
+ * Highly based on cpufreq_cooling.c.
* Copyright (C) 2012 Samsung Electronics Co., Ltd(http://www.samsung.com)
* Copyright (C) 2012 Amit Daniel <amit.kachhap@linaro.org>
*/
diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h
index b21db536fd52..86d143db6523 100644
--- a/include/linux/clocksource.h
+++ b/include/linux/clocksource.h
@@ -23,18 +23,31 @@
struct clocksource;
struct module;
-#ifdef CONFIG_ARCH_CLOCKSOURCE_DATA
+#if defined(CONFIG_ARCH_CLOCKSOURCE_DATA) || \
+ defined(CONFIG_GENERIC_GETTIMEOFDAY)
#include <asm/clocksource.h>
#endif
+#include <vdso/clocksource.h>
+
/**
* struct clocksource - hardware abstraction for a free running counter
* Provides mostly state-free accessors to the underlying hardware.
* This is the structure used for system time.
*
- * @name: ptr to clocksource name
- * @list: list head for registration
- * @rating: rating value for selection (higher is better)
+ * @read: Returns a cycle value, passes clocksource as argument
+ * @mask: Bitmask for two's complement
+ * subtraction of non 64 bit counters
+ * @mult: Cycle to nanosecond multiplier
+ * @shift: Cycle to nanosecond divisor (power of two)
+ * @max_idle_ns: Maximum idle time permitted by the clocksource (nsecs)
+ * @maxadj: Maximum adjustment value to mult (~11%)
+ * @archdata: Optional arch-specific data
+ * @max_cycles: Maximum safe cycle value which won't overflow on
+ * multiplication
+ * @name: Pointer to clocksource name
+ * @list: List head for registration (internal)
+ * @rating: Rating value for selection (higher is better)
* To avoid rating inflation the following
* list should give you a guide as to how
* to assign your clocksource a rating
@@ -49,27 +62,23 @@ struct module;
* 400-499: Perfect
* The ideal clocksource. A must-use where
* available.
- * @read: returns a cycle value, passes clocksource as argument
- * @enable: optional function to enable the clocksource
- * @disable: optional function to disable the clocksource
- * @mask: bitmask for two's complement
- * subtraction of non 64 bit counters
- * @mult: cycle to nanosecond multiplier
- * @shift: cycle to nanosecond divisor (power of two)
- * @max_idle_ns: max idle time permitted by the clocksource (nsecs)
- * @maxadj: maximum adjustment value to mult (~11%)
- * @max_cycles: maximum safe cycle value which won't overflow on multiplication
- * @flags: flags describing special properties
- * @archdata: arch-specific data
- * @suspend: suspend function for the clocksource, if necessary
- * @resume: resume function for the clocksource, if necessary
+ * @flags: Flags describing special properties
+ * @enable: Optional function to enable the clocksource
+ * @disable: Optional function to disable the clocksource
+ * @suspend: Optional suspend function for the clocksource
+ * @resume: Optional resume function for the clocksource
* @mark_unstable: Optional function to inform the clocksource driver that
* the watchdog marked the clocksource unstable
- * @owner: module reference, must be set by clocksource in modules
+ * @tick_stable: Optional function called periodically from the watchdog
+ * code to provide stable synchronization points

+ * @wd_list: List head to enqueue into the watchdog list (internal)
+ * @cs_last: Last clocksource value for clocksource watchdog
+ * @wd_last: Last watchdog value corresponding to @cs_last
+ * @owner: Module reference, must be set by clocksource in modules
*
 * Note: This struct is not used in hot paths of the timekeeping code
* because the timekeeper caches the hot path fields in its own data
- * structure, so no line cache alignment is required,
+ * structure, so no cache line alignment is required,
*
* The pointer to the clocksource itself is handed to the read
* callback. If you need extra information there you can wrap struct
@@ -78,35 +87,37 @@ struct module;
* structure.
*/
struct clocksource {
- u64 (*read)(struct clocksource *cs);
- u64 mask;
- u32 mult;
- u32 shift;
- u64 max_idle_ns;
- u32 maxadj;
+ u64 (*read)(struct clocksource *cs);
+ u64 mask;
+ u32 mult;
+ u32 shift;
+ u64 max_idle_ns;
+ u32 maxadj;
#ifdef CONFIG_ARCH_CLOCKSOURCE_DATA
struct arch_clocksource_data archdata;
#endif
- u64 max_cycles;
- const char *name;
- struct list_head list;
- int rating;
- int (*enable)(struct clocksource *cs);
- void (*disable)(struct clocksource *cs);
- unsigned long flags;
- void (*suspend)(struct clocksource *cs);
- void (*resume)(struct clocksource *cs);
- void (*mark_unstable)(struct clocksource *cs);
- void (*tick_stable)(struct clocksource *cs);
+ u64 max_cycles;
+ const char *name;
+ struct list_head list;
+ int rating;
+ enum vdso_clock_mode vdso_clock_mode;
+ unsigned long flags;
+
+ int (*enable)(struct clocksource *cs);
+ void (*disable)(struct clocksource *cs);
+ void (*suspend)(struct clocksource *cs);
+ void (*resume)(struct clocksource *cs);
+ void (*mark_unstable)(struct clocksource *cs);
+ void (*tick_stable)(struct clocksource *cs);
/* private: */
#ifdef CONFIG_CLOCKSOURCE_WATCHDOG
/* Watchdog related data, used by the framework */
- struct list_head wd_list;
- u64 cs_last;
- u64 wd_last;
+ struct list_head wd_list;
+ u64 cs_last;
+ u64 wd_last;
#endif
- struct module *owner;
+ struct module *owner;
};
/*
diff --git a/include/linux/compat.h b/include/linux/compat.h
index 16dafd9f4b86..0480ba4db592 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -116,14 +116,7 @@ typedef __compat_gid32_t compat_gid_t;
struct compat_sel_arg_struct;
struct rusage;
-struct compat_itimerval {
- struct old_timeval32 it_interval;
- struct old_timeval32 it_value;
-};
-
-struct itimerval;
-int get_compat_itimerval(struct itimerval *, const struct compat_itimerval __user *);
-int put_compat_itimerval(struct compat_itimerval __user *, const struct itimerval *);
+struct old_itimerval32;
struct compat_tms {
compat_clock_t tms_utime;
@@ -255,15 +248,6 @@ typedef struct compat_siginfo {
} _sifields;
} compat_siginfo_t;
-/*
- * These functions operate on 32- or 64-bit specs depending on
- * COMPAT_USE_64BIT_TIME, hence the void user pointer arguments.
- */
-extern int compat_get_timespec(struct timespec *, const void __user *);
-extern int compat_put_timespec(const struct timespec *, void __user *);
-extern int compat_get_timeval(struct timeval *, const void __user *);
-extern int compat_put_timeval(const struct timeval *, void __user *);
-
struct compat_iovec {
compat_uptr_t iov_base;
compat_size_t iov_len;
@@ -410,8 +394,6 @@ struct compat_kexec_segment;
struct compat_mq_attr;
struct compat_msgbuf;
-extern void compat_exit_robust_list(struct task_struct *curr);
-
#define BITS_PER_COMPAT_LONG (8*sizeof(compat_long_t))
#define BITS_TO_COMPAT_LONGS(bits) DIV_ROUND_UP(bits, BITS_PER_COMPAT_LONG)
@@ -425,26 +407,6 @@ int copy_siginfo_to_user32(struct compat_siginfo __user *to, const kernel_siginf
int get_compat_sigevent(struct sigevent *event,
const struct compat_sigevent __user *u_event);
-static inline int old_timeval32_compare(struct old_timeval32 *lhs,
- struct old_timeval32 *rhs)
-{
- if (lhs->tv_sec < rhs->tv_sec)
- return -1;
- if (lhs->tv_sec > rhs->tv_sec)
- return 1;
- return lhs->tv_usec - rhs->tv_usec;
-}
-
-static inline int old_timespec32_compare(struct old_timespec32 *lhs,
- struct old_timespec32 *rhs)
-{
- if (lhs->tv_sec < rhs->tv_sec)
- return -1;
- if (lhs->tv_sec > rhs->tv_sec)
- return 1;
- return lhs->tv_nsec - rhs->tv_nsec;
-}
-
extern int get_compat_sigset(sigset_t *set, const compat_sigset_t __user *compat);
/*
@@ -492,12 +454,13 @@ extern void __user *compat_alloc_user_space(unsigned long len);
int compat_restore_altstack(const compat_stack_t __user *uss);
int __compat_save_altstack(compat_stack_t __user *, unsigned long);
-#define compat_save_altstack_ex(uss, sp) do { \
+#define unsafe_compat_save_altstack(uss, sp, label) do { \
compat_stack_t __user *__uss = uss; \
struct task_struct *t = current; \
- put_user_ex(ptr_to_compat((void __user *)t->sas_ss_sp), &__uss->ss_sp); \
- put_user_ex(t->sas_ss_flags, &__uss->ss_flags); \
- put_user_ex(t->sas_ss_size, &__uss->ss_size); \
+ unsafe_put_user(ptr_to_compat((void __user *)t->sas_ss_sp), \
+ &__uss->ss_sp, label); \
+ unsafe_put_user(t->sas_ss_flags, &__uss->ss_flags, label); \
+ unsafe_put_user(t->sas_ss_size, &__uss->ss_size, label); \
if (t->sas_ss_flags & SS_AUTODISARM) \
sas_ss_reset(t); \
} while (0);
@@ -668,10 +631,10 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
/* kernel/itimer.c */
asmlinkage long compat_sys_getitimer(int which,
- struct compat_itimerval __user *it);
+ struct old_itimerval32 __user *it);
asmlinkage long compat_sys_setitimer(int which,
- struct compat_itimerval __user *in,
- struct compat_itimerval __user *out);
+ struct old_itimerval32 __user *in,
+ struct old_itimerval32 __user *out);
/* kernel/kexec.c */
asmlinkage long compat_sys_kexec_load(compat_ulong_t entry,
@@ -937,10 +900,10 @@ static inline bool in_compat_syscall(void) { return is_compat_task(); }
*/
static inline struct old_timeval32 ns_to_old_timeval32(s64 nsec)
{
- struct timeval tv;
+ struct __kernel_old_timeval tv;
struct old_timeval32 ctv;
- tv = ns_to_timeval(nsec);
+ tv = ns_to_kernel_old_timeval(nsec);
ctv.tv_sec = tv.tv_sec;
ctv.tv_usec = tv.tv_usec;
@@ -967,4 +930,22 @@ static inline bool in_compat_syscall(void) { return false; }
#endif /* CONFIG_COMPAT */
+/*
+ * A pointer passed in from user mode. This should not
+ * be used for syscall parameters, just declare them
+ * as pointers because the syscall entry code will have
+ * appropriately converted them already.
+ */
+#ifndef compat_ptr
+static inline void __user *compat_ptr(compat_uptr_t uptr)
+{
+ return (void __user *)(unsigned long)uptr;
+}
+#endif
+
+static inline compat_uptr_t ptr_to_compat(void __user *uptr)
+{
+ return (u32)(unsigned long)uptr;
+}
+
#endif /* _LINUX_COMPAT_H */
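For illustration, the generic compat_ptr() above is typically used to unpack a 32-bit user pointer in a compat ioctl handler; the handler below is a hypothetical sketch, not part of this patch:

	static long my_compat_ioctl(struct file *file, unsigned int cmd,
				    unsigned long arg)
	{
		void __user *argp = compat_ptr(arg);

		/* ... copy_from_user(&req, argp, sizeof(req)), dispatch on cmd ... */
		return 0;
	}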
diff --git a/include/linux/completion.h b/include/linux/completion.h
index 519e94915d18..bf8e77001f18 100644
--- a/include/linux/completion.h
+++ b/include/linux/completion.h
@@ -9,7 +9,7 @@
* See kernel/sched/completion.c for details.
*/
-#include <linux/wait.h>
+#include <linux/swait.h>
/*
* struct completion - structure used to maintain state for a "completion"
@@ -25,7 +25,7 @@
*/
struct completion {
unsigned int done;
- wait_queue_head_t wait;
+ struct swait_queue_head wait;
};
#define init_completion_map(x, m) __init_completion(x)
@@ -34,7 +34,7 @@ static inline void complete_acquire(struct completion *x) {}
static inline void complete_release(struct completion *x) {}
#define COMPLETION_INITIALIZER(work) \
- { 0, __WAIT_QUEUE_HEAD_INITIALIZER((work).wait) }
+ { 0, __SWAIT_QUEUE_HEAD_INITIALIZER((work).wait) }
#define COMPLETION_INITIALIZER_ONSTACK_MAP(work, map) \
(*({ init_completion_map(&(work), &(map)); &(work); }))
@@ -85,7 +85,7 @@ static inline void complete_release(struct completion *x) {}
static inline void __init_completion(struct completion *x)
{
x->done = 0;
- init_waitqueue_head(&x->wait);
+ init_swait_queue_head(&x->wait);
}
/**
diff --git a/include/linux/console.h b/include/linux/console.h
index d09951d5a94e..f33016b3a401 100644
--- a/include/linux/console.h
+++ b/include/linux/console.h
@@ -101,7 +101,6 @@ extern const struct consw *conswitchp;
extern const struct consw dummy_con; /* dummy console buffer */
extern const struct consw vga_con; /* VGA text console */
extern const struct consw newport_con; /* SGI Newport console */
-extern const struct consw prom_con; /* SPARC PROM console */
int con_is_bound(const struct consw *csw);
int do_unregister_con_driver(const struct consw *csw);
@@ -201,7 +200,6 @@ extern void suspend_console(void);
extern void resume_console(void);
int mda_console_init(void);
-void prom_con_init(void);
void vcs_make_sysfs(int index);
void vcs_remove_sysfs(int index);
diff --git a/include/linux/const.h b/include/linux/const.h
index 7b55a55f5911..81b8aae5a855 100644
--- a/include/linux/const.h
+++ b/include/linux/const.h
@@ -1,9 +1,6 @@
#ifndef _LINUX_CONST_H
#define _LINUX_CONST_H
-#include <uapi/linux/const.h>
-
-#define UL(x) (_UL(x))
-#define ULL(x) (_ULL(x))
+#include <vdso/const.h>
#endif /* _LINUX_CONST_H */
diff --git a/include/linux/context_tracking.h b/include/linux/context_tracking.h
index d05609ad329d..8150f5ac176c 100644
--- a/include/linux/context_tracking.h
+++ b/include/linux/context_tracking.h
@@ -22,26 +22,26 @@ extern void context_tracking_user_exit(void);
static inline void user_enter(void)
{
- if (context_tracking_is_enabled())
+ if (context_tracking_enabled())
context_tracking_enter(CONTEXT_USER);
}
static inline void user_exit(void)
{
- if (context_tracking_is_enabled())
+ if (context_tracking_enabled())
context_tracking_exit(CONTEXT_USER);
}
/* Called with interrupts disabled. */
static inline void user_enter_irqoff(void)
{
- if (context_tracking_is_enabled())
+ if (context_tracking_enabled())
__context_tracking_enter(CONTEXT_USER);
}
static inline void user_exit_irqoff(void)
{
- if (context_tracking_is_enabled())
+ if (context_tracking_enabled())
__context_tracking_exit(CONTEXT_USER);
}
@@ -49,7 +49,7 @@ static inline enum ctx_state exception_enter(void)
{
enum ctx_state prev_ctx;
- if (!context_tracking_is_enabled())
+ if (!context_tracking_enabled())
return 0;
prev_ctx = this_cpu_read(context_tracking.state);
@@ -61,7 +61,7 @@ static inline enum ctx_state exception_enter(void)
static inline void exception_exit(enum ctx_state prev_ctx)
{
- if (context_tracking_is_enabled()) {
+ if (context_tracking_enabled()) {
if (prev_ctx != CONTEXT_KERNEL)
context_tracking_enter(prev_ctx);
}
@@ -77,7 +77,7 @@ static inline void exception_exit(enum ctx_state prev_ctx)
*/
static inline enum ctx_state ct_state(void)
{
- return context_tracking_is_enabled() ?
+ return context_tracking_enabled() ?
this_cpu_read(context_tracking.state) : CONTEXT_DISABLED;
}
#else
@@ -90,7 +90,7 @@ static inline void exception_exit(enum ctx_state prev_ctx) { }
static inline enum ctx_state ct_state(void) { return CONTEXT_DISABLED; }
#endif /* !CONFIG_CONTEXT_TRACKING */
-#define CT_WARN_ON(cond) WARN_ON(context_tracking_is_enabled() && (cond))
+#define CT_WARN_ON(cond) WARN_ON(context_tracking_enabled() && (cond))
#ifdef CONFIG_CONTEXT_TRACKING_FORCE
extern void context_tracking_init(void);
@@ -103,12 +103,12 @@ static inline void context_tracking_init(void) { }
/* must be called with irqs disabled */
static inline void guest_enter_irqoff(void)
{
- if (vtime_accounting_cpu_enabled())
+ if (vtime_accounting_enabled_this_cpu())
vtime_guest_enter(current);
else
current->flags |= PF_VCPU;
- if (context_tracking_is_enabled())
+ if (context_tracking_enabled())
__context_tracking_enter(CONTEXT_GUEST);
/* KVM does not hold any references to rcu protected data when it
@@ -118,16 +118,16 @@ static inline void guest_enter_irqoff(void)
* one time slice). Lets treat guest mode as quiescent state, just like
* we do with user-mode execution.
*/
- if (!context_tracking_cpu_is_enabled())
+ if (!context_tracking_enabled_this_cpu())
rcu_virt_note_context_switch(smp_processor_id());
}
static inline void guest_exit_irqoff(void)
{
- if (context_tracking_is_enabled())
+ if (context_tracking_enabled())
__context_tracking_exit(CONTEXT_GUEST);
- if (vtime_accounting_cpu_enabled())
+ if (vtime_accounting_enabled_this_cpu())
vtime_guest_exit(current);
else
current->flags &= ~PF_VCPU;
@@ -141,7 +141,7 @@ static inline void guest_enter_irqoff(void)
* to assume that it's the stime pending cputime
* to flush.
*/
- vtime_account_system(current);
+ vtime_account_kernel(current);
current->flags |= PF_VCPU;
rcu_virt_note_context_switch(smp_processor_id());
}
@@ -149,20 +149,11 @@ static inline void guest_enter_irqoff(void)
static inline void guest_exit_irqoff(void)
{
/* Flush the guest cputime we spent on the guest */
- vtime_account_system(current);
+ vtime_account_kernel(current);
current->flags &= ~PF_VCPU;
}
#endif /* CONFIG_VIRT_CPU_ACCOUNTING_GEN */
-static inline void guest_enter(void)
-{
- unsigned long flags;
-
- local_irq_save(flags);
- guest_enter_irqoff();
- local_irq_restore(flags);
-}
-
static inline void guest_exit(void)
{
unsigned long flags;
diff --git a/include/linux/context_tracking_state.h b/include/linux/context_tracking_state.h
index f128dc3be0df..e7fe6678b7ad 100644
--- a/include/linux/context_tracking_state.h
+++ b/include/linux/context_tracking_state.h
@@ -23,17 +23,22 @@ struct context_tracking {
};
#ifdef CONFIG_CONTEXT_TRACKING
-extern struct static_key_false context_tracking_enabled;
+extern struct static_key_false context_tracking_key;
DECLARE_PER_CPU(struct context_tracking, context_tracking);
-static inline bool context_tracking_is_enabled(void)
+static inline bool context_tracking_enabled(void)
{
- return static_branch_unlikely(&context_tracking_enabled);
+ return static_branch_unlikely(&context_tracking_key);
}
-static inline bool context_tracking_cpu_is_enabled(void)
+static inline bool context_tracking_enabled_cpu(int cpu)
{
- return __this_cpu_read(context_tracking.active);
+ return context_tracking_enabled() && per_cpu(context_tracking.active, cpu);
+}
+
+static inline bool context_tracking_enabled_this_cpu(void)
+{
+ return context_tracking_enabled() && __this_cpu_read(context_tracking.active);
}
static inline bool context_tracking_in_user(void)
@@ -42,9 +47,9 @@ static inline bool context_tracking_in_user(void)
}
#else
static inline bool context_tracking_in_user(void) { return false; }
-static inline bool context_tracking_active(void) { return false; }
-static inline bool context_tracking_is_enabled(void) { return false; }
-static inline bool context_tracking_cpu_is_enabled(void) { return false; }
+static inline bool context_tracking_enabled(void) { return false; }
+static inline bool context_tracking_enabled_cpu(int cpu) { return false; }
+static inline bool context_tracking_enabled_this_cpu(void) { return false; }
#endif /* CONFIG_CONTEXT_TRACKING */
#endif
diff --git a/include/linux/coresight.h b/include/linux/coresight.h
index a2b68823717b..44e552de419c 100644
--- a/include/linux/coresight.h
+++ b/include/linux/coresight.h
@@ -285,6 +285,8 @@ extern void coresight_disclaim_device(void __iomem *base);
extern void coresight_disclaim_device_unlocked(void __iomem *base);
extern char *coresight_alloc_device_name(struct coresight_dev_list *devs,
struct device *dev);
+
+extern bool coresight_loses_context_with_cpu(struct device *dev);
#else
static inline struct coresight_device *
coresight_register(struct coresight_desc *desc) { return NULL; }
@@ -307,6 +309,10 @@ static inline int coresight_claim_device(void __iomem *base)
static inline void coresight_disclaim_device(void __iomem *base) {}
static inline void coresight_disclaim_device_unlocked(void __iomem *base) {}
+static inline bool coresight_loses_context_with_cpu(struct device *dev)
+{
+ return false;
+}
#endif
extern int coresight_get_cpu(struct device *dev);
diff --git a/include/linux/counter.h b/include/linux/counter.h
index a061cdcdef7c..9dbd5df4cd34 100644
--- a/include/linux/counter.h
+++ b/include/linux/counter.h
@@ -290,53 +290,22 @@ struct counter_device_state {
const struct attribute_group **groups;
};
-/**
- * struct counter_signal_read_value - Opaque Signal read value
- * @buf: string representation of Signal read value
- * @len: length of string in @buf
- */
-struct counter_signal_read_value {
- char *buf;
- size_t len;
-};
-
-/**
- * struct counter_count_read_value - Opaque Count read value
- * @buf: string representation of Count read value
- * @len: length of string in @buf
- */
-struct counter_count_read_value {
- char *buf;
- size_t len;
-};
-
-/**
- * struct counter_count_write_value - Opaque Count write value
- * @buf: string representation of Count write value
- */
-struct counter_count_write_value {
- const char *buf;
+enum counter_signal_value {
+ COUNTER_SIGNAL_LOW = 0,
+ COUNTER_SIGNAL_HIGH
};
/**
* struct counter_ops - Callbacks from driver
* @signal_read: optional read callback for Signal attribute. The read
* value of the respective Signal should be passed back via
- * the val parameter. val points to an opaque type which
- * should be set only by calling the
- * counter_signal_read_value_set function from within the
- * signal_read callback.
+ * the val parameter.
* @count_read: optional read callback for Count attribute. The read
* value of the respective Count should be passed back via
- * the val parameter. val points to an opaque type which
- * should be set only by calling the
- * counter_count_read_value_set function from within the
- * count_read callback.
+ * the val parameter.
* @count_write: optional write callback for Count attribute. The write
* value for the respective Count is passed in via the val
- * parameter. val points to an opaque type which should be
- * accessed only by calling the
- * counter_count_write_value_get function.
+ * parameter.
* @function_get: function to get the current count function mode. Returns
* 0 on success and negative error code on error. The index
* of the respective Count's returned function mode should
@@ -346,7 +315,7 @@ struct counter_count_write_value {
* Count's functions_list array.
* @action_get: function to get the current action mode. Returns 0 on
* success and negative error code on error. The index of
- * the respective Signal's returned action mode should be
+ * the respective Synapse's returned action mode should be
* passed back via the action parameter.
* @action_set: function to set the action mode. action is the index of
* the requested action mode from the respective Synapse's
@@ -355,13 +324,11 @@ struct counter_count_write_value {
struct counter_ops {
int (*signal_read)(struct counter_device *counter,
struct counter_signal *signal,
- struct counter_signal_read_value *val);
+ enum counter_signal_value *val);
int (*count_read)(struct counter_device *counter,
- struct counter_count *count,
- struct counter_count_read_value *val);
+ struct counter_count *count, unsigned long *val);
int (*count_write)(struct counter_device *counter,
- struct counter_count *count,
- struct counter_count_write_value *val);
+ struct counter_count *count, unsigned long val);
int (*function_get)(struct counter_device *counter,
struct counter_count *count, size_t *function);
int (*function_set)(struct counter_device *counter,
@@ -477,29 +444,6 @@ struct counter_device {
void *priv;
};
-enum counter_signal_level {
- COUNTER_SIGNAL_LEVEL_LOW = 0,
- COUNTER_SIGNAL_LEVEL_HIGH
-};
-
-enum counter_signal_value_type {
- COUNTER_SIGNAL_LEVEL = 0
-};
-
-enum counter_count_value_type {
- COUNTER_COUNT_POSITION = 0,
-};
-
-void counter_signal_read_value_set(struct counter_signal_read_value *const val,
- const enum counter_signal_value_type type,
- void *const data);
-void counter_count_read_value_set(struct counter_count_read_value *const val,
- const enum counter_count_value_type type,
- void *const data);
-int counter_count_write_value_get(void *const data,
- const enum counter_count_value_type type,
- const struct counter_count_write_value *const val);
-
int counter_register(struct counter_device *const counter);
void counter_unregister(struct counter_device *const counter);
int devm_counter_register(struct device *dev,
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index bc6c879bd110..beaed2dc269e 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -88,10 +88,13 @@ extern ssize_t arch_cpu_release(const char *, size_t);
#ifdef CONFIG_SMP
extern bool cpuhp_tasks_frozen;
-int cpu_up(unsigned int cpu);
+int add_cpu(unsigned int cpu);
+int cpu_device_up(struct device *dev);
void notify_cpu_starting(unsigned int cpu);
extern void cpu_maps_update_begin(void);
extern void cpu_maps_update_done(void);
+int bringup_hibernate_cpu(unsigned int sleep_cpu);
+void bringup_nonboot_cpus(unsigned int setup_max_cpus);
#else /* CONFIG_SMP */
#define cpuhp_tasks_frozen 0
@@ -117,7 +120,9 @@ extern void lockdep_assert_cpus_held(void);
extern void cpu_hotplug_disable(void);
extern void cpu_hotplug_enable(void);
void clear_tasks_mm_cpumask(int cpu);
-int cpu_down(unsigned int cpu);
+int remove_cpu(unsigned int cpu);
+int cpu_device_down(struct device *dev);
+extern void smp_shutdown_nonboot_cpus(unsigned int primary_cpu);
#else /* CONFIG_HOTPLUG_CPU */
@@ -129,6 +134,7 @@ static inline int cpus_read_trylock(void) { return true; }
static inline void lockdep_assert_cpus_held(void) { }
static inline void cpu_hotplug_disable(void) { }
static inline void cpu_hotplug_enable(void) { }
+static inline void smp_shutdown_nonboot_cpus(unsigned int primary_cpu) { }
#endif /* !CONFIG_HOTPLUG_CPU */
/* Wrappers which go away once all code is converted */
@@ -138,12 +144,18 @@ static inline void get_online_cpus(void) { cpus_read_lock(); }
static inline void put_online_cpus(void) { cpus_read_unlock(); }
#ifdef CONFIG_PM_SLEEP_SMP
-extern int freeze_secondary_cpus(int primary);
+int __freeze_secondary_cpus(int primary, bool suspend);
+static inline int freeze_secondary_cpus(int primary)
+{
+ return __freeze_secondary_cpus(primary, true);
+}
+
static inline int disable_nonboot_cpus(void)
{
- return freeze_secondary_cpus(0);
+ return __freeze_secondary_cpus(0, false);
}
-extern void enable_nonboot_cpus(void);
+
+void enable_nonboot_cpus(void);
static inline int suspend_disable_secondary_cpus(void)
{
@@ -184,7 +196,12 @@ void arch_cpu_idle_dead(void);
int cpu_report_state(int cpu);
int cpu_check_up_prepare(int cpu);
void cpu_set_state_online(int cpu);
-void play_idle(unsigned long duration_us);
+void play_idle_precise(u64 duration_ns, u64 latency_ns);
+
+static inline void play_idle(unsigned long duration_us)
+{
+ play_idle_precise(duration_us * NSEC_PER_USEC, U64_MAX);
+}
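A minimal usage sketch of the new precise interface next to the legacy wrapper; the durations are arbitrary example values:

	static void my_inject_idle(void)
	{
		/* Idle for 1 ms, but only in states that exit within 100 us */
		play_idle_precise(1 * NSEC_PER_MSEC, 100 * NSEC_PER_USEC);

		/* Legacy wrapper: same duration, no exit-latency constraint */
		play_idle(1000);
	}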
#ifdef CONFIG_HOTPLUG_CPU
bool cpu_wait_death(unsigned int cpu, int seconds);
diff --git a/include/linux/cpu_cooling.h b/include/linux/cpu_cooling.h
index bae54bb7c048..65501d8f9778 100644
--- a/include/linux/cpu_cooling.h
+++ b/include/linux/cpu_cooling.h
@@ -19,7 +19,7 @@
struct cpufreq_policy;
-#ifdef CONFIG_CPU_THERMAL
+#ifdef CONFIG_CPU_FREQ_THERMAL
/**
* cpufreq_cooling_register - function to create cpufreq cooling device.
* @policy: cpufreq policy.
@@ -33,7 +33,14 @@ cpufreq_cooling_register(struct cpufreq_policy *policy);
*/
void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev);
-#else /* !CONFIG_CPU_THERMAL */
+/**
+ * of_cpufreq_cooling_register - create cpufreq cooling device based on DT.
+ * @policy: cpufreq policy.
+ */
+struct thermal_cooling_device *
+of_cpufreq_cooling_register(struct cpufreq_policy *policy);
+
+#else /* !CONFIG_CPU_FREQ_THERMAL */
static inline struct thermal_cooling_device *
cpufreq_cooling_register(struct cpufreq_policy *policy)
{
@@ -45,21 +52,30 @@ void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev)
{
return;
}
-#endif /* CONFIG_CPU_THERMAL */
-#if defined(CONFIG_THERMAL_OF) && defined(CONFIG_CPU_THERMAL)
-/**
- * of_cpufreq_cooling_register - create cpufreq cooling device based on DT.
- * @policy: cpufreq policy.
- */
-struct thermal_cooling_device *
-of_cpufreq_cooling_register(struct cpufreq_policy *policy);
-#else
static inline struct thermal_cooling_device *
of_cpufreq_cooling_register(struct cpufreq_policy *policy)
{
return NULL;
}
-#endif /* defined(CONFIG_THERMAL_OF) && defined(CONFIG_CPU_THERMAL) */
+#endif /* CONFIG_CPU_FREQ_THERMAL */
+
+struct cpuidle_driver;
+
+#ifdef CONFIG_CPU_IDLE_THERMAL
+int cpuidle_cooling_register(struct cpuidle_driver *drv);
+int cpuidle_of_cooling_register(struct device_node *np,
+ struct cpuidle_driver *drv);
+#else /* CONFIG_CPU_IDLE_THERMAL */
+static inline int cpuidle_cooling_register(struct cpuidle_driver *drv)
+{
+ return 0;
+}
+static inline int cpuidle_of_cooling_register(struct device_node *np,
+ struct cpuidle_driver *drv)
+{
+ return 0;
+}
+#endif /* CONFIG_CPU_IDLE_THERMAL */
#endif /* __CPU_COOLING_H__ */
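A sketch (with hypothetical function names) of how a cpuidle driver's init path might register the new idle-injection cooling device as best effort:

	static int my_idle_driver_init(struct cpuidle_driver *drv,
				       struct device_node *cpu_node)
	{
		int ret;

		ret = cpuidle_of_cooling_register(cpu_node, drv);
		if (ret)
			pr_warn("cpuidle cooling registration failed: %d\n", ret);

		return 0;	/* cooling is optional, don't fail the driver */
	}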
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 92d5fdc8154e..f7240251a949 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -148,6 +148,20 @@ struct cpufreq_policy {
struct notifier_block nb_max;
};
+/*
+ * Used for passing new cpufreq policy data to the cpufreq driver's ->verify()
+ * callback for sanitization. That callback is only expected to modify the min
+ * and max values, if necessary, and specifically it must not update the
+ * frequency table.
+ */
+struct cpufreq_policy_data {
+ struct cpufreq_cpuinfo cpuinfo;
+ struct cpufreq_frequency_table *freq_table;
+ unsigned int cpu;
+ unsigned int min; /* in kHz */
+ unsigned int max; /* in kHz */
+};
+
struct cpufreq_freqs {
struct cpufreq_policy *policy;
unsigned int old;
@@ -187,13 +201,11 @@ static inline bool policy_is_shared(struct cpufreq_policy *policy)
return cpumask_weight(policy->cpus) > 1;
}
-/* /sys/devices/system/cpu/cpufreq: entry point for global variables */
-extern struct kobject *cpufreq_global_kobject;
-
#ifdef CONFIG_CPU_FREQ
unsigned int cpufreq_get(unsigned int cpu);
unsigned int cpufreq_quick_get(unsigned int cpu);
unsigned int cpufreq_quick_get_max(unsigned int cpu);
+unsigned int cpufreq_get_hw_max_freq(unsigned int cpu);
void disable_cpufreq(void);
u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy);
@@ -201,8 +213,6 @@ u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy);
struct cpufreq_policy *cpufreq_cpu_acquire(unsigned int cpu);
void cpufreq_cpu_release(struct cpufreq_policy *policy);
int cpufreq_get_policy(struct cpufreq_policy *policy, unsigned int cpu);
-int cpufreq_set_policy(struct cpufreq_policy *policy,
- struct cpufreq_policy *new_policy);
void refresh_frequency_limits(struct cpufreq_policy *policy);
void cpufreq_update_policy(unsigned int cpu);
void cpufreq_update_limits(unsigned int cpu);
@@ -223,6 +233,10 @@ static inline unsigned int cpufreq_quick_get_max(unsigned int cpu)
{
return 0;
}
+static inline unsigned int cpufreq_get_hw_max_freq(unsigned int cpu)
+{
+ return 0;
+}
static inline void disable_cpufreq(void) { }
#endif
@@ -284,7 +298,7 @@ struct cpufreq_driver {
/* needed by all drivers */
int (*init)(struct cpufreq_policy *policy);
- int (*verify)(struct cpufreq_policy *policy);
+ int (*verify)(struct cpufreq_policy_data *policy);
/* define one out of two */
int (*setpolicy)(struct cpufreq_policy *policy);
@@ -415,8 +429,9 @@ static inline int cpufreq_thermal_control_enabled(struct cpufreq_driver *drv)
(drv->flags & CPUFREQ_IS_COOLING_DEV);
}
-static inline void cpufreq_verify_within_limits(struct cpufreq_policy *policy,
- unsigned int min, unsigned int max)
+static inline void cpufreq_verify_within_limits(struct cpufreq_policy_data *policy,
+ unsigned int min,
+ unsigned int max)
{
if (policy->min < min)
policy->min = min;
@@ -432,10 +447,10 @@ static inline void cpufreq_verify_within_limits(struct cpufreq_policy *policy,
}
static inline void
-cpufreq_verify_within_cpu_limits(struct cpufreq_policy *policy)
+cpufreq_verify_within_cpu_limits(struct cpufreq_policy_data *policy)
{
cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq,
- policy->cpuinfo.max_freq);
+ policy->cpuinfo.max_freq);
}
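To illustrate the new ->verify() prototype, a driver callback now receives a struct cpufreq_policy_data and is expected to touch only the min/max limits, e.g. (sketch, driver name hypothetical):

	static int my_cpufreq_verify(struct cpufreq_policy_data *policy)
	{
		/* Only min/max may be adjusted here, never the frequency table */
		cpufreq_verify_within_cpu_limits(policy);
		return 0;
	}

	static struct cpufreq_driver my_cpufreq_driver = {
		.name	= "my-cpufreq",
		.verify	= my_cpufreq_verify,
		/* .init, .target_index, ... */
	};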
#ifdef CONFIG_CPU_FREQ
@@ -513,6 +528,7 @@ static inline unsigned long cpufreq_scale(unsigned long old, u_int div,
* CPUFREQ GOVERNORS *
*********************************************************************/
+#define CPUFREQ_POLICY_UNKNOWN (0)
/*
* If (cpufreq_driver->target) exists, the ->governor decides what frequency
 * within the limits is used. If (cpufreq_driver->setpolicy) exists, these
@@ -595,17 +611,6 @@ struct governor_attr {
size_t count);
};
-static inline bool cpufreq_this_cpu_can_update(struct cpufreq_policy *policy)
-{
- /*
- * Allow remote callbacks if:
- * - dvfs_possible_from_any_cpu flag is set
- * - the local and remote CPUs share cpufreq policy
- */
- return policy->dvfs_possible_from_any_cpu ||
- cpumask_test_cpu(smp_processor_id(), policy->cpus);
-}
-
/*********************************************************************
* FREQUENCY TABLE HELPERS *
*********************************************************************/
@@ -695,9 +700,9 @@ static inline void dev_pm_opp_free_cpufreq_table(struct device *dev,
int cpufreq_frequency_table_cpuinfo(struct cpufreq_policy *policy,
struct cpufreq_frequency_table *table);
-int cpufreq_frequency_table_verify(struct cpufreq_policy *policy,
+int cpufreq_frequency_table_verify(struct cpufreq_policy_data *policy,
struct cpufreq_frequency_table *table);
-int cpufreq_generic_frequency_table_verify(struct cpufreq_policy *policy);
+int cpufreq_generic_frequency_table_verify(struct cpufreq_policy_data *policy);
int cpufreq_table_index_unsorted(struct cpufreq_policy *policy,
unsigned int target_freq,
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
index 068793a619ca..77d70b633531 100644
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -59,6 +59,7 @@ enum cpuhp_state {
CPUHP_IOMMU_INTEL_DEAD,
CPUHP_LUSTRE_CFS_DEAD,
CPUHP_AP_ARM_CACHE_B15_RAC_DEAD,
+ CPUHP_PADATA_DEAD,
CPUHP_WORKQUEUE_PREP,
CPUHP_POWER_NUMA_PREPARE,
CPUHP_HRTIMERS_PREPARE,
@@ -95,11 +96,13 @@ enum cpuhp_state {
CPUHP_AP_OFFLINE,
CPUHP_AP_SCHED_STARTING,
CPUHP_AP_RCUTREE_DYING,
+ CPUHP_AP_CPU_PM_STARTING,
CPUHP_AP_IRQ_GIC_STARTING,
CPUHP_AP_IRQ_HIP04_STARTING,
CPUHP_AP_IRQ_ARMADA_XP_STARTING,
CPUHP_AP_IRQ_BCM2836_STARTING,
CPUHP_AP_IRQ_MIPS_GIC_STARTING,
+ CPUHP_AP_IRQ_SIFIVE_PLIC_STARTING,
CPUHP_AP_ARM_MVEBU_COHERENCY,
CPUHP_AP_MICROCODE_LOADER,
CPUHP_AP_PERF_X86_AMD_UNCORE_STARTING,
@@ -129,6 +132,7 @@ enum cpuhp_state {
CPUHP_AP_ARC_TIMER_STARTING,
CPUHP_AP_RISCV_TIMER_STARTING,
CPUHP_AP_CSKY_TIMER_STARTING,
+ CPUHP_AP_HYPERV_TIMER_STARTING,
CPUHP_AP_KVM_STARTING,
CPUHP_AP_KVM_ARM_VGIC_INIT_STARTING,
CPUHP_AP_KVM_ARM_VGIC_STARTING,
@@ -136,6 +140,7 @@ enum cpuhp_state {
/* Must be the last timer callback */
CPUHP_AP_DUMMY_TIMER_STARTING,
CPUHP_AP_ARM_XEN_STARTING,
+ CPUHP_AP_ARM_KVMPV_STARTING,
CPUHP_AP_ARM_CORESIGHT_STARTING,
CPUHP_AP_ARM64_ISNDEP_STARTING,
CPUHP_AP_SMPCFD_DYING,
diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
index 4b6b5bea8f79..ec2ef63771f0 100644
--- a/include/linux/cpuidle.h
+++ b/include/linux/cpuidle.h
@@ -29,10 +29,13 @@ struct cpuidle_driver;
* CPUIDLE DEVICE INTERFACE *
****************************/
+#define CPUIDLE_STATE_DISABLED_BY_USER BIT(0)
+#define CPUIDLE_STATE_DISABLED_BY_DRIVER BIT(1)
+
struct cpuidle_state_usage {
unsigned long long disable;
unsigned long long usage;
- unsigned long long time; /* in US */
+ u64 time_ns;
unsigned long long above; /* Number of times it's been too deep */
unsigned long long below; /* Number of times it's been too shallow */
#ifdef CONFIG_SUSPEND
@@ -45,11 +48,12 @@ struct cpuidle_state {
char name[CPUIDLE_NAME_LEN];
char desc[CPUIDLE_DESC_LEN];
+ u64 exit_latency_ns;
+ u64 target_residency_ns;
unsigned int flags;
unsigned int exit_latency; /* in US */
int power_usage; /* in mW */
unsigned int target_residency; /* in US */
- bool disabled; /* disabled on all CPUs */
int (*enter) (struct cpuidle_device *dev,
struct cpuidle_driver *drv,
@@ -72,6 +76,8 @@ struct cpuidle_state {
#define CPUIDLE_FLAG_POLLING BIT(0) /* polling state */
#define CPUIDLE_FLAG_COUPLED BIT(1) /* state applies to multiple cpus */
#define CPUIDLE_FLAG_TIMER_STOP BIT(2) /* timer is stopped on this state */
+#define CPUIDLE_FLAG_UNUSABLE BIT(3) /* avoid using this state */
+#define CPUIDLE_FLAG_OFF BIT(4) /* disable this state by default */
struct cpuidle_device_kobj;
struct cpuidle_state_kobj;
@@ -80,14 +86,14 @@ struct cpuidle_driver_kobj;
struct cpuidle_device {
unsigned int registered:1;
unsigned int enabled:1;
- unsigned int use_deepest_state:1;
unsigned int poll_time_limit:1;
unsigned int cpu;
ktime_t next_hrtimer;
int last_state_idx;
- int last_residency;
+ u64 last_residency_ns;
u64 poll_limit_ns;
+ u64 forced_idle_latency_limit_ns;
struct cpuidle_state_usage states_usage[CPUIDLE_STATE_MAX];
struct cpuidle_state_kobj *kobjs[CPUIDLE_STATE_MAX];
struct cpuidle_driver_kobj *kobj_driver;
@@ -110,7 +116,6 @@ DECLARE_PER_CPU(struct cpuidle_device, cpuidle_dev);
struct cpuidle_driver {
const char *name;
struct module *owner;
- int refcnt;
/* used by the cpuidle framework to setup the broadcast timer */
unsigned int bctimer:1;
@@ -142,8 +147,8 @@ extern u64 cpuidle_poll_time(struct cpuidle_driver *drv,
extern int cpuidle_register_driver(struct cpuidle_driver *drv);
extern struct cpuidle_driver *cpuidle_get_driver(void);
-extern struct cpuidle_driver *cpuidle_driver_ref(void);
-extern void cpuidle_driver_unref(void);
+extern void cpuidle_driver_state_disabled(struct cpuidle_driver *drv, int idx,
+ bool disable);
extern void cpuidle_unregister_driver(struct cpuidle_driver *drv);
extern int cpuidle_register_device(struct cpuidle_device *dev);
extern void cpuidle_unregister_device(struct cpuidle_device *dev);
@@ -179,8 +184,8 @@ static inline u64 cpuidle_poll_time(struct cpuidle_driver *drv,
static inline int cpuidle_register_driver(struct cpuidle_driver *drv)
{return -ENODEV; }
static inline struct cpuidle_driver *cpuidle_get_driver(void) {return NULL; }
-static inline struct cpuidle_driver *cpuidle_driver_ref(void) {return NULL; }
-static inline void cpuidle_driver_unref(void) {}
+static inline void cpuidle_driver_state_disabled(struct cpuidle_driver *drv,
+ int idx, bool disable) { }
static inline void cpuidle_unregister_driver(struct cpuidle_driver *drv) { }
static inline int cpuidle_register_device(struct cpuidle_device *dev)
{return -ENODEV; }
@@ -204,18 +209,20 @@ static inline struct cpuidle_device *cpuidle_get_device(void) {return NULL; }
#ifdef CONFIG_CPU_IDLE
extern int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
- struct cpuidle_device *dev);
+ struct cpuidle_device *dev,
+ u64 latency_limit_ns);
extern int cpuidle_enter_s2idle(struct cpuidle_driver *drv,
struct cpuidle_device *dev);
-extern void cpuidle_use_deepest_state(bool enable);
+extern void cpuidle_use_deepest_state(u64 latency_limit_ns);
#else
static inline int cpuidle_find_deepest_state(struct cpuidle_driver *drv,
- struct cpuidle_device *dev)
+ struct cpuidle_device *dev,
+ u64 latency_limit_ns)
{return -ENODEV; }
static inline int cpuidle_enter_s2idle(struct cpuidle_driver *drv,
struct cpuidle_device *dev)
{return -ENODEV; }
-static inline void cpuidle_use_deepest_state(bool enable)
+static inline void cpuidle_use_deepest_state(u64 latency_limit_ns)
{
}
#endif
@@ -260,7 +267,7 @@ struct cpuidle_governor {
#ifdef CONFIG_CPU_IDLE
extern int cpuidle_register_governor(struct cpuidle_governor *gov);
-extern int cpuidle_governor_latency_req(unsigned int cpu);
+extern s64 cpuidle_governor_latency_req(unsigned int cpu);
#else
static inline int cpuidle_register_governor(struct cpuidle_governor *gov)
{return 0;}
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index 78a73eba64dd..f0d895d6ac39 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -194,6 +194,11 @@ static inline unsigned int cpumask_local_spread(unsigned int i, int node)
return 0;
}
+static inline int cpumask_any_and_distribute(const struct cpumask *src1p,
+ const struct cpumask *src2p) {
+ return cpumask_next_and(-1, src1p, src2p);
+}
+
#define for_each_cpu(cpu, mask) \
for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask)
#define for_each_cpu_not(cpu, mask) \
@@ -245,6 +250,8 @@ static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *);
int cpumask_any_but(const struct cpumask *mask, unsigned int cpu);
unsigned int cpumask_local_spread(unsigned int i, int node);
+int cpumask_any_and_distribute(const struct cpumask *src1p,
+ const struct cpumask *src2p);
/**
* for_each_cpu - iterate over every cpu in a mask
@@ -663,9 +670,7 @@ static inline int cpumask_parselist_user(const char __user *buf, int len,
*/
static inline int cpumask_parse(const char *buf, struct cpumask *dstp)
{
- unsigned int len = strchrnul(buf, '\n') - buf;
-
- return bitmap_parse(buf, len, cpumask_bits(dstp), nr_cpumask_bits);
+ return bitmap_parse(buf, UINT_MAX, cpumask_bits(dstp), nr_cpumask_bits);
}
/**
diff --git a/include/linux/crypto.h b/include/linux/crypto.h
index 19ea3a371d7b..763863dbc079 100644
--- a/include/linux/crypto.h
+++ b/include/linux/crypto.h
@@ -41,8 +41,6 @@
#define CRYPTO_ALG_TYPE_CIPHER 0x00000001
#define CRYPTO_ALG_TYPE_COMPRESS 0x00000002
#define CRYPTO_ALG_TYPE_AEAD 0x00000003
-#define CRYPTO_ALG_TYPE_BLKCIPHER 0x00000004
-#define CRYPTO_ALG_TYPE_ABLKCIPHER 0x00000005
#define CRYPTO_ALG_TYPE_SKCIPHER 0x00000005
#define CRYPTO_ALG_TYPE_KPP 0x00000008
#define CRYPTO_ALG_TYPE_ACOMPRESS 0x0000000a
@@ -55,7 +53,6 @@
#define CRYPTO_ALG_TYPE_HASH_MASK 0x0000000e
#define CRYPTO_ALG_TYPE_AHASH_MASK 0x0000000e
-#define CRYPTO_ALG_TYPE_BLKCIPHER_MASK 0x0000000c
#define CRYPTO_ALG_TYPE_ACOMPRESS_MASK 0x0000000e
#define CRYPTO_ALG_LARVAL 0x00000010
@@ -110,16 +107,9 @@
#define CRYPTO_TFM_NEED_KEY 0x00000001
#define CRYPTO_TFM_REQ_MASK 0x000fff00
-#define CRYPTO_TFM_RES_MASK 0xfff00000
-
#define CRYPTO_TFM_REQ_FORBID_WEAK_KEYS 0x00000100
#define CRYPTO_TFM_REQ_MAY_SLEEP 0x00000200
#define CRYPTO_TFM_REQ_MAY_BACKLOG 0x00000400
-#define CRYPTO_TFM_RES_WEAK_KEY 0x00100000
-#define CRYPTO_TFM_RES_BAD_KEY_LEN 0x00200000
-#define CRYPTO_TFM_RES_BAD_KEY_SCHED 0x00400000
-#define CRYPTO_TFM_RES_BAD_BLOCK_LEN 0x00800000
-#define CRYPTO_TFM_RES_BAD_FLAGS 0x01000000
/*
* Miscellaneous stuff.
@@ -139,9 +129,7 @@
#define CRYPTO_MINALIGN_ATTR __attribute__ ((__aligned__(CRYPTO_MINALIGN)))
struct scatterlist;
-struct crypto_ablkcipher;
struct crypto_async_request;
-struct crypto_blkcipher;
struct crypto_tfm;
struct crypto_type;
@@ -163,25 +151,6 @@ struct crypto_async_request {
u32 flags;
};
-struct ablkcipher_request {
- struct crypto_async_request base;
-
- unsigned int nbytes;
-
- void *info;
-
- struct scatterlist *src;
- struct scatterlist *dst;
-
- void *__ctx[] CRYPTO_MINALIGN_ATTR;
-};
-
-struct blkcipher_desc {
- struct crypto_blkcipher *tfm;
- void *info;
- u32 flags;
-};
-
/**
* DOC: Block Cipher Algorithm Definitions
*
@@ -190,83 +159,6 @@ struct blkcipher_desc {
*/
/**
- * struct ablkcipher_alg - asynchronous block cipher definition
- * @min_keysize: Minimum key size supported by the transformation. This is the
- * smallest key length supported by this transformation algorithm.
- * This must be set to one of the pre-defined values as this is
- * not hardware specific. Possible values for this field can be
- * found via git grep "_MIN_KEY_SIZE" include/crypto/
- * @max_keysize: Maximum key size supported by the transformation. This is the
- * largest key length supported by this transformation algorithm.
- * This must be set to one of the pre-defined values as this is
- * not hardware specific. Possible values for this field can be
- * found via git grep "_MAX_KEY_SIZE" include/crypto/
- * @setkey: Set key for the transformation. This function is used to either
- * program a supplied key into the hardware or store the key in the
- * transformation context for programming it later. Note that this
- * function does modify the transformation context. This function can
- * be called multiple times during the existence of the transformation
- * object, so one must make sure the key is properly reprogrammed into
- * the hardware. This function is also responsible for checking the key
- * length for validity. In case a software fallback was put in place in
- * the @cra_init call, this function might need to use the fallback if
- * the algorithm doesn't support all of the key sizes.
- * @encrypt: Encrypt a scatterlist of blocks. This function is used to encrypt
- * the supplied scatterlist containing the blocks of data. The crypto
- * API consumer is responsible for aligning the entries of the
- * scatterlist properly and making sure the chunks are correctly
- * sized. In case a software fallback was put in place in the
- * @cra_init call, this function might need to use the fallback if
- * the algorithm doesn't support all of the key sizes. In case the
- * key was stored in transformation context, the key might need to be
- * re-programmed into the hardware in this function. This function
- * shall not modify the transformation context, as this function may
- * be called in parallel with the same transformation object.
- * @decrypt: Decrypt a single block. This is a reverse counterpart to @encrypt
- * and the conditions are exactly the same.
- * @ivsize: IV size applicable for transformation. The consumer must provide an
- * IV of exactly that size to perform the encrypt or decrypt operation.
- *
- * All fields except @ivsize are mandatory and must be filled.
- */
-struct ablkcipher_alg {
- int (*setkey)(struct crypto_ablkcipher *tfm, const u8 *key,
- unsigned int keylen);
- int (*encrypt)(struct ablkcipher_request *req);
- int (*decrypt)(struct ablkcipher_request *req);
-
- unsigned int min_keysize;
- unsigned int max_keysize;
- unsigned int ivsize;
-};
-
-/**
- * struct blkcipher_alg - synchronous block cipher definition
- * @min_keysize: see struct ablkcipher_alg
- * @max_keysize: see struct ablkcipher_alg
- * @setkey: see struct ablkcipher_alg
- * @encrypt: see struct ablkcipher_alg
- * @decrypt: see struct ablkcipher_alg
- * @ivsize: see struct ablkcipher_alg
- *
- * All fields except @ivsize are mandatory and must be filled.
- */
-struct blkcipher_alg {
- int (*setkey)(struct crypto_tfm *tfm, const u8 *key,
- unsigned int keylen);
- int (*encrypt)(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes);
- int (*decrypt)(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes);
-
- unsigned int min_keysize;
- unsigned int max_keysize;
- unsigned int ivsize;
-};
-
-/**
* struct cipher_alg - single-block symmetric ciphers definition
* @cia_min_keysize: Minimum key size supported by the transformation. This is
* the smallest key length supported by this transformation
@@ -450,8 +342,6 @@ struct crypto_istat_rng {
};
#endif /* CONFIG_CRYPTO_STATS */
-#define cra_ablkcipher cra_u.ablkcipher
-#define cra_blkcipher cra_u.blkcipher
#define cra_cipher cra_u.cipher
#define cra_compress cra_u.compress
@@ -499,9 +389,8 @@ struct crypto_istat_rng {
* transformation algorithm.
* @cra_type: Type of the cryptographic transformation. This is a pointer to
* struct crypto_type, which implements callbacks common for all
- * transformation types. There are multiple options:
- * &crypto_blkcipher_type, &crypto_ablkcipher_type,
- * &crypto_ahash_type, &crypto_rng_type.
+ * transformation types. There are multiple options, such as
+ * &crypto_skcipher_type, &crypto_ahash_type, &crypto_rng_type.
* This field might be empty. In that case, there are no common
* callbacks. This is the case for: cipher, compress, shash.
* @cra_u: Callbacks implementing the transformation. This is a union of
@@ -520,10 +409,6 @@ struct crypto_istat_rng {
* @cra_exit: Deinitialize the cryptographic transformation object. This is a
* counterpart to @cra_init, used to remove various changes set in
* @cra_init.
- * @cra_u.ablkcipher: Union member which contains an asynchronous block cipher
- * definition. See @struct @ablkcipher_alg.
- * @cra_u.blkcipher: Union member which contains a synchronous block cipher
- * definition See @struct @blkcipher_alg.
* @cra_u.cipher: Union member which contains a single-block symmetric cipher
* definition. See @struct @cipher_alg.
* @cra_u.compress: Union member which contains a (de)compression algorithm.
@@ -565,8 +450,6 @@ struct crypto_alg {
const struct crypto_type *cra_type;
union {
- struct ablkcipher_alg ablkcipher;
- struct blkcipher_alg blkcipher;
struct cipher_alg cipher;
struct compress_alg compress;
} cra_u;
@@ -594,8 +477,6 @@ struct crypto_alg {
#ifdef CONFIG_CRYPTO_STATS
void crypto_stats_init(struct crypto_alg *alg);
void crypto_stats_get(struct crypto_alg *alg);
-void crypto_stats_ablkcipher_encrypt(unsigned int nbytes, int ret, struct crypto_alg *alg);
-void crypto_stats_ablkcipher_decrypt(unsigned int nbytes, int ret, struct crypto_alg *alg);
void crypto_stats_aead_encrypt(unsigned int cryptlen, struct crypto_alg *alg, int ret);
void crypto_stats_aead_decrypt(unsigned int cryptlen, struct crypto_alg *alg, int ret);
void crypto_stats_ahash_update(unsigned int nbytes, int ret, struct crypto_alg *alg);
@@ -618,10 +499,6 @@ static inline void crypto_stats_init(struct crypto_alg *alg)
{}
static inline void crypto_stats_get(struct crypto_alg *alg)
{}
-static inline void crypto_stats_ablkcipher_encrypt(unsigned int nbytes, int ret, struct crypto_alg *alg)
-{}
-static inline void crypto_stats_ablkcipher_decrypt(unsigned int nbytes, int ret, struct crypto_alg *alg)
-{}
static inline void crypto_stats_aead_encrypt(unsigned int cryptlen, struct crypto_alg *alg, int ret)
{}
static inline void crypto_stats_aead_decrypt(unsigned int cryptlen, struct crypto_alg *alg, int ret)
@@ -686,7 +563,7 @@ static inline int crypto_wait_req(int err, struct crypto_wait *wait)
reinit_completion(&wait->completion);
err = wait->err;
break;
- };
+ }
return err;
}
@@ -700,9 +577,9 @@ static inline void crypto_init_wait(struct crypto_wait *wait)
* Algorithm registration interface.
*/
int crypto_register_alg(struct crypto_alg *alg);
-int crypto_unregister_alg(struct crypto_alg *alg);
+void crypto_unregister_alg(struct crypto_alg *alg);
int crypto_register_algs(struct crypto_alg *algs, int count);
-int crypto_unregister_algs(struct crypto_alg *algs, int count);
+void crypto_unregister_algs(struct crypto_alg *algs, int count);
/*
* Algorithm query interface.
@@ -715,60 +592,10 @@ int crypto_has_alg(const char *name, u32 type, u32 mask);
* crypto_free_*(), as well as the various helpers below.
*/
-struct ablkcipher_tfm {
- int (*setkey)(struct crypto_ablkcipher *tfm, const u8 *key,
- unsigned int keylen);
- int (*encrypt)(struct ablkcipher_request *req);
- int (*decrypt)(struct ablkcipher_request *req);
-
- struct crypto_ablkcipher *base;
-
- unsigned int ivsize;
- unsigned int reqsize;
-};
-
-struct blkcipher_tfm {
- void *iv;
- int (*setkey)(struct crypto_tfm *tfm, const u8 *key,
- unsigned int keylen);
- int (*encrypt)(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes);
- int (*decrypt)(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes);
-};
-
-struct cipher_tfm {
- int (*cit_setkey)(struct crypto_tfm *tfm,
- const u8 *key, unsigned int keylen);
- void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
- void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
-};
-
-struct compress_tfm {
- int (*cot_compress)(struct crypto_tfm *tfm,
- const u8 *src, unsigned int slen,
- u8 *dst, unsigned int *dlen);
- int (*cot_decompress)(struct crypto_tfm *tfm,
- const u8 *src, unsigned int slen,
- u8 *dst, unsigned int *dlen);
-};
-
-#define crt_ablkcipher crt_u.ablkcipher
-#define crt_blkcipher crt_u.blkcipher
-#define crt_cipher crt_u.cipher
-#define crt_compress crt_u.compress
-
struct crypto_tfm {
u32 crt_flags;
- union {
- struct ablkcipher_tfm ablkcipher;
- struct blkcipher_tfm blkcipher;
- struct cipher_tfm cipher;
- struct compress_tfm compress;
- } crt_u;
-
void (*exit)(struct crypto_tfm *tfm);
struct crypto_alg *__crt_alg;
@@ -776,14 +603,6 @@ struct crypto_tfm {
void *__crt_ctx[] CRYPTO_MINALIGN_ATTR;
};
-struct crypto_ablkcipher {
- struct crypto_tfm base;
-};
-
-struct crypto_blkcipher {
- struct crypto_tfm base;
-};
-
struct crypto_cipher {
struct crypto_tfm base;
};
@@ -891,713 +710,6 @@ static inline unsigned int crypto_tfm_ctx_alignment(void)
return __alignof__(tfm->__crt_ctx);
}
-/*
- * API wrappers.
- */
-static inline struct crypto_ablkcipher *__crypto_ablkcipher_cast(
- struct crypto_tfm *tfm)
-{
- return (struct crypto_ablkcipher *)tfm;
-}
-
-static inline u32 crypto_skcipher_type(u32 type)
-{
- type &= ~CRYPTO_ALG_TYPE_MASK;
- type |= CRYPTO_ALG_TYPE_BLKCIPHER;
- return type;
-}
-
-static inline u32 crypto_skcipher_mask(u32 mask)
-{
- mask &= ~CRYPTO_ALG_TYPE_MASK;
- mask |= CRYPTO_ALG_TYPE_BLKCIPHER_MASK;
- return mask;
-}
-
-/**
- * DOC: Asynchronous Block Cipher API
- *
- * Asynchronous block cipher API is used with the ciphers of type
- * CRYPTO_ALG_TYPE_ABLKCIPHER (listed as type "ablkcipher" in /proc/crypto).
- *
- * Asynchronous cipher operations imply that the function invocation for a
- * cipher request returns immediately before the completion of the operation.
- * The cipher request is scheduled as a separate kernel thread and therefore
- * load-balanced on the different CPUs via the process scheduler. To allow
- * the kernel crypto API to inform the caller about the completion of a cipher
- * request, the caller must provide a callback function. That function is
- * invoked with the cipher handle when the request completes.
- *
- * To support the asynchronous operation, additional information than just the
- * cipher handle must be supplied to the kernel crypto API. That additional
- * information is given by filling in the ablkcipher_request data structure.
- *
- * For the asynchronous block cipher API, the state is maintained with the tfm
- * cipher handle. A single tfm can be used across multiple calls and in
- * parallel. For asynchronous block cipher calls, context data supplied and
- * only used by the caller can be referenced the request data structure in
- * addition to the IV used for the cipher request. The maintenance of such
- * state information would be important for a crypto driver implementer to
- * have, because when calling the callback function upon completion of the
- * cipher operation, that callback function may need some information about
- * which operation just finished if it invoked multiple in parallel. This
- * state information is unused by the kernel crypto API.
- */
-
-static inline struct crypto_tfm *crypto_ablkcipher_tfm(
- struct crypto_ablkcipher *tfm)
-{
- return &tfm->base;
-}
-
-/**
- * crypto_free_ablkcipher() - zeroize and free cipher handle
- * @tfm: cipher handle to be freed
- */
-static inline void crypto_free_ablkcipher(struct crypto_ablkcipher *tfm)
-{
- crypto_free_tfm(crypto_ablkcipher_tfm(tfm));
-}
-
-/**
- * crypto_has_ablkcipher() - Search for the availability of an ablkcipher.
- * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
- * ablkcipher
- * @type: specifies the type of the cipher
- * @mask: specifies the mask for the cipher
- *
- * Return: true when the ablkcipher is known to the kernel crypto API; false
- * otherwise
- */
-static inline int crypto_has_ablkcipher(const char *alg_name, u32 type,
- u32 mask)
-{
- return crypto_has_alg(alg_name, crypto_skcipher_type(type),
- crypto_skcipher_mask(mask));
-}
-
-static inline struct ablkcipher_tfm *crypto_ablkcipher_crt(
- struct crypto_ablkcipher *tfm)
-{
- return &crypto_ablkcipher_tfm(tfm)->crt_ablkcipher;
-}
-
-/**
- * crypto_ablkcipher_ivsize() - obtain IV size
- * @tfm: cipher handle
- *
- * The size of the IV for the ablkcipher referenced by the cipher handle is
- * returned. This IV size may be zero if the cipher does not need an IV.
- *
- * Return: IV size in bytes
- */
-static inline unsigned int crypto_ablkcipher_ivsize(
- struct crypto_ablkcipher *tfm)
-{
- return crypto_ablkcipher_crt(tfm)->ivsize;
-}
-
-/**
- * crypto_ablkcipher_blocksize() - obtain block size of cipher
- * @tfm: cipher handle
- *
- * The block size for the ablkcipher referenced with the cipher handle is
- * returned. The caller may use that information to allocate appropriate
- * memory for the data returned by the encryption or decryption operation
- *
- * Return: block size of cipher
- */
-static inline unsigned int crypto_ablkcipher_blocksize(
- struct crypto_ablkcipher *tfm)
-{
- return crypto_tfm_alg_blocksize(crypto_ablkcipher_tfm(tfm));
-}
-
-static inline unsigned int crypto_ablkcipher_alignmask(
- struct crypto_ablkcipher *tfm)
-{
- return crypto_tfm_alg_alignmask(crypto_ablkcipher_tfm(tfm));
-}
-
-static inline u32 crypto_ablkcipher_get_flags(struct crypto_ablkcipher *tfm)
-{
- return crypto_tfm_get_flags(crypto_ablkcipher_tfm(tfm));
-}
-
-static inline void crypto_ablkcipher_set_flags(struct crypto_ablkcipher *tfm,
- u32 flags)
-{
- crypto_tfm_set_flags(crypto_ablkcipher_tfm(tfm), flags);
-}
-
-static inline void crypto_ablkcipher_clear_flags(struct crypto_ablkcipher *tfm,
- u32 flags)
-{
- crypto_tfm_clear_flags(crypto_ablkcipher_tfm(tfm), flags);
-}
-
-/**
- * crypto_ablkcipher_setkey() - set key for cipher
- * @tfm: cipher handle
- * @key: buffer holding the key
- * @keylen: length of the key in bytes
- *
- * The caller provided key is set for the ablkcipher referenced by the cipher
- * handle.
- *
- * Note, the key length determines the cipher type. Many block ciphers implement
- * different cipher modes depending on the key size, such as AES-128 vs AES-192
- * vs. AES-256. When providing a 16 byte key for an AES cipher handle, AES-128
- * is performed.
- *
- * Return: 0 if the setting of the key was successful; < 0 if an error occurred
- */
-static inline int crypto_ablkcipher_setkey(struct crypto_ablkcipher *tfm,
- const u8 *key, unsigned int keylen)
-{
- struct ablkcipher_tfm *crt = crypto_ablkcipher_crt(tfm);
-
- return crt->setkey(crt->base, key, keylen);
-}
-
-/**
- * crypto_ablkcipher_reqtfm() - obtain cipher handle from request
- * @req: ablkcipher_request out of which the cipher handle is to be obtained
- *
- * Return the crypto_ablkcipher handle when furnishing an ablkcipher_request
- * data structure.
- *
- * Return: crypto_ablkcipher handle
- */
-static inline struct crypto_ablkcipher *crypto_ablkcipher_reqtfm(
- struct ablkcipher_request *req)
-{
- return __crypto_ablkcipher_cast(req->base.tfm);
-}
-
-/**
- * crypto_ablkcipher_encrypt() - encrypt plaintext
- * @req: reference to the ablkcipher_request handle that holds all information
- * needed to perform the cipher operation
- *
- * Encrypt plaintext data using the ablkcipher_request handle. That data
- * structure and how it is filled with data is discussed with the
- * ablkcipher_request_* functions.
- *
- * Return: 0 if the cipher operation was successful; < 0 if an error occurred
- */
-static inline int crypto_ablkcipher_encrypt(struct ablkcipher_request *req)
-{
- struct ablkcipher_tfm *crt =
- crypto_ablkcipher_crt(crypto_ablkcipher_reqtfm(req));
- struct crypto_alg *alg = crt->base->base.__crt_alg;
- unsigned int nbytes = req->nbytes;
- int ret;
-
- crypto_stats_get(alg);
- ret = crt->encrypt(req);
- crypto_stats_ablkcipher_encrypt(nbytes, ret, alg);
- return ret;
-}
-
-/**
- * crypto_ablkcipher_decrypt() - decrypt ciphertext
- * @req: reference to the ablkcipher_request handle that holds all information
- * needed to perform the cipher operation
- *
- * Decrypt ciphertext data using the ablkcipher_request handle. That data
- * structure and how it is filled with data is discussed with the
- * ablkcipher_request_* functions.
- *
- * Return: 0 if the cipher operation was successful; < 0 if an error occurred
- */
-static inline int crypto_ablkcipher_decrypt(struct ablkcipher_request *req)
-{
- struct ablkcipher_tfm *crt =
- crypto_ablkcipher_crt(crypto_ablkcipher_reqtfm(req));
- struct crypto_alg *alg = crt->base->base.__crt_alg;
- unsigned int nbytes = req->nbytes;
- int ret;
-
- crypto_stats_get(alg);
- ret = crt->decrypt(req);
- crypto_stats_ablkcipher_decrypt(nbytes, ret, alg);
- return ret;
-}
-
-/**
- * DOC: Asynchronous Cipher Request Handle
- *
- * The ablkcipher_request data structure contains all pointers to data
- * required for the asynchronous cipher operation. This includes the cipher
- * handle (which can be used by multiple ablkcipher_request instances), pointer
- * to plaintext and ciphertext, asynchronous callback function, etc. It acts
- * as a handle to the ablkcipher_request_* API calls in a similar way as
- * ablkcipher handle to the crypto_ablkcipher_* API calls.
- */
-
-/**
- * crypto_ablkcipher_reqsize() - obtain size of the request data structure
- * @tfm: cipher handle
- *
- * Return: number of bytes
- */
-static inline unsigned int crypto_ablkcipher_reqsize(
- struct crypto_ablkcipher *tfm)
-{
- return crypto_ablkcipher_crt(tfm)->reqsize;
-}
-
-/**
- * ablkcipher_request_set_tfm() - update cipher handle reference in request
- * @req: request handle to be modified
- * @tfm: cipher handle that shall be added to the request handle
- *
- * Allow the caller to replace the existing ablkcipher handle in the request
- * data structure with a different one.
- */
-static inline void ablkcipher_request_set_tfm(
- struct ablkcipher_request *req, struct crypto_ablkcipher *tfm)
-{
- req->base.tfm = crypto_ablkcipher_tfm(crypto_ablkcipher_crt(tfm)->base);
-}
-
-static inline struct ablkcipher_request *ablkcipher_request_cast(
- struct crypto_async_request *req)
-{
- return container_of(req, struct ablkcipher_request, base);
-}
-
-/**
- * ablkcipher_request_alloc() - allocate request data structure
- * @tfm: cipher handle to be registered with the request
- * @gfp: memory allocation flag that is handed to kmalloc by the API call.
- *
- * Allocate the request data structure that must be used with the ablkcipher
- * encrypt and decrypt API calls. During the allocation, the provided ablkcipher
- * handle is registered in the request data structure.
- *
- * Return: allocated request handle in case of success, or NULL if out of memory
- */
-static inline struct ablkcipher_request *ablkcipher_request_alloc(
- struct crypto_ablkcipher *tfm, gfp_t gfp)
-{
- struct ablkcipher_request *req;
-
- req = kmalloc(sizeof(struct ablkcipher_request) +
- crypto_ablkcipher_reqsize(tfm), gfp);
-
- if (likely(req))
- ablkcipher_request_set_tfm(req, tfm);
-
- return req;
-}
-
-/**
- * ablkcipher_request_free() - zeroize and free request data structure
- * @req: request data structure cipher handle to be freed
- */
-static inline void ablkcipher_request_free(struct ablkcipher_request *req)
-{
- kzfree(req);
-}
-
-/**
- * ablkcipher_request_set_callback() - set asynchronous callback function
- * @req: request handle
- * @flags: specify zero or an ORing of the flags
- * CRYPTO_TFM_REQ_MAY_BACKLOG the request queue may back log and
- * increase the wait queue beyond the initial maximum size;
- * CRYPTO_TFM_REQ_MAY_SLEEP the request processing may sleep
- * @compl: callback function pointer to be registered with the request handle
- * @data: The data pointer refers to memory that is not used by the kernel
- * crypto API, but provided to the callback function for it to use. Here,
- * the caller can provide a reference to memory the callback function can
- * operate on. As the callback function is invoked asynchronously to the
- * related functionality, it may need to access data structures of the
- * related functionality which can be referenced using this pointer. The
- * callback function can access the memory via the "data" field in the
- * crypto_async_request data structure provided to the callback function.
- *
- * This function allows setting the callback function that is triggered once the
- * cipher operation completes.
- *
- * The callback function is registered with the ablkcipher_request handle and
- * must comply with the following template::
- *
- * void callback_function(struct crypto_async_request *req, int error)
- */
-static inline void ablkcipher_request_set_callback(
- struct ablkcipher_request *req,
- u32 flags, crypto_completion_t compl, void *data)
-{
- req->base.complete = compl;
- req->base.data = data;
- req->base.flags = flags;
-}
-
-/**
- * ablkcipher_request_set_crypt() - set data buffers
- * @req: request handle
- * @src: source scatter / gather list
- * @dst: destination scatter / gather list
- * @nbytes: number of bytes to process from @src
- * @iv: IV for the cipher operation which must comply with the IV size defined
- * by crypto_ablkcipher_ivsize
- *
- * This function allows setting of the source data and destination data
- * scatter / gather lists.
- *
- * For encryption, the source is treated as the plaintext and the
- * destination is the ciphertext. For a decryption operation, the use is
- * reversed - the source is the ciphertext and the destination is the plaintext.
- */
-static inline void ablkcipher_request_set_crypt(
- struct ablkcipher_request *req,
- struct scatterlist *src, struct scatterlist *dst,
- unsigned int nbytes, void *iv)
-{
- req->src = src;
- req->dst = dst;
- req->nbytes = nbytes;
- req->info = iv;
-}
-
-/**
- * DOC: Synchronous Block Cipher API
- *
- * The synchronous block cipher API is used with the ciphers of type
- * CRYPTO_ALG_TYPE_BLKCIPHER (listed as type "blkcipher" in /proc/crypto)
- *
- * Synchronous calls, have a context in the tfm. But since a single tfm can be
- * used in multiple calls and in parallel, this info should not be changeable
- * (unless a lock is used). This applies, for example, to the symmetric key.
- * However, the IV is changeable, so there is an iv field in blkcipher_tfm
- * structure for synchronous blkcipher api. So, its the only state info that can
- * be kept for synchronous calls without using a big lock across a tfm.
- *
- * The block cipher API allows the use of a complete cipher, i.e. a cipher
- * consisting of a template (a block chaining mode) and a single block cipher
- * primitive (e.g. AES).
- *
- * The plaintext data buffer and the ciphertext data buffer are pointed to
- * by using scatter/gather lists. The cipher operation is performed
- * on all segments of the provided scatter/gather lists.
- *
- * The kernel crypto API supports a cipher operation "in-place" which means that
- * the caller may provide the same scatter/gather list for the plaintext and
- * cipher text. After the completion of the cipher operation, the plaintext
- * data is replaced with the ciphertext data in case of an encryption and vice
- * versa for a decryption. The caller must ensure that the scatter/gather lists
- * for the output data point to sufficiently large buffers, i.e. multiples of
- * the block size of the cipher.
- */
-
-static inline struct crypto_blkcipher *__crypto_blkcipher_cast(
- struct crypto_tfm *tfm)
-{
- return (struct crypto_blkcipher *)tfm;
-}
-
-static inline struct crypto_blkcipher *crypto_blkcipher_cast(
- struct crypto_tfm *tfm)
-{
- BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_BLKCIPHER);
- return __crypto_blkcipher_cast(tfm);
-}
-
-/**
- * crypto_alloc_blkcipher() - allocate synchronous block cipher handle
- * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
- * blkcipher cipher
- * @type: specifies the type of the cipher
- * @mask: specifies the mask for the cipher
- *
- * Allocate a cipher handle for a block cipher. The returned struct
- * crypto_blkcipher is the cipher handle that is required for any subsequent
- * API invocation for that block cipher.
- *
- * Return: allocated cipher handle in case of success; IS_ERR() is true in case
- * of an error, PTR_ERR() returns the error code.
- */
-static inline struct crypto_blkcipher *crypto_alloc_blkcipher(
- const char *alg_name, u32 type, u32 mask)
-{
- type &= ~CRYPTO_ALG_TYPE_MASK;
- type |= CRYPTO_ALG_TYPE_BLKCIPHER;
- mask |= CRYPTO_ALG_TYPE_MASK;
-
- return __crypto_blkcipher_cast(crypto_alloc_base(alg_name, type, mask));
-}
-
-static inline struct crypto_tfm *crypto_blkcipher_tfm(
- struct crypto_blkcipher *tfm)
-{
- return &tfm->base;
-}
-
-/**
- * crypto_free_blkcipher() - zeroize and free the block cipher handle
- * @tfm: cipher handle to be freed
- */
-static inline void crypto_free_blkcipher(struct crypto_blkcipher *tfm)
-{
- crypto_free_tfm(crypto_blkcipher_tfm(tfm));
-}
-
-/**
- * crypto_has_blkcipher() - Search for the availability of a block cipher
- * @alg_name: is the cra_name / name or cra_driver_name / driver name of the
- * block cipher
- * @type: specifies the type of the cipher
- * @mask: specifies the mask for the cipher
- *
- * Return: true when the block cipher is known to the kernel crypto API; false
- * otherwise
- */
-static inline int crypto_has_blkcipher(const char *alg_name, u32 type, u32 mask)
-{
- type &= ~CRYPTO_ALG_TYPE_MASK;
- type |= CRYPTO_ALG_TYPE_BLKCIPHER;
- mask |= CRYPTO_ALG_TYPE_MASK;
-
- return crypto_has_alg(alg_name, type, mask);
-}
-
-/**
- * crypto_blkcipher_name() - return the name / cra_name from the cipher handle
- * @tfm: cipher handle
- *
- * Return: The character string holding the name of the cipher
- */
-static inline const char *crypto_blkcipher_name(struct crypto_blkcipher *tfm)
-{
- return crypto_tfm_alg_name(crypto_blkcipher_tfm(tfm));
-}
-
-static inline struct blkcipher_tfm *crypto_blkcipher_crt(
- struct crypto_blkcipher *tfm)
-{
- return &crypto_blkcipher_tfm(tfm)->crt_blkcipher;
-}
-
-static inline struct blkcipher_alg *crypto_blkcipher_alg(
- struct crypto_blkcipher *tfm)
-{
- return &crypto_blkcipher_tfm(tfm)->__crt_alg->cra_blkcipher;
-}
-
-/**
- * crypto_blkcipher_ivsize() - obtain IV size
- * @tfm: cipher handle
- *
- * The size of the IV for the block cipher referenced by the cipher handle is
- * returned. This IV size may be zero if the cipher does not need an IV.
- *
- * Return: IV size in bytes
- */
-static inline unsigned int crypto_blkcipher_ivsize(struct crypto_blkcipher *tfm)
-{
- return crypto_blkcipher_alg(tfm)->ivsize;
-}
-
-/**
- * crypto_blkcipher_blocksize() - obtain block size of cipher
- * @tfm: cipher handle
- *
- * The block size for the block cipher referenced with the cipher handle is
- * returned. The caller may use that information to allocate appropriate
- * memory for the data returned by the encryption or decryption operation.
- *
- * Return: block size of cipher
- */
-static inline unsigned int crypto_blkcipher_blocksize(
- struct crypto_blkcipher *tfm)
-{
- return crypto_tfm_alg_blocksize(crypto_blkcipher_tfm(tfm));
-}
-
-static inline unsigned int crypto_blkcipher_alignmask(
- struct crypto_blkcipher *tfm)
-{
- return crypto_tfm_alg_alignmask(crypto_blkcipher_tfm(tfm));
-}
-
-static inline u32 crypto_blkcipher_get_flags(struct crypto_blkcipher *tfm)
-{
- return crypto_tfm_get_flags(crypto_blkcipher_tfm(tfm));
-}
-
-static inline void crypto_blkcipher_set_flags(struct crypto_blkcipher *tfm,
- u32 flags)
-{
- crypto_tfm_set_flags(crypto_blkcipher_tfm(tfm), flags);
-}
-
-static inline void crypto_blkcipher_clear_flags(struct crypto_blkcipher *tfm,
- u32 flags)
-{
- crypto_tfm_clear_flags(crypto_blkcipher_tfm(tfm), flags);
-}
-
-/**
- * crypto_blkcipher_setkey() - set key for cipher
- * @tfm: cipher handle
- * @key: buffer holding the key
- * @keylen: length of the key in bytes
- *
- * The caller provided key is set for the block cipher referenced by the cipher
- * handle.
- *
- * Note, the key length determines the cipher type. Many block ciphers implement
- * different cipher modes depending on the key size, such as AES-128 vs AES-192
- * vs. AES-256. When providing a 16 byte key for an AES cipher handle, AES-128
- * is performed.
- *
- * Return: 0 if the setting of the key was successful; < 0 if an error occurred
- */
-static inline int crypto_blkcipher_setkey(struct crypto_blkcipher *tfm,
- const u8 *key, unsigned int keylen)
-{
- return crypto_blkcipher_crt(tfm)->setkey(crypto_blkcipher_tfm(tfm),
- key, keylen);
-}
-
-/**
- * crypto_blkcipher_encrypt() - encrypt plaintext
- * @desc: reference to the block cipher handle with meta data
- * @dst: scatter/gather list that is filled by the cipher operation with the
- * ciphertext
- * @src: scatter/gather list that holds the plaintext
- * @nbytes: number of bytes of the plaintext to encrypt.
- *
- * Encrypt plaintext data using the IV set by the caller with a preceding
- * call of crypto_blkcipher_set_iv.
- *
- * The blkcipher_desc data structure must be filled by the caller and can
- * reside on the stack. The caller must fill desc as follows: desc.tfm is filled
- * with the block cipher handle; desc.flags is filled with either
- * CRYPTO_TFM_REQ_MAY_SLEEP or 0.
- *
- * Return: 0 if the cipher operation was successful; < 0 if an error occurred
- */
-static inline int crypto_blkcipher_encrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst,
- struct scatterlist *src,
- unsigned int nbytes)
-{
- desc->info = crypto_blkcipher_crt(desc->tfm)->iv;
- return crypto_blkcipher_crt(desc->tfm)->encrypt(desc, dst, src, nbytes);
-}
-
-/**
- * crypto_blkcipher_encrypt_iv() - encrypt plaintext with dedicated IV
- * @desc: reference to the block cipher handle with meta data
- * @dst: scatter/gather list that is filled by the cipher operation with the
- * ciphertext
- * @src: scatter/gather list that holds the plaintext
- * @nbytes: number of bytes of the plaintext to encrypt.
- *
- * Encrypt plaintext data with the use of an IV that is solely used for this
- * cipher operation. Any previously set IV is not used.
- *
- * The blkcipher_desc data structure must be filled by the caller and can
- * reside on the stack. The caller must fill desc as follows: desc.tfm is filled
- * with the block cipher handle; desc.info is filled with the IV to be used for
- * the current operation; desc.flags is filled with either
- * CRYPTO_TFM_REQ_MAY_SLEEP or 0.
- *
- * Return: 0 if the cipher operation was successful; < 0 if an error occurred
- */
-static inline int crypto_blkcipher_encrypt_iv(struct blkcipher_desc *desc,
- struct scatterlist *dst,
- struct scatterlist *src,
- unsigned int nbytes)
-{
- return crypto_blkcipher_crt(desc->tfm)->encrypt(desc, dst, src, nbytes);
-}
-
-/**
- * crypto_blkcipher_decrypt() - decrypt ciphertext
- * @desc: reference to the block cipher handle with meta data
- * @dst: scatter/gather list that is filled by the cipher operation with the
- * plaintext
- * @src: scatter/gather list that holds the ciphertext
- * @nbytes: number of bytes of the ciphertext to decrypt.
- *
- * Decrypt ciphertext data using the IV set by the caller with a preceding
- * call of crypto_blkcipher_set_iv.
- *
- * The blkcipher_desc data structure must be filled by the caller as documented
- * for the crypto_blkcipher_encrypt call above.
- *
- * Return: 0 if the cipher operation was successful; < 0 if an error occurred
- *
- */
-static inline int crypto_blkcipher_decrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst,
- struct scatterlist *src,
- unsigned int nbytes)
-{
- desc->info = crypto_blkcipher_crt(desc->tfm)->iv;
- return crypto_blkcipher_crt(desc->tfm)->decrypt(desc, dst, src, nbytes);
-}
-
-/**
- * crypto_blkcipher_decrypt_iv() - decrypt ciphertext with dedicated IV
- * @desc: reference to the block cipher handle with meta data
- * @dst: scatter/gather list that is filled by the cipher operation with the
- * plaintext
- * @src: scatter/gather list that holds the ciphertext
- * @nbytes: number of bytes of the ciphertext to decrypt.
- *
- * Decrypt ciphertext data with the use of an IV that is solely used for this
- * cipher operation. Any previously set IV is not used.
- *
- * The blkcipher_desc data structure must be filled by the caller as documented
- * for the crypto_blkcipher_encrypt_iv call above.
- *
- * Return: 0 if the cipher operation was successful; < 0 if an error occurred
- */
-static inline int crypto_blkcipher_decrypt_iv(struct blkcipher_desc *desc,
- struct scatterlist *dst,
- struct scatterlist *src,
- unsigned int nbytes)
-{
- return crypto_blkcipher_crt(desc->tfm)->decrypt(desc, dst, src, nbytes);
-}
-
-/**
- * crypto_blkcipher_set_iv() - set IV for cipher
- * @tfm: cipher handle
- * @src: buffer holding the IV
- * @len: length of the IV in bytes
- *
- * The caller provided IV is set for the block cipher referenced by the cipher
- * handle.
- */
-static inline void crypto_blkcipher_set_iv(struct crypto_blkcipher *tfm,
- const u8 *src, unsigned int len)
-{
- memcpy(crypto_blkcipher_crt(tfm)->iv, src, len);
-}
-
-/**
- * crypto_blkcipher_get_iv() - obtain IV from cipher
- * @tfm: cipher handle
- * @dst: buffer filled with the IV
- * @len: length of the buffer dst
- *
- * The caller can obtain the IV set for the block cipher referenced by the
- * cipher handle and store it into the user-provided buffer. If the buffer
- * has an insufficient space, the IV is truncated to fit the buffer.
- */
-static inline void crypto_blkcipher_get_iv(struct crypto_blkcipher *tfm,
- u8 *dst, unsigned int len)
-{
- memcpy(dst, crypto_blkcipher_crt(tfm)->iv, len);
-}
-
/**
* DOC: Single Block Cipher API
*
@@ -1620,12 +732,6 @@ static inline struct crypto_cipher *__crypto_cipher_cast(struct crypto_tfm *tfm)
return (struct crypto_cipher *)tfm;
}
-static inline struct crypto_cipher *crypto_cipher_cast(struct crypto_tfm *tfm)
-{
- BUG_ON(crypto_tfm_alg_type(tfm) != CRYPTO_ALG_TYPE_CIPHER);
- return __crypto_cipher_cast(tfm);
-}
-
/**
* crypto_alloc_cipher() - allocate single block cipher handle
* @alg_name: is the cra_name / name or cra_driver_name / driver name of the
@@ -1683,11 +789,6 @@ static inline int crypto_has_cipher(const char *alg_name, u32 type, u32 mask)
return crypto_has_alg(alg_name, type, mask);
}
-static inline struct cipher_tfm *crypto_cipher_crt(struct crypto_cipher *tfm)
-{
- return &crypto_cipher_tfm(tfm)->crt_cipher;
-}
-
/**
* crypto_cipher_blocksize() - obtain block size for cipher
* @tfm: cipher handle
@@ -1741,12 +842,8 @@ static inline void crypto_cipher_clear_flags(struct crypto_cipher *tfm,
*
* Return: 0 if the setting of the key was successful; < 0 if an error occurred
*/
-static inline int crypto_cipher_setkey(struct crypto_cipher *tfm,
- const u8 *key, unsigned int keylen)
-{
- return crypto_cipher_crt(tfm)->cit_setkey(crypto_cipher_tfm(tfm),
- key, keylen);
-}
+int crypto_cipher_setkey(struct crypto_cipher *tfm,
+ const u8 *key, unsigned int keylen);
/**
* crypto_cipher_encrypt_one() - encrypt one block of plaintext
@@ -1757,12 +854,8 @@ static inline int crypto_cipher_setkey(struct crypto_cipher *tfm,
* Invoke the encryption operation of one block. The caller must ensure that
* the plaintext and ciphertext buffers are at least one block in size.
*/
-static inline void crypto_cipher_encrypt_one(struct crypto_cipher *tfm,
- u8 *dst, const u8 *src)
-{
- crypto_cipher_crt(tfm)->cit_encrypt_one(crypto_cipher_tfm(tfm),
- dst, src);
-}
+void crypto_cipher_encrypt_one(struct crypto_cipher *tfm,
+ u8 *dst, const u8 *src);
/**
* crypto_cipher_decrypt_one() - decrypt one block of ciphertext
@@ -1773,25 +866,14 @@ static inline void crypto_cipher_encrypt_one(struct crypto_cipher *tfm,
* Invoke the decryption operation of one block. The caller must ensure that
* the plaintext and ciphertext buffers are at least one block in size.
*/
-static inline void crypto_cipher_decrypt_one(struct crypto_cipher *tfm,
- u8 *dst, const u8 *src)
-{
- crypto_cipher_crt(tfm)->cit_decrypt_one(crypto_cipher_tfm(tfm),
- dst, src);
-}
+void crypto_cipher_decrypt_one(struct crypto_cipher *tfm,
+ u8 *dst, const u8 *src);
static inline struct crypto_comp *__crypto_comp_cast(struct crypto_tfm *tfm)
{
return (struct crypto_comp *)tfm;
}
-static inline struct crypto_comp *crypto_comp_cast(struct crypto_tfm *tfm)
-{
- BUG_ON((crypto_tfm_alg_type(tfm) ^ CRYPTO_ALG_TYPE_COMPRESS) &
- CRYPTO_ALG_TYPE_MASK);
- return __crypto_comp_cast(tfm);
-}
-
static inline struct crypto_comp *crypto_alloc_comp(const char *alg_name,
u32 type, u32 mask)
{
@@ -1826,26 +908,13 @@ static inline const char *crypto_comp_name(struct crypto_comp *tfm)
return crypto_tfm_alg_name(crypto_comp_tfm(tfm));
}
-static inline struct compress_tfm *crypto_comp_crt(struct crypto_comp *tfm)
-{
- return &crypto_comp_tfm(tfm)->crt_compress;
-}
+int crypto_comp_compress(struct crypto_comp *tfm,
+ const u8 *src, unsigned int slen,
+ u8 *dst, unsigned int *dlen);
-static inline int crypto_comp_compress(struct crypto_comp *tfm,
- const u8 *src, unsigned int slen,
- u8 *dst, unsigned int *dlen)
-{
- return crypto_comp_crt(tfm)->cot_compress(crypto_comp_tfm(tfm),
- src, slen, dst, dlen);
-}
-
-static inline int crypto_comp_decompress(struct crypto_comp *tfm,
- const u8 *src, unsigned int slen,
- u8 *dst, unsigned int *dlen)
-{
- return crypto_comp_crt(tfm)->cot_decompress(crypto_comp_tfm(tfm),
- src, slen, dst, dlen);
-}
+int crypto_comp_decompress(struct crypto_comp *tfm,
+ const u8 *src, unsigned int slen,
+ u8 *dst, unsigned int *dlen);
#endif /* _LINUX_CRYPTO_H */
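
Editor's note on the crypto.h hunks above: the legacy ablkcipher and blkcipher interfaces are removed outright, and the crypto_cipher_*() / crypto_comp_*() helpers become out-of-line declarations. The sketch below is not part of the patch; it only illustrates, under stated assumptions, what a caller ported to the skcipher interface from <crypto/skcipher.h> typically looks like. The "cbc(aes)" algorithm name, the in-place buffer handling and the example_* function name are illustrative choices, not anything this patch adds.

	#include <crypto/skcipher.h>
	#include <linux/scatterlist.h>
	#include <linux/err.h>

	/* Sketch only: encrypt @len bytes of @buf in place with CBC-AES,
	 * driving the asynchronous skcipher API synchronously through
	 * crypto_wait_req(). */
	static int example_cbc_aes_encrypt(const u8 *key, unsigned int keylen,
					   u8 *iv, void *buf, unsigned int len)
	{
		struct crypto_skcipher *tfm;
		struct skcipher_request *req;
		struct scatterlist sg;
		DECLARE_CRYPTO_WAIT(wait);
		int err;

		tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
		if (IS_ERR(tfm))
			return PTR_ERR(tfm);

		err = crypto_skcipher_setkey(tfm, key, keylen);
		if (err)
			goto out_free_tfm;

		req = skcipher_request_alloc(tfm, GFP_KERNEL);
		if (!req) {
			err = -ENOMEM;
			goto out_free_tfm;
		}

		/* src == dst is allowed; the operation runs in place. */
		sg_init_one(&sg, buf, len);
		skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
					      crypto_req_done, &wait);
		skcipher_request_set_crypt(req, &sg, &sg, len, iv);

		err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);

		skcipher_request_free(req);
	out_free_tfm:
		crypto_free_skcipher(tfm);
		return err;
	}
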
diff --git a/include/linux/dax.h b/include/linux/dax.h
index 9bd8528bd305..328c2dbb4409 100644
--- a/include/linux/dax.h
+++ b/include/linux/dax.h
@@ -129,11 +129,6 @@ static inline bool generic_fsdax_supported(struct dax_device *dax_dev,
sectors);
}
-static inline struct dax_device *fs_dax_get_by_host(const char *host)
-{
- return dax_get_by_host(host);
-}
-
static inline void fs_put_dax(struct dax_device *dax_dev)
{
put_dax(dax_dev);
@@ -141,7 +136,7 @@ static inline void fs_put_dax(struct dax_device *dax_dev)
struct dax_device *fs_dax_get_by_bdev(struct block_device *bdev);
int dax_writeback_mapping_range(struct address_space *mapping,
- struct block_device *bdev, struct writeback_control *wbc);
+ struct dax_device *dax_dev, struct writeback_control *wbc);
struct page *dax_layout_busy_page(struct address_space *mapping);
dax_entry_t dax_lock_page(struct page *page);
@@ -160,11 +155,6 @@ static inline bool generic_fsdax_supported(struct dax_device *dax_dev,
return false;
}
-static inline struct dax_device *fs_dax_get_by_host(const char *host)
-{
- return NULL;
-}
-
static inline void fs_put_dax(struct dax_device *dax_dev)
{
}
@@ -180,7 +170,7 @@ static inline struct page *dax_layout_busy_page(struct address_space *mapping)
}
static inline int dax_writeback_mapping_range(struct address_space *mapping,
- struct block_device *bdev, struct writeback_control *wbc)
+ struct dax_device *dax_dev, struct writeback_control *wbc)
{
return -EOPNOTSUPP;
}
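
Editor's note on the dax.h hunks above: dax_writeback_mapping_range() now takes the dax_device directly instead of a block_device, and the fs_dax_get_by_host() wrapper is dropped. A minimal sketch of an updated call site follows; it is not part of the patch, and the helper name is an assumption made for illustration.

	#include <linux/dax.h>
	#include <linux/writeback.h>

	/* Sketch only: the second argument is now the dax_device the
	 * filesystem already holds (old callers passed a block_device,
	 * e.g. mapping->host->i_sb->s_bdev). */
	static int example_dax_writepages(struct address_space *mapping,
					  struct writeback_control *wbc,
					  struct dax_device *dax_dev)
	{
		return dax_writeback_mapping_range(mapping, dax_dev, wbc);
	}
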
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index 10090f11ab95..c1488cc84fd9 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -440,6 +440,11 @@ static inline bool d_is_negative(const struct dentry *dentry)
return d_is_miss(dentry);
}
+static inline bool d_flags_negative(unsigned flags)
+{
+ return (flags & DCACHE_ENTRY_TYPE) == DCACHE_MISS_TYPE;
+}
+
static inline bool d_is_positive(const struct dentry *dentry)
{
return !d_is_negative(dentry);
diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h
index 58424eb3b329..d672b7db0cfc 100644
--- a/include/linux/debugfs.h
+++ b/include/linux/debugfs.h
@@ -54,6 +54,8 @@ static const struct file_operations __fops = { \
.llseek = no_llseek, \
}
+typedef struct vfsmount *(*debugfs_automount_t)(struct dentry *, void *);
+
#if defined(CONFIG_DEBUG_FS)
struct dentry *debugfs_lookup(const char *name, struct dentry *parent);
@@ -65,24 +67,23 @@ struct dentry *debugfs_create_file_unsafe(const char *name, umode_t mode,
struct dentry *parent, void *data,
const struct file_operations *fops);
-struct dentry *debugfs_create_file_size(const char *name, umode_t mode,
- struct dentry *parent, void *data,
- const struct file_operations *fops,
- loff_t file_size);
+void debugfs_create_file_size(const char *name, umode_t mode,
+ struct dentry *parent, void *data,
+ const struct file_operations *fops,
+ loff_t file_size);
struct dentry *debugfs_create_dir(const char *name, struct dentry *parent);
struct dentry *debugfs_create_symlink(const char *name, struct dentry *parent,
const char *dest);
-typedef struct vfsmount *(*debugfs_automount_t)(struct dentry *, void *);
struct dentry *debugfs_create_automount(const char *name,
struct dentry *parent,
debugfs_automount_t f,
void *data);
void debugfs_remove(struct dentry *dentry);
-void debugfs_remove_recursive(struct dentry *dentry);
+#define debugfs_remove_recursive debugfs_remove
const struct file_operations *debugfs_real_fops(const struct file *filp);
@@ -97,28 +98,28 @@ ssize_t debugfs_attr_write(struct file *file, const char __user *buf,
struct dentry *debugfs_rename(struct dentry *old_dir, struct dentry *old_dentry,
struct dentry *new_dir, const char *new_name);
-struct dentry *debugfs_create_u8(const char *name, umode_t mode,
- struct dentry *parent, u8 *value);
-struct dentry *debugfs_create_u16(const char *name, umode_t mode,
- struct dentry *parent, u16 *value);
+void debugfs_create_u8(const char *name, umode_t mode, struct dentry *parent,
+ u8 *value);
+void debugfs_create_u16(const char *name, umode_t mode, struct dentry *parent,
+ u16 *value);
struct dentry *debugfs_create_u32(const char *name, umode_t mode,
struct dentry *parent, u32 *value);
-struct dentry *debugfs_create_u64(const char *name, umode_t mode,
- struct dentry *parent, u64 *value);
+void debugfs_create_u64(const char *name, umode_t mode, struct dentry *parent,
+ u64 *value);
struct dentry *debugfs_create_ulong(const char *name, umode_t mode,
struct dentry *parent, unsigned long *value);
-struct dentry *debugfs_create_x8(const char *name, umode_t mode,
- struct dentry *parent, u8 *value);
-struct dentry *debugfs_create_x16(const char *name, umode_t mode,
- struct dentry *parent, u16 *value);
-struct dentry *debugfs_create_x32(const char *name, umode_t mode,
- struct dentry *parent, u32 *value);
-struct dentry *debugfs_create_x64(const char *name, umode_t mode,
- struct dentry *parent, u64 *value);
-struct dentry *debugfs_create_size_t(const char *name, umode_t mode,
- struct dentry *parent, size_t *value);
-struct dentry *debugfs_create_atomic_t(const char *name, umode_t mode,
- struct dentry *parent, atomic_t *value);
+void debugfs_create_x8(const char *name, umode_t mode, struct dentry *parent,
+ u8 *value);
+void debugfs_create_x16(const char *name, umode_t mode, struct dentry *parent,
+ u16 *value);
+void debugfs_create_x32(const char *name, umode_t mode, struct dentry *parent,
+ u32 *value);
+void debugfs_create_x64(const char *name, umode_t mode, struct dentry *parent,
+ u64 *value);
+void debugfs_create_size_t(const char *name, umode_t mode,
+ struct dentry *parent, size_t *value);
+void debugfs_create_atomic_t(const char *name, umode_t mode,
+ struct dentry *parent, atomic_t *value);
struct dentry *debugfs_create_bool(const char *name, umode_t mode,
struct dentry *parent, bool *value);
@@ -126,9 +127,9 @@ struct dentry *debugfs_create_blob(const char *name, umode_t mode,
struct dentry *parent,
struct debugfs_blob_wrapper *blob);
-struct dentry *debugfs_create_regset32(const char *name, umode_t mode,
- struct dentry *parent,
- struct debugfs_regset32 *regset);
+void debugfs_create_regset32(const char *name, umode_t mode,
+ struct dentry *parent,
+ struct debugfs_regset32 *regset);
void debugfs_print_regs32(struct seq_file *s, const struct debugfs_reg32 *regs,
int nregs, void __iomem *base, char *prefix);
@@ -180,13 +181,11 @@ static inline struct dentry *debugfs_create_file_unsafe(const char *name,
return ERR_PTR(-ENODEV);
}
-static inline struct dentry *debugfs_create_file_size(const char *name, umode_t mode,
- struct dentry *parent, void *data,
- const struct file_operations *fops,
- loff_t file_size)
-{
- return ERR_PTR(-ENODEV);
-}
+static inline void debugfs_create_file_size(const char *name, umode_t mode,
+ struct dentry *parent, void *data,
+ const struct file_operations *fops,
+ loff_t file_size)
+{ }
static inline struct dentry *debugfs_create_dir(const char *name,
struct dentry *parent)
@@ -203,7 +202,7 @@ static inline struct dentry *debugfs_create_symlink(const char *name,
static inline struct dentry *debugfs_create_automount(const char *name,
struct dentry *parent,
- struct vfsmount *(*f)(void *),
+ debugfs_automount_t f,
void *data)
{
return ERR_PTR(-ENODEV);
@@ -244,19 +243,11 @@ static inline struct dentry *debugfs_rename(struct dentry *old_dir, struct dentr
return ERR_PTR(-ENODEV);
}
-static inline struct dentry *debugfs_create_u8(const char *name, umode_t mode,
- struct dentry *parent,
- u8 *value)
-{
- return ERR_PTR(-ENODEV);
-}
+static inline void debugfs_create_u8(const char *name, umode_t mode,
+ struct dentry *parent, u8 *value) { }
-static inline struct dentry *debugfs_create_u16(const char *name, umode_t mode,
- struct dentry *parent,
- u16 *value)
-{
- return ERR_PTR(-ENODEV);
-}
+static inline void debugfs_create_u16(const char *name, umode_t mode,
+ struct dentry *parent, u16 *value) { }
static inline struct dentry *debugfs_create_u32(const char *name, umode_t mode,
struct dentry *parent,
@@ -265,12 +256,8 @@ static inline struct dentry *debugfs_create_u32(const char *name, umode_t mode,
return ERR_PTR(-ENODEV);
}
-static inline struct dentry *debugfs_create_u64(const char *name, umode_t mode,
- struct dentry *parent,
- u64 *value)
-{
- return ERR_PTR(-ENODEV);
-}
+static inline void debugfs_create_u64(const char *name, umode_t mode,
+ struct dentry *parent, u64 *value) { }
static inline struct dentry *debugfs_create_ulong(const char *name,
umode_t mode,
@@ -280,46 +267,26 @@ static inline struct dentry *debugfs_create_ulong(const char *name,
return ERR_PTR(-ENODEV);
}
-static inline struct dentry *debugfs_create_x8(const char *name, umode_t mode,
- struct dentry *parent,
- u8 *value)
-{
- return ERR_PTR(-ENODEV);
-}
+static inline void debugfs_create_x8(const char *name, umode_t mode,
+ struct dentry *parent, u8 *value) { }
-static inline struct dentry *debugfs_create_x16(const char *name, umode_t mode,
- struct dentry *parent,
- u16 *value)
-{
- return ERR_PTR(-ENODEV);
-}
+static inline void debugfs_create_x16(const char *name, umode_t mode,
+ struct dentry *parent, u16 *value) { }
-static inline struct dentry *debugfs_create_x32(const char *name, umode_t mode,
- struct dentry *parent,
- u32 *value)
-{
- return ERR_PTR(-ENODEV);
-}
+static inline void debugfs_create_x32(const char *name, umode_t mode,
+ struct dentry *parent, u32 *value) { }
-static inline struct dentry *debugfs_create_x64(const char *name, umode_t mode,
- struct dentry *parent,
- u64 *value)
-{
- return ERR_PTR(-ENODEV);
-}
+static inline void debugfs_create_x64(const char *name, umode_t mode,
+ struct dentry *parent, u64 *value) { }
-static inline struct dentry *debugfs_create_size_t(const char *name, umode_t mode,
- struct dentry *parent,
- size_t *value)
-{
- return ERR_PTR(-ENODEV);
-}
+static inline void debugfs_create_size_t(const char *name, umode_t mode,
+ struct dentry *parent, size_t *value)
+{ }
-static inline struct dentry *debugfs_create_atomic_t(const char *name, umode_t mode,
- struct dentry *parent, atomic_t *value)
-{
- return ERR_PTR(-ENODEV);
-}
+static inline void debugfs_create_atomic_t(const char *name, umode_t mode,
+ struct dentry *parent,
+ atomic_t *value)
+{ }
static inline struct dentry *debugfs_create_bool(const char *name, umode_t mode,
struct dentry *parent,
@@ -335,11 +302,10 @@ static inline struct dentry *debugfs_create_blob(const char *name, umode_t mode,
return ERR_PTR(-ENODEV);
}
-static inline struct dentry *debugfs_create_regset32(const char *name,
- umode_t mode, struct dentry *parent,
- struct debugfs_regset32 *regset)
+static inline void debugfs_create_regset32(const char *name, umode_t mode,
+ struct dentry *parent,
+ struct debugfs_regset32 *regset)
{
- return ERR_PTR(-ENODEV);
}
static inline void debugfs_print_regs32(struct seq_file *s, const struct debugfs_reg32 *regs,
@@ -383,4 +349,25 @@ static inline ssize_t debugfs_write_file_bool(struct file *file,
#endif
+/**
+ * debugfs_create_xul - create a debugfs file that is used to read and write an
+ * unsigned long value, formatted in hexadecimal
+ * @name: a pointer to a string containing the name of the file to create.
+ * @mode: the permission that the file should have
+ * @parent: a pointer to the parent dentry for this file. This should be a
+ * directory dentry if set. If this parameter is %NULL, then the
+ * file will be created in the root of the debugfs filesystem.
+ * @value: a pointer to the variable that the file should read from and
+ * write to.
+ */
+static inline void debugfs_create_xul(const char *name, umode_t mode,
+ struct dentry *parent,
+ unsigned long *value)
+{
+ if (sizeof(*value) == sizeof(u32))
+ debugfs_create_x32(name, mode, parent, (u32 *)value);
+ else
+ debugfs_create_x64(name, mode, parent, (u64 *)value);
+}
+
#endif
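
Editor's note on the debugfs.h changes above: most value-file creators now return void (with empty stubs when debugfs is disabled), and the new debugfs_create_xul() helper selects the x32 or x64 variant from sizeof(unsigned long). A short usage sketch follows; it is not part of the patch, and the directory name, file names and variables are illustrative assumptions.

	#include <linux/debugfs.h>

	static unsigned long example_last_addr;
	static atomic_t example_errors = ATOMIC_INIT(0);

	static void example_debugfs_init(void)
	{
		struct dentry *dir = debugfs_create_dir("example", NULL);

		/* No return-value checks needed: the creators return void,
		 * and the disabled-debugfs stubs simply do nothing. */
		debugfs_create_xul("last_addr", 0444, dir, &example_last_addr);
		debugfs_create_atomic_t("errors", 0444, dir, &example_errors);
	}
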
diff --git a/include/linux/dev_printk.h b/include/linux/dev_printk.h
new file mode 100644
index 000000000000..5aad06b4ca7b
--- /dev/null
+++ b/include/linux/dev_printk.h
@@ -0,0 +1,235 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * dev_printk.h - printk messages helpers for devices
+ *
+ * Copyright (c) 2001-2003 Patrick Mochel <mochel@osdl.org>
+ * Copyright (c) 2004-2009 Greg Kroah-Hartman <gregkh@suse.de>
+ * Copyright (c) 2008-2009 Novell Inc.
+ *
+ */
+
+#ifndef _DEVICE_PRINTK_H_
+#define _DEVICE_PRINTK_H_
+
+#include <linux/compiler.h>
+#include <linux/types.h>
+#include <linux/ratelimit.h>
+
+#ifndef dev_fmt
+#define dev_fmt(fmt) fmt
+#endif
+
+struct device;
+
+#ifdef CONFIG_PRINTK
+
+__printf(3, 0) __cold
+int dev_vprintk_emit(int level, const struct device *dev,
+ const char *fmt, va_list args);
+__printf(3, 4) __cold
+int dev_printk_emit(int level, const struct device *dev, const char *fmt, ...);
+
+__printf(3, 4) __cold
+void dev_printk(const char *level, const struct device *dev,
+ const char *fmt, ...);
+__printf(2, 3) __cold
+void _dev_emerg(const struct device *dev, const char *fmt, ...);
+__printf(2, 3) __cold
+void _dev_alert(const struct device *dev, const char *fmt, ...);
+__printf(2, 3) __cold
+void _dev_crit(const struct device *dev, const char *fmt, ...);
+__printf(2, 3) __cold
+void _dev_err(const struct device *dev, const char *fmt, ...);
+__printf(2, 3) __cold
+void _dev_warn(const struct device *dev, const char *fmt, ...);
+__printf(2, 3) __cold
+void _dev_notice(const struct device *dev, const char *fmt, ...);
+__printf(2, 3) __cold
+void _dev_info(const struct device *dev, const char *fmt, ...);
+
+#else
+
+static inline __printf(3, 0)
+int dev_vprintk_emit(int level, const struct device *dev,
+ const char *fmt, va_list args)
+{ return 0; }
+static inline __printf(3, 4)
+int dev_printk_emit(int level, const struct device *dev, const char *fmt, ...)
+{ return 0; }
+
+static inline void __dev_printk(const char *level, const struct device *dev,
+ struct va_format *vaf)
+{}
+static inline __printf(3, 4)
+void dev_printk(const char *level, const struct device *dev,
+ const char *fmt, ...)
+{}
+
+static inline __printf(2, 3)
+void _dev_emerg(const struct device *dev, const char *fmt, ...)
+{}
+static inline __printf(2, 3)
+void _dev_crit(const struct device *dev, const char *fmt, ...)
+{}
+static inline __printf(2, 3)
+void _dev_alert(const struct device *dev, const char *fmt, ...)
+{}
+static inline __printf(2, 3)
+void _dev_err(const struct device *dev, const char *fmt, ...)
+{}
+static inline __printf(2, 3)
+void _dev_warn(const struct device *dev, const char *fmt, ...)
+{}
+static inline __printf(2, 3)
+void _dev_notice(const struct device *dev, const char *fmt, ...)
+{}
+static inline __printf(2, 3)
+void _dev_info(const struct device *dev, const char *fmt, ...)
+{}
+
+#endif
+
+/*
+ * #defines for all the dev_<level> macros; they prefix each message with
+ * whatever the including file may have defined via #define dev_fmt(fmt).
+ */
+
+#define dev_emerg(dev, fmt, ...) \
+ _dev_emerg(dev, dev_fmt(fmt), ##__VA_ARGS__)
+#define dev_crit(dev, fmt, ...) \
+ _dev_crit(dev, dev_fmt(fmt), ##__VA_ARGS__)
+#define dev_alert(dev, fmt, ...) \
+ _dev_alert(dev, dev_fmt(fmt), ##__VA_ARGS__)
+#define dev_err(dev, fmt, ...) \
+ _dev_err(dev, dev_fmt(fmt), ##__VA_ARGS__)
+#define dev_warn(dev, fmt, ...) \
+ _dev_warn(dev, dev_fmt(fmt), ##__VA_ARGS__)
+#define dev_notice(dev, fmt, ...) \
+ _dev_notice(dev, dev_fmt(fmt), ##__VA_ARGS__)
+#define dev_info(dev, fmt, ...) \
+ _dev_info(dev, dev_fmt(fmt), ##__VA_ARGS__)
+
+#if defined(CONFIG_DYNAMIC_DEBUG)
+#define dev_dbg(dev, fmt, ...) \
+ dynamic_dev_dbg(dev, dev_fmt(fmt), ##__VA_ARGS__)
+#elif defined(DEBUG)
+#define dev_dbg(dev, fmt, ...) \
+ dev_printk(KERN_DEBUG, dev, dev_fmt(fmt), ##__VA_ARGS__)
+#else
+#define dev_dbg(dev, fmt, ...) \
+({ \
+ if (0) \
+ dev_printk(KERN_DEBUG, dev, dev_fmt(fmt), ##__VA_ARGS__); \
+})
+#endif
+
+#ifdef CONFIG_PRINTK
+#define dev_level_once(dev_level, dev, fmt, ...) \
+do { \
+ static bool __print_once __read_mostly; \
+ \
+ if (!__print_once) { \
+ __print_once = true; \
+ dev_level(dev, fmt, ##__VA_ARGS__); \
+ } \
+} while (0)
+#else
+#define dev_level_once(dev_level, dev, fmt, ...) \
+do { \
+ if (0) \
+ dev_level(dev, fmt, ##__VA_ARGS__); \
+} while (0)
+#endif
+
+#define dev_emerg_once(dev, fmt, ...) \
+ dev_level_once(dev_emerg, dev, fmt, ##__VA_ARGS__)
+#define dev_alert_once(dev, fmt, ...) \
+ dev_level_once(dev_alert, dev, fmt, ##__VA_ARGS__)
+#define dev_crit_once(dev, fmt, ...) \
+ dev_level_once(dev_crit, dev, fmt, ##__VA_ARGS__)
+#define dev_err_once(dev, fmt, ...) \
+ dev_level_once(dev_err, dev, fmt, ##__VA_ARGS__)
+#define dev_warn_once(dev, fmt, ...) \
+ dev_level_once(dev_warn, dev, fmt, ##__VA_ARGS__)
+#define dev_notice_once(dev, fmt, ...) \
+ dev_level_once(dev_notice, dev, fmt, ##__VA_ARGS__)
+#define dev_info_once(dev, fmt, ...) \
+ dev_level_once(dev_info, dev, fmt, ##__VA_ARGS__)
+#define dev_dbg_once(dev, fmt, ...) \
+ dev_level_once(dev_dbg, dev, fmt, ##__VA_ARGS__)
+
+#define dev_level_ratelimited(dev_level, dev, fmt, ...) \
+do { \
+ static DEFINE_RATELIMIT_STATE(_rs, \
+ DEFAULT_RATELIMIT_INTERVAL, \
+ DEFAULT_RATELIMIT_BURST); \
+ if (__ratelimit(&_rs)) \
+ dev_level(dev, fmt, ##__VA_ARGS__); \
+} while (0)
+
+#define dev_emerg_ratelimited(dev, fmt, ...) \
+ dev_level_ratelimited(dev_emerg, dev, fmt, ##__VA_ARGS__)
+#define dev_alert_ratelimited(dev, fmt, ...) \
+ dev_level_ratelimited(dev_alert, dev, fmt, ##__VA_ARGS__)
+#define dev_crit_ratelimited(dev, fmt, ...) \
+ dev_level_ratelimited(dev_crit, dev, fmt, ##__VA_ARGS__)
+#define dev_err_ratelimited(dev, fmt, ...) \
+ dev_level_ratelimited(dev_err, dev, fmt, ##__VA_ARGS__)
+#define dev_warn_ratelimited(dev, fmt, ...) \
+ dev_level_ratelimited(dev_warn, dev, fmt, ##__VA_ARGS__)
+#define dev_notice_ratelimited(dev, fmt, ...) \
+ dev_level_ratelimited(dev_notice, dev, fmt, ##__VA_ARGS__)
+#define dev_info_ratelimited(dev, fmt, ...) \
+ dev_level_ratelimited(dev_info, dev, fmt, ##__VA_ARGS__)
+#if defined(CONFIG_DYNAMIC_DEBUG)
+/* descriptor check is first to prevent flooding with "callbacks suppressed" */
+#define dev_dbg_ratelimited(dev, fmt, ...) \
+do { \
+ static DEFINE_RATELIMIT_STATE(_rs, \
+ DEFAULT_RATELIMIT_INTERVAL, \
+ DEFAULT_RATELIMIT_BURST); \
+ DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \
+ if (DYNAMIC_DEBUG_BRANCH(descriptor) && \
+ __ratelimit(&_rs)) \
+ __dynamic_dev_dbg(&descriptor, dev, dev_fmt(fmt), \
+ ##__VA_ARGS__); \
+} while (0)
+#elif defined(DEBUG)
+#define dev_dbg_ratelimited(dev, fmt, ...) \
+do { \
+ static DEFINE_RATELIMIT_STATE(_rs, \
+ DEFAULT_RATELIMIT_INTERVAL, \
+ DEFAULT_RATELIMIT_BURST); \
+ if (__ratelimit(&_rs)) \
+ dev_printk(KERN_DEBUG, dev, dev_fmt(fmt), ##__VA_ARGS__); \
+} while (0)
+#else
+#define dev_dbg_ratelimited(dev, fmt, ...) \
+do { \
+ if (0) \
+ dev_printk(KERN_DEBUG, dev, dev_fmt(fmt), ##__VA_ARGS__); \
+} while (0)
+#endif
+
+#ifdef VERBOSE_DEBUG
+#define dev_vdbg dev_dbg
+#else
+#define dev_vdbg(dev, fmt, ...) \
+({ \
+ if (0) \
+ dev_printk(KERN_DEBUG, dev, dev_fmt(fmt), ##__VA_ARGS__); \
+})
+#endif
+
+/*
+ * dev_WARN*() acts like dev_printk(), but with the key difference of
+ * using WARN/WARN_ONCE to include file/line information and a backtrace.
+ */
+#define dev_WARN(dev, format, arg...) \
+ WARN(1, "%s %s: " format, dev_driver_string(dev), dev_name(dev), ## arg);
+
+#define dev_WARN_ONCE(dev, condition, format, arg...) \
+ WARN_ONCE(condition, "%s %s: " format, \
+ dev_driver_string(dev), dev_name(dev), ## arg)
+
+#endif /* _DEVICE_PRINTK_H_ */
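
Editor's note on the new dev_printk.h above: it collects the dev_<level>(), dev_<level>_once() and dev_<level>_ratelimited() families, all routed through an optional dev_fmt() prefix that the including file may define before the header is pulled in. The sketch below is not part of the patch; the "examplehw:" prefix and messages are invented, and a real driver would normally get this header via <linux/device.h>.

	/* Define dev_fmt() first so every message carries the prefix. */
	#define dev_fmt(fmt) "examplehw: " fmt
	#include <linux/dev_printk.h>

	/* Sketch only: rate-limit the error path, keep the debug path quiet
	 * unless dynamic debug or DEBUG enables it. */
	static void example_report(struct device *dev, int err)
	{
		if (err)
			dev_err_ratelimited(dev, "transfer failed: %d\n", err);
		else
			dev_dbg(dev, "transfer complete\n");
	}
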
diff --git a/include/linux/devfreq.h b/include/linux/devfreq.h
index 2bae9ed3c783..57e871a559a9 100644
--- a/include/linux/devfreq.h
+++ b/include/linux/devfreq.h
@@ -13,6 +13,7 @@
#include <linux/device.h>
#include <linux/notifier.h>
#include <linux/pm_opp.h>
+#include <linux/pm_qos.h>
#define DEVFREQ_NAME_LEN 16
@@ -107,6 +108,20 @@ struct devfreq_dev_profile {
};
/**
+ * struct devfreq_stats - Statistics of devfreq device behavior
+ * @total_trans: Number of devfreq transitions.
+ * @trans_table: Statistics of devfreq transitions.
+ * @time_in_state: Statistics of devfreq states.
+ * @last_update: The last time stats were updated.
+ */
+struct devfreq_stats {
+ unsigned int total_trans;
+ unsigned int *trans_table;
+ u64 *time_in_state;
+ u64 last_update;
+};
+
+/**
* struct devfreq - Device devfreq structure
* @node: list node - contains the devices with devfreq that have been
* registered.
@@ -121,29 +136,29 @@ struct devfreq_dev_profile {
* devfreq.nb to the corresponding register notifier call chain.
* @work: delayed work for load monitoring.
* @previous_freq: previously configured frequency value.
+ * @last_status: devfreq user device info, performance statistics
* @data: Private data of the governor. The devfreq framework does not
* touch this.
- * @min_freq: Limit minimum frequency requested by user (0: none)
- * @max_freq: Limit maximum frequency requested by user (0: none)
+ * @user_min_freq_req: PM QoS minimum frequency request from user (via sysfs)
+ * @user_max_freq_req: PM QoS maximum frequency request from user (via sysfs)
* @scaling_min_freq: Limit minimum frequency requested by OPP interface
* @scaling_max_freq: Limit maximum frequency requested by OPP interface
* @stop_polling: devfreq polling status of a device.
* @suspend_freq: frequency of a device set during suspend phase.
* @resume_freq: frequency of a device set in resume phase.
* @suspend_count: suspend requests counter for a device.
- * @total_trans: Number of devfreq transitions
- * @trans_table: Statistics of devfreq transitions
- * @time_in_state: Statistics of devfreq states
- * @last_stat_updated: The last time stat updated
+ * @stats: Statistics of devfreq device behavior
* @transition_notifier_list: list head of DEVFREQ_TRANSITION_NOTIFIER notifier
+ * @nb_min: Notifier block for DEV_PM_QOS_MIN_FREQUENCY
+ * @nb_max: Notifier block for DEV_PM_QOS_MAX_FREQUENCY
*
- * This structure stores the devfreq information for a give device.
+ * This structure stores the devfreq information for a given device.
*
* Note that when a governor accesses entries in struct devfreq in its
* functions except for the context of callbacks defined in struct
* devfreq_governor, the governor should protect its access with the
* struct mutex lock in struct devfreq. A governor may use this mutex
- * to protect its own private data in void *data as well.
+ * to protect its own private data in ``void *data`` as well.
*/
struct devfreq {
struct list_head node;
@@ -161,8 +176,8 @@ struct devfreq {
void *data; /* private data for governors */
- unsigned long min_freq;
- unsigned long max_freq;
+ struct dev_pm_qos_request user_min_freq_req;
+ struct dev_pm_qos_request user_max_freq_req;
unsigned long scaling_min_freq;
unsigned long scaling_max_freq;
bool stop_polling;
@@ -171,13 +186,13 @@ struct devfreq {
unsigned long resume_freq;
atomic_t suspend_count;
- /* information for device frequency transition */
- unsigned int total_trans;
- unsigned int *trans_table;
- unsigned long *time_in_state;
- unsigned long last_stat_updated;
+ /* information for device frequency transitions */
+ struct devfreq_stats stats;
struct srcu_notifier_head transition_notifier_list;
+
+ struct notifier_block nb_min;
+ struct notifier_block nb_max;
};
struct devfreq_freqs {
@@ -186,24 +201,23 @@ struct devfreq_freqs {
};
#if defined(CONFIG_PM_DEVFREQ)
-extern struct devfreq *devfreq_add_device(struct device *dev,
- struct devfreq_dev_profile *profile,
- const char *governor_name,
- void *data);
-extern int devfreq_remove_device(struct devfreq *devfreq);
-extern struct devfreq *devm_devfreq_add_device(struct device *dev,
- struct devfreq_dev_profile *profile,
- const char *governor_name,
- void *data);
-extern void devm_devfreq_remove_device(struct device *dev,
- struct devfreq *devfreq);
+struct devfreq *devfreq_add_device(struct device *dev,
+ struct devfreq_dev_profile *profile,
+ const char *governor_name,
+ void *data);
+int devfreq_remove_device(struct devfreq *devfreq);
+struct devfreq *devm_devfreq_add_device(struct device *dev,
+ struct devfreq_dev_profile *profile,
+ const char *governor_name,
+ void *data);
+void devm_devfreq_remove_device(struct device *dev, struct devfreq *devfreq);
/* Supposed to be called by PM callbacks */
-extern int devfreq_suspend_device(struct devfreq *devfreq);
-extern int devfreq_resume_device(struct devfreq *devfreq);
+int devfreq_suspend_device(struct devfreq *devfreq);
+int devfreq_resume_device(struct devfreq *devfreq);
-extern void devfreq_suspend(void);
-extern void devfreq_resume(void);
+void devfreq_suspend(void);
+void devfreq_resume(void);
/**
* update_devfreq() - Reevaluate the device and configure frequency
@@ -211,39 +225,38 @@ extern void devfreq_resume(void);
*
* Note: devfreq->lock must be held
*/
-extern int update_devfreq(struct devfreq *devfreq);
+int update_devfreq(struct devfreq *devfreq);
/* Helper functions for devfreq user device driver with OPP. */
-extern struct dev_pm_opp *devfreq_recommended_opp(struct device *dev,
- unsigned long *freq, u32 flags);
-extern int devfreq_register_opp_notifier(struct device *dev,
- struct devfreq *devfreq);
-extern int devfreq_unregister_opp_notifier(struct device *dev,
- struct devfreq *devfreq);
-extern int devm_devfreq_register_opp_notifier(struct device *dev,
- struct devfreq *devfreq);
-extern void devm_devfreq_unregister_opp_notifier(struct device *dev,
- struct devfreq *devfreq);
-extern int devfreq_register_notifier(struct devfreq *devfreq,
- struct notifier_block *nb,
- unsigned int list);
-extern int devfreq_unregister_notifier(struct devfreq *devfreq,
- struct notifier_block *nb,
- unsigned int list);
-extern int devm_devfreq_register_notifier(struct device *dev,
+struct dev_pm_opp *devfreq_recommended_opp(struct device *dev,
+ unsigned long *freq, u32 flags);
+int devfreq_register_opp_notifier(struct device *dev,
+ struct devfreq *devfreq);
+int devfreq_unregister_opp_notifier(struct device *dev,
+ struct devfreq *devfreq);
+int devm_devfreq_register_opp_notifier(struct device *dev,
+ struct devfreq *devfreq);
+void devm_devfreq_unregister_opp_notifier(struct device *dev,
+ struct devfreq *devfreq);
+int devfreq_register_notifier(struct devfreq *devfreq,
+ struct notifier_block *nb,
+ unsigned int list);
+int devfreq_unregister_notifier(struct devfreq *devfreq,
+ struct notifier_block *nb,
+ unsigned int list);
+int devm_devfreq_register_notifier(struct device *dev,
struct devfreq *devfreq,
struct notifier_block *nb,
unsigned int list);
-extern void devm_devfreq_unregister_notifier(struct device *dev,
+void devm_devfreq_unregister_notifier(struct device *dev,
struct devfreq *devfreq,
struct notifier_block *nb,
unsigned int list);
-extern struct devfreq *devfreq_get_devfreq_by_phandle(struct device *dev,
- int index);
+struct devfreq *devfreq_get_devfreq_by_phandle(struct device *dev, int index);
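
For orientation, a minimal sketch of how a driver might use the declarations above; the foo names, clock handle, and governor string are illustrative assumptions, not part of this patch:

static int foo_target(struct device *dev, unsigned long *freq, u32 flags)
{
	struct foo *foo = dev_get_drvdata(dev);	/* hypothetical driver data */
	struct dev_pm_opp *opp;

	/* Snap the requested rate to a valid OPP, then program it. */
	opp = devfreq_recommended_opp(dev, freq, flags);
	if (IS_ERR(opp))
		return PTR_ERR(opp);
	dev_pm_opp_put(opp);

	return clk_set_rate(foo->clk, *freq);
}

static struct devfreq_dev_profile foo_profile = {
	.polling_ms	= 100,
	.target		= foo_target,
};

/* in foo_probe(): */
	foo->devfreq = devm_devfreq_add_device(dev, &foo_profile,
					       "simple_ondemand", NULL);
	if (IS_ERR(foo->devfreq))
		return PTR_ERR(foo->devfreq);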
#if IS_ENABLED(CONFIG_DEVFREQ_GOV_SIMPLE_ONDEMAND)
/**
- * struct devfreq_simple_ondemand_data - void *data fed to struct devfreq
+ * struct devfreq_simple_ondemand_data - ``void *data`` fed to struct devfreq
* and devfreq_add_device
* @upthreshold: If the load is over this value, the frequency jumps.
* Specify 0 to use the default. Valid value = 0 to 100.
@@ -263,7 +276,7 @@ struct devfreq_simple_ondemand_data {
#if IS_ENABLED(CONFIG_DEVFREQ_GOV_PASSIVE)
/**
- * struct devfreq_passive_data - void *data fed to struct devfreq
+ * struct devfreq_passive_data - ``void *data`` fed to struct devfreq
* and devfreq_add_device
* @parent: the devfreq instance of parent device.
* @get_target_freq: Optional callback, Returns desired operating frequency
@@ -296,9 +309,9 @@ struct devfreq_passive_data {
#else /* !CONFIG_PM_DEVFREQ */
static inline struct devfreq *devfreq_add_device(struct device *dev,
- struct devfreq_dev_profile *profile,
- const char *governor_name,
- void *data)
+ struct devfreq_dev_profile *profile,
+ const char *governor_name,
+ void *data)
{
return ERR_PTR(-ENOSYS);
}
@@ -335,31 +348,31 @@ static inline void devfreq_suspend(void) {}
static inline void devfreq_resume(void) {}
static inline struct dev_pm_opp *devfreq_recommended_opp(struct device *dev,
- unsigned long *freq, u32 flags)
+ unsigned long *freq, u32 flags)
{
return ERR_PTR(-EINVAL);
}
static inline int devfreq_register_opp_notifier(struct device *dev,
- struct devfreq *devfreq)
+ struct devfreq *devfreq)
{
return -EINVAL;
}
static inline int devfreq_unregister_opp_notifier(struct device *dev,
- struct devfreq *devfreq)
+ struct devfreq *devfreq)
{
return -EINVAL;
}
static inline int devm_devfreq_register_opp_notifier(struct device *dev,
- struct devfreq *devfreq)
+ struct devfreq *devfreq)
{
return -EINVAL;
}
static inline void devm_devfreq_unregister_opp_notifier(struct device *dev,
- struct devfreq *devfreq)
+ struct devfreq *devfreq)
{
}
@@ -378,22 +391,22 @@ static inline int devfreq_unregister_notifier(struct devfreq *devfreq,
}
static inline int devm_devfreq_register_notifier(struct device *dev,
- struct devfreq *devfreq,
- struct notifier_block *nb,
- unsigned int list)
+ struct devfreq *devfreq,
+ struct notifier_block *nb,
+ unsigned int list)
{
return 0;
}
static inline void devm_devfreq_unregister_notifier(struct device *dev,
- struct devfreq *devfreq,
- struct notifier_block *nb,
- unsigned int list)
+ struct devfreq *devfreq,
+ struct notifier_block *nb,
+ unsigned int list)
{
}
static inline struct devfreq *devfreq_get_devfreq_by_phandle(struct device *dev,
- int index)
+ int index)
{
return ERR_PTR(-ENODEV);
}
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index 399ad8632356..475668c69dbc 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -17,6 +17,7 @@
struct dm_dev;
struct dm_target;
struct dm_table;
+struct dm_report_zones_args;
struct mapped_device;
struct bio_vec;
@@ -93,9 +94,9 @@ typedef int (*dm_message_fn) (struct dm_target *ti, unsigned argc, char **argv,
typedef int (*dm_prepare_ioctl_fn) (struct dm_target *ti, struct block_device **bdev);
-typedef int (*dm_report_zones_fn) (struct dm_target *ti, sector_t sector,
- struct blk_zone *zones,
- unsigned int *nr_zones);
+typedef int (*dm_report_zones_fn) (struct dm_target *ti,
+ struct dm_report_zones_args *args,
+ unsigned int nr_zones);
/*
* These iteration functions are typically used to check (and combine)
@@ -422,10 +423,23 @@ struct gendisk *dm_disk(struct mapped_device *md);
int dm_suspended(struct dm_target *ti);
int dm_noflush_suspending(struct dm_target *ti);
void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors);
-void dm_remap_zone_report(struct dm_target *ti, sector_t start,
- struct blk_zone *zones, unsigned int *nr_zones);
union map_info *dm_get_rq_mapinfo(struct request *rq);
+#ifdef CONFIG_BLK_DEV_ZONED
+struct dm_report_zones_args {
+ struct dm_target *tgt;
+ sector_t next_sector;
+
+ void *orig_data;
+ report_zones_cb orig_cb;
+ unsigned int zone_idx;
+
+ /* must be filled by ->report_zones before calling dm_report_zones_cb */
+ sector_t start;
+};
+int dm_report_zones_cb(struct blk_zone *zone, unsigned int idx, void *data);
+#endif /* CONFIG_BLK_DEV_ZONED */
+
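For orientation, a hedged sketch of a zoned target's ->report_zones() under the new calling convention, loosely modeled on a linear-style target; the foo_c fields and foo_map_sector() are assumptions:

static int foo_report_zones(struct dm_target *ti,
			    struct dm_report_zones_args *args,
			    unsigned int nr_zones)
{
	struct foo_c *fc = ti->private;		/* hypothetical target data */
	sector_t sector = foo_map_sector(ti, args->next_sector);

	/* dm_report_zones_cb() remaps zones using the start filled in here. */
	args->start = fc->start;

	return blkdev_report_zones(fc->dev->bdev, sector, nr_zones,
				   dm_report_zones_cb, args);
}
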
/*
* Device mapper functions to parse and create devices specified by the
* parameter "dm-mod.create="
@@ -594,9 +608,6 @@ void *dm_vcalloc(unsigned long nmemb, unsigned long elem_size);
*/
#define dm_round_up(n, sz) (dm_div_up((n), (sz)) * (sz))
-#define dm_array_too_big(fixed, obj, num) \
- ((num) > (UINT_MAX - (fixed)) / (obj))
-
/*
* Sector offset taken relative to the start of the target instead of
* relative to the start of the device.
diff --git a/include/linux/device.h b/include/linux/device.h
index 297239a08bb7..fa04dfd22bbc 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -12,6 +12,7 @@
#ifndef _DEVICE_H_
#define _DEVICE_H_
+#include <linux/dev_printk.h>
#include <linux/ioport.h>
#include <linux/kobject.h>
#include <linux/klist.h>
@@ -22,10 +23,12 @@
#include <linux/mutex.h>
#include <linux/pm.h>
#include <linux/atomic.h>
-#include <linux/ratelimit.h>
#include <linux/uidgid.h>
#include <linux/gfp.h>
#include <linux/overflow.h>
+#include <linux/device/bus.h>
+#include <linux/device/class.h>
+#include <linux/device/driver.h>
#include <asm/device.h>
struct device;
@@ -35,7 +38,6 @@ struct driver_private;
struct module;
struct class;
struct subsys_private;
-struct bus_type;
struct device_node;
struct fwnode_handle;
struct iommu_ops;
@@ -44,474 +46,6 @@ struct iommu_fwspec;
struct dev_pin_info;
struct iommu_param;
-struct bus_attribute {
- struct attribute attr;
- ssize_t (*show)(struct bus_type *bus, char *buf);
- ssize_t (*store)(struct bus_type *bus, const char *buf, size_t count);
-};
-
-#define BUS_ATTR_RW(_name) \
- struct bus_attribute bus_attr_##_name = __ATTR_RW(_name)
-#define BUS_ATTR_RO(_name) \
- struct bus_attribute bus_attr_##_name = __ATTR_RO(_name)
-#define BUS_ATTR_WO(_name) \
- struct bus_attribute bus_attr_##_name = __ATTR_WO(_name)
-
-extern int __must_check bus_create_file(struct bus_type *,
- struct bus_attribute *);
-extern void bus_remove_file(struct bus_type *, struct bus_attribute *);
-
-/**
- * struct bus_type - The bus type of the device
- *
- * @name: The name of the bus.
- * @dev_name: Used for subsystems to enumerate devices like ("foo%u", dev->id).
- * @dev_root: Default device to use as the parent.
- * @bus_groups: Default attributes of the bus.
- * @dev_groups: Default attributes of the devices on the bus.
- * @drv_groups: Default attributes of the device drivers on the bus.
- * @match: Called, perhaps multiple times, whenever a new device or driver
- * is added for this bus. It should return a positive value if the
- * given device can be handled by the given driver and zero
- * otherwise. It may also return error code if determining that
- * the driver supports the device is not possible. In case of
- * -EPROBE_DEFER it will queue the device for deferred probing.
- * @uevent: Called when a device is added, removed, or a few other things
- * that generate uevents to add the environment variables.
- * @probe: Called when a new device or driver add to this bus, and callback
- * the specific driver's probe to initial the matched device.
- * @remove: Called when a device removed from this bus.
- * @shutdown: Called at shut-down time to quiesce the device.
- *
- * @online: Called to put the device back online (after offlining it).
- * @offline: Called to put the device offline for hot-removal. May fail.
- *
- * @suspend: Called when a device on this bus wants to go to sleep mode.
- * @resume: Called to bring a device on this bus out of sleep mode.
- * @num_vf: Called to find out how many virtual functions a device on this
- * bus supports.
- * @dma_configure: Called to setup DMA configuration on a device on
- * this bus.
- * @pm: Power management operations of this bus, callback the specific
- * device driver's pm-ops.
- * @iommu_ops: IOMMU specific operations for this bus, used to attach IOMMU
- * driver implementations to a bus and allow the driver to do
- * bus-specific setup
- * @p: The private data of the driver core, only the driver core can
- * touch this.
- * @lock_key: Lock class key for use by the lock validator
- * @need_parent_lock: When probing or removing a device on this bus, the
- * device core should lock the device's parent.
- *
- * A bus is a channel between the processor and one or more devices. For the
- * purposes of the device model, all devices are connected via a bus, even if
- * it is an internal, virtual, "platform" bus. Buses can plug into each other.
- * A USB controller is usually a PCI device, for example. The device model
- * represents the actual connections between buses and the devices they control.
- * A bus is represented by the bus_type structure. It contains the name, the
- * default attributes, the bus' methods, PM operations, and the driver core's
- * private data.
- */
-struct bus_type {
- const char *name;
- const char *dev_name;
- struct device *dev_root;
- const struct attribute_group **bus_groups;
- const struct attribute_group **dev_groups;
- const struct attribute_group **drv_groups;
-
- int (*match)(struct device *dev, struct device_driver *drv);
- int (*uevent)(struct device *dev, struct kobj_uevent_env *env);
- int (*probe)(struct device *dev);
- int (*remove)(struct device *dev);
- void (*shutdown)(struct device *dev);
-
- int (*online)(struct device *dev);
- int (*offline)(struct device *dev);
-
- int (*suspend)(struct device *dev, pm_message_t state);
- int (*resume)(struct device *dev);
-
- int (*num_vf)(struct device *dev);
-
- int (*dma_configure)(struct device *dev);
-
- const struct dev_pm_ops *pm;
-
- const struct iommu_ops *iommu_ops;
-
- struct subsys_private *p;
- struct lock_class_key lock_key;
-
- bool need_parent_lock;
-};
-
-extern int __must_check bus_register(struct bus_type *bus);
-
-extern void bus_unregister(struct bus_type *bus);
-
-extern int __must_check bus_rescan_devices(struct bus_type *bus);
-
-/* iterator helpers for buses */
-struct subsys_dev_iter {
- struct klist_iter ki;
- const struct device_type *type;
-};
-void subsys_dev_iter_init(struct subsys_dev_iter *iter,
- struct bus_type *subsys,
- struct device *start,
- const struct device_type *type);
-struct device *subsys_dev_iter_next(struct subsys_dev_iter *iter);
-void subsys_dev_iter_exit(struct subsys_dev_iter *iter);
-
-int device_match_name(struct device *dev, const void *name);
-int device_match_of_node(struct device *dev, const void *np);
-int device_match_fwnode(struct device *dev, const void *fwnode);
-int device_match_devt(struct device *dev, const void *pdevt);
-int device_match_acpi_dev(struct device *dev, const void *adev);
-int device_match_any(struct device *dev, const void *unused);
-
-int bus_for_each_dev(struct bus_type *bus, struct device *start, void *data,
- int (*fn)(struct device *dev, void *data));
-struct device *bus_find_device(struct bus_type *bus, struct device *start,
- const void *data,
- int (*match)(struct device *dev, const void *data));
-/**
- * bus_find_device_by_name - device iterator for locating a particular device
- * of a specific name.
- * @bus: bus type
- * @start: Device to begin with
- * @name: name of the device to match
- */
-static inline struct device *bus_find_device_by_name(struct bus_type *bus,
- struct device *start,
- const char *name)
-{
- return bus_find_device(bus, start, name, device_match_name);
-}
-
-/**
- * bus_find_device_by_of_node : device iterator for locating a particular device
- * matching the of_node.
- * @bus: bus type
- * @np: of_node of the device to match.
- */
-static inline struct device *
-bus_find_device_by_of_node(struct bus_type *bus, const struct device_node *np)
-{
- return bus_find_device(bus, NULL, np, device_match_of_node);
-}
-
-/**
- * bus_find_device_by_fwnode : device iterator for locating a particular device
- * matching the fwnode.
- * @bus: bus type
- * @fwnode: fwnode of the device to match.
- */
-static inline struct device *
-bus_find_device_by_fwnode(struct bus_type *bus, const struct fwnode_handle *fwnode)
-{
- return bus_find_device(bus, NULL, fwnode, device_match_fwnode);
-}
-
-/**
- * bus_find_device_by_devt : device iterator for locating a particular device
- * matching the device type.
- * @bus: bus type
- * @devt: device type of the device to match.
- */
-static inline struct device *bus_find_device_by_devt(struct bus_type *bus,
- dev_t devt)
-{
- return bus_find_device(bus, NULL, &devt, device_match_devt);
-}
-
-/**
- * bus_find_next_device - Find the next device after a given device in a
- * given bus.
- * @bus: bus type
- * @cur: device to begin the search with.
- */
-static inline struct device *
-bus_find_next_device(struct bus_type *bus,struct device *cur)
-{
- return bus_find_device(bus, cur, NULL, device_match_any);
-}
-
-#ifdef CONFIG_ACPI
-struct acpi_device;
-
-/**
- * bus_find_device_by_acpi_dev : device iterator for locating a particular device
- * matching the ACPI COMPANION device.
- * @bus: bus type
- * @adev: ACPI COMPANION device to match.
- */
-static inline struct device *
-bus_find_device_by_acpi_dev(struct bus_type *bus, const struct acpi_device *adev)
-{
- return bus_find_device(bus, NULL, adev, device_match_acpi_dev);
-}
-#else
-static inline struct device *
-bus_find_device_by_acpi_dev(struct bus_type *bus, const void *adev)
-{
- return NULL;
-}
-#endif
-
-struct device *subsys_find_device_by_id(struct bus_type *bus, unsigned int id,
- struct device *hint);
-int bus_for_each_drv(struct bus_type *bus, struct device_driver *start,
- void *data, int (*fn)(struct device_driver *, void *));
-void bus_sort_breadthfirst(struct bus_type *bus,
- int (*compare)(const struct device *a,
- const struct device *b));
-/*
- * Bus notifiers: Get notified of addition/removal of devices
- * and binding/unbinding of drivers to devices.
- * In the long run, it should be a replacement for the platform
- * notify hooks.
- */
-struct notifier_block;
-
-extern int bus_register_notifier(struct bus_type *bus,
- struct notifier_block *nb);
-extern int bus_unregister_notifier(struct bus_type *bus,
- struct notifier_block *nb);
-
-/* All 4 notifers below get called with the target struct device *
- * as an argument. Note that those functions are likely to be called
- * with the device lock held in the core, so be careful.
- */
-#define BUS_NOTIFY_ADD_DEVICE 0x00000001 /* device added */
-#define BUS_NOTIFY_DEL_DEVICE 0x00000002 /* device to be removed */
-#define BUS_NOTIFY_REMOVED_DEVICE 0x00000003 /* device removed */
-#define BUS_NOTIFY_BIND_DRIVER 0x00000004 /* driver about to be
- bound */
-#define BUS_NOTIFY_BOUND_DRIVER 0x00000005 /* driver bound to device */
-#define BUS_NOTIFY_UNBIND_DRIVER 0x00000006 /* driver about to be
- unbound */
-#define BUS_NOTIFY_UNBOUND_DRIVER 0x00000007 /* driver is unbound
- from the device */
-#define BUS_NOTIFY_DRIVER_NOT_BOUND 0x00000008 /* driver fails to be bound */
-
-extern struct kset *bus_get_kset(struct bus_type *bus);
-extern struct klist *bus_get_device_klist(struct bus_type *bus);
-
-/**
- * enum probe_type - device driver probe type to try
- * Device drivers may opt in for special handling of their
- * respective probe routines. This tells the core what to
- * expect and prefer.
- *
- * @PROBE_DEFAULT_STRATEGY: Used by drivers that work equally well
- * whether probed synchronously or asynchronously.
- * @PROBE_PREFER_ASYNCHRONOUS: Drivers for "slow" devices which
- * probing order is not essential for booting the system may
- * opt into executing their probes asynchronously.
- * @PROBE_FORCE_SYNCHRONOUS: Use this to annotate drivers that need
- * their probe routines to run synchronously with driver and
- * device registration (with the exception of -EPROBE_DEFER
- * handling - re-probing always ends up being done asynchronously).
- *
- * Note that the end goal is to switch the kernel to use asynchronous
- * probing by default, so annotating drivers with
- * %PROBE_PREFER_ASYNCHRONOUS is a temporary measure that allows us
- * to speed up boot process while we are validating the rest of the
- * drivers.
- */
-enum probe_type {
- PROBE_DEFAULT_STRATEGY,
- PROBE_PREFER_ASYNCHRONOUS,
- PROBE_FORCE_SYNCHRONOUS,
-};
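
As a hypothetical illustration of the probe_type annotation described above (driver and callback names assumed):

static struct platform_driver foo_driver = {
	.probe	= foo_probe,			/* hypothetical probe */
	.driver	= {
		.name		= "foo",
		.probe_type	= PROBE_PREFER_ASYNCHRONOUS,
	},
};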
-
-/**
- * struct device_driver - The basic device driver structure
- * @name: Name of the device driver.
- * @bus: The bus which the device of this driver belongs to.
- * @owner: The module owner.
- * @mod_name: Used for built-in modules.
- * @suppress_bind_attrs: Disables bind/unbind via sysfs.
- * @probe_type: Type of the probe (synchronous or asynchronous) to use.
- * @of_match_table: The open firmware table.
- * @acpi_match_table: The ACPI match table.
- * @probe: Called to query the existence of a specific device,
- * whether this driver can work with it, and bind the driver
- * to a specific device.
- * @remove: Called when the device is removed from the system to
- * unbind a device from this driver.
- * @shutdown: Called at shut-down time to quiesce the device.
- * @suspend: Called to put the device to sleep mode. Usually to a
- * low power state.
- * @resume: Called to bring a device from sleep mode.
- * @groups: Default attributes that get created by the driver core
- * automatically.
- * @dev_groups: Additional attributes attached to device instance once the
- * it is bound to the driver.
- * @pm: Power management operations of the device which matched
- * this driver.
- * @coredump: Called when sysfs entry is written to. The device driver
- * is expected to call the dev_coredump API resulting in a
- * uevent.
- * @p: Driver core's private data, no one other than the driver
- * core can touch this.
- *
- * The device driver-model tracks all of the drivers known to the system.
- * The main reason for this tracking is to enable the driver core to match
- * up drivers with new devices. Once drivers are known objects within the
- * system, however, a number of other things become possible. Device drivers
- * can export information and configuration variables that are independent
- * of any specific device.
- */
-struct device_driver {
- const char *name;
- struct bus_type *bus;
-
- struct module *owner;
- const char *mod_name; /* used for built-in modules */
-
- bool suppress_bind_attrs; /* disables bind/unbind via sysfs */
- enum probe_type probe_type;
-
- const struct of_device_id *of_match_table;
- const struct acpi_device_id *acpi_match_table;
-
- int (*probe) (struct device *dev);
- int (*remove) (struct device *dev);
- void (*shutdown) (struct device *dev);
- int (*suspend) (struct device *dev, pm_message_t state);
- int (*resume) (struct device *dev);
- const struct attribute_group **groups;
- const struct attribute_group **dev_groups;
-
- const struct dev_pm_ops *pm;
- void (*coredump) (struct device *dev);
-
- struct driver_private *p;
-};
-
-
-extern int __must_check driver_register(struct device_driver *drv);
-extern void driver_unregister(struct device_driver *drv);
-
-extern struct device_driver *driver_find(const char *name,
- struct bus_type *bus);
-extern int driver_probe_done(void);
-extern void wait_for_device_probe(void);
-
-/* sysfs interface for exporting driver attributes */
-
-struct driver_attribute {
- struct attribute attr;
- ssize_t (*show)(struct device_driver *driver, char *buf);
- ssize_t (*store)(struct device_driver *driver, const char *buf,
- size_t count);
-};
-
-#define DRIVER_ATTR_RW(_name) \
- struct driver_attribute driver_attr_##_name = __ATTR_RW(_name)
-#define DRIVER_ATTR_RO(_name) \
- struct driver_attribute driver_attr_##_name = __ATTR_RO(_name)
-#define DRIVER_ATTR_WO(_name) \
- struct driver_attribute driver_attr_##_name = __ATTR_WO(_name)
-
-extern int __must_check driver_create_file(struct device_driver *driver,
- const struct driver_attribute *attr);
-extern void driver_remove_file(struct device_driver *driver,
- const struct driver_attribute *attr);
-
-extern int __must_check driver_for_each_device(struct device_driver *drv,
- struct device *start,
- void *data,
- int (*fn)(struct device *dev,
- void *));
-struct device *driver_find_device(struct device_driver *drv,
- struct device *start, const void *data,
- int (*match)(struct device *dev, const void *data));
-
-/**
- * driver_find_device_by_name - device iterator for locating a particular device
- * of a specific name.
- * @drv: the driver we're iterating
- * @name: name of the device to match
- */
-static inline struct device *driver_find_device_by_name(struct device_driver *drv,
- const char *name)
-{
- return driver_find_device(drv, NULL, name, device_match_name);
-}
-
-/**
- * driver_find_device_by_of_node- device iterator for locating a particular device
- * by of_node pointer.
- * @drv: the driver we're iterating
- * @np: of_node pointer to match.
- */
-static inline struct device *
-driver_find_device_by_of_node(struct device_driver *drv,
- const struct device_node *np)
-{
- return driver_find_device(drv, NULL, np, device_match_of_node);
-}
-
-/**
- * driver_find_device_by_fwnode- device iterator for locating a particular device
- * by fwnode pointer.
- * @drv: the driver we're iterating
- * @fwnode: fwnode pointer to match.
- */
-static inline struct device *
-driver_find_device_by_fwnode(struct device_driver *drv,
- const struct fwnode_handle *fwnode)
-{
- return driver_find_device(drv, NULL, fwnode, device_match_fwnode);
-}
-
-/**
- * driver_find_device_by_devt- device iterator for locating a particular device
- * by devt.
- * @drv: the driver we're iterating
- * @devt: devt pointer to match.
- */
-static inline struct device *driver_find_device_by_devt(struct device_driver *drv,
- dev_t devt)
-{
- return driver_find_device(drv, NULL, &devt, device_match_devt);
-}
-
-static inline struct device *driver_find_next_device(struct device_driver *drv,
- struct device *start)
-{
- return driver_find_device(drv, start, NULL, device_match_any);
-}
-
-#ifdef CONFIG_ACPI
-/**
- * driver_find_device_by_acpi_dev : device iterator for locating a particular
- * device matching the ACPI_COMPANION device.
- * @drv: the driver we're iterating
- * @adev: ACPI_COMPANION device to match.
- */
-static inline struct device *
-driver_find_device_by_acpi_dev(struct device_driver *drv,
- const struct acpi_device *adev)
-{
- return driver_find_device(drv, NULL, adev, device_match_acpi_dev);
-}
-#else
-static inline struct device *
-driver_find_device_by_acpi_dev(struct device_driver *drv, const void *adev)
-{
- return NULL;
-}
-#endif
-
-void driver_deferred_probe_add(struct device *dev);
-int driver_deferred_probe_check_state(struct device *dev);
-int driver_deferred_probe_check_state_continue(struct device *dev);
-
/**
* struct subsys_interface - interfaces to device functions
* @name: name of the device function
@@ -541,246 +75,6 @@ int subsys_system_register(struct bus_type *subsys,
int subsys_virtual_register(struct bus_type *subsys,
const struct attribute_group **groups);
-/**
- * struct class - device classes
- * @name: Name of the class.
- * @owner: The module owner.
- * @class_groups: Default attributes of this class.
- * @dev_groups: Default attributes of the devices that belong to the class.
- * @dev_kobj: The kobject that represents this class and links it into the hierarchy.
- * @dev_uevent: Called when a device is added, removed from this class, or a
- * few other things that generate uevents to add the environment
- * variables.
- * @devnode: Callback to provide the devtmpfs.
- * @class_release: Called to release this class.
- * @dev_release: Called to release the device.
- * @shutdown_pre: Called at shut-down time before driver shutdown.
- * @ns_type: Callbacks so sysfs can detemine namespaces.
- * @namespace: Namespace of the device belongs to this class.
- * @get_ownership: Allows class to specify uid/gid of the sysfs directories
- * for the devices belonging to the class. Usually tied to
- * device's namespace.
- * @pm: The default device power management operations of this class.
- * @p: The private data of the driver core, no one other than the
- * driver core can touch this.
- *
- * A class is a higher-level view of a device that abstracts out low-level
- * implementation details. Drivers may see a SCSI disk or an ATA disk, but,
- * at the class level, they are all simply disks. Classes allow user space
- * to work with devices based on what they do, rather than how they are
- * connected or how they work.
- */
-struct class {
- const char *name;
- struct module *owner;
-
- const struct attribute_group **class_groups;
- const struct attribute_group **dev_groups;
- struct kobject *dev_kobj;
-
- int (*dev_uevent)(struct device *dev, struct kobj_uevent_env *env);
- char *(*devnode)(struct device *dev, umode_t *mode);
-
- void (*class_release)(struct class *class);
- void (*dev_release)(struct device *dev);
-
- int (*shutdown_pre)(struct device *dev);
-
- const struct kobj_ns_type_operations *ns_type;
- const void *(*namespace)(struct device *dev);
-
- void (*get_ownership)(struct device *dev, kuid_t *uid, kgid_t *gid);
-
- const struct dev_pm_ops *pm;
-
- struct subsys_private *p;
-};
-
-struct class_dev_iter {
- struct klist_iter ki;
- const struct device_type *type;
-};
-
-extern struct kobject *sysfs_dev_block_kobj;
-extern struct kobject *sysfs_dev_char_kobj;
-extern int __must_check __class_register(struct class *class,
- struct lock_class_key *key);
-extern void class_unregister(struct class *class);
-
-/* This is a #define to keep the compiler from merging different
- * instances of the __key variable */
-#define class_register(class) \
-({ \
- static struct lock_class_key __key; \
- __class_register(class, &__key); \
-})
-
-struct class_compat;
-struct class_compat *class_compat_register(const char *name);
-void class_compat_unregister(struct class_compat *cls);
-int class_compat_create_link(struct class_compat *cls, struct device *dev,
- struct device *device_link);
-void class_compat_remove_link(struct class_compat *cls, struct device *dev,
- struct device *device_link);
-
-extern void class_dev_iter_init(struct class_dev_iter *iter,
- struct class *class,
- struct device *start,
- const struct device_type *type);
-extern struct device *class_dev_iter_next(struct class_dev_iter *iter);
-extern void class_dev_iter_exit(struct class_dev_iter *iter);
-
-extern int class_for_each_device(struct class *class, struct device *start,
- void *data,
- int (*fn)(struct device *dev, void *data));
-extern struct device *class_find_device(struct class *class,
- struct device *start, const void *data,
- int (*match)(struct device *, const void *));
-
-/**
- * class_find_device_by_name - device iterator for locating a particular device
- * of a specific name.
- * @class: class type
- * @name: name of the device to match
- */
-static inline struct device *class_find_device_by_name(struct class *class,
- const char *name)
-{
- return class_find_device(class, NULL, name, device_match_name);
-}
-
-/**
- * class_find_device_by_of_node : device iterator for locating a particular device
- * matching the of_node.
- * @class: class type
- * @np: of_node of the device to match.
- */
-static inline struct device *
-class_find_device_by_of_node(struct class *class, const struct device_node *np)
-{
- return class_find_device(class, NULL, np, device_match_of_node);
-}
-
-/**
- * class_find_device_by_fwnode : device iterator for locating a particular device
- * matching the fwnode.
- * @class: class type
- * @fwnode: fwnode of the device to match.
- */
-static inline struct device *
-class_find_device_by_fwnode(struct class *class,
- const struct fwnode_handle *fwnode)
-{
- return class_find_device(class, NULL, fwnode, device_match_fwnode);
-}
-
-/**
- * class_find_device_by_devt : device iterator for locating a particular device
- * matching the device type.
- * @class: class type
- * @devt: device type of the device to match.
- */
-static inline struct device *class_find_device_by_devt(struct class *class,
- dev_t devt)
-{
- return class_find_device(class, NULL, &devt, device_match_devt);
-}
-
-#ifdef CONFIG_ACPI
-struct acpi_device;
-/**
- * class_find_device_by_acpi_dev : device iterator for locating a particular
- * device matching the ACPI_COMPANION device.
- * @class: class type
- * @adev: ACPI_COMPANION device to match.
- */
-static inline struct device *
-class_find_device_by_acpi_dev(struct class *class, const struct acpi_device *adev)
-{
- return class_find_device(class, NULL, adev, device_match_acpi_dev);
-}
-#else
-static inline struct device *
-class_find_device_by_acpi_dev(struct class *class, const void *adev)
-{
- return NULL;
-}
-#endif
-
-struct class_attribute {
- struct attribute attr;
- ssize_t (*show)(struct class *class, struct class_attribute *attr,
- char *buf);
- ssize_t (*store)(struct class *class, struct class_attribute *attr,
- const char *buf, size_t count);
-};
-
-#define CLASS_ATTR_RW(_name) \
- struct class_attribute class_attr_##_name = __ATTR_RW(_name)
-#define CLASS_ATTR_RO(_name) \
- struct class_attribute class_attr_##_name = __ATTR_RO(_name)
-#define CLASS_ATTR_WO(_name) \
- struct class_attribute class_attr_##_name = __ATTR_WO(_name)
-
-extern int __must_check class_create_file_ns(struct class *class,
- const struct class_attribute *attr,
- const void *ns);
-extern void class_remove_file_ns(struct class *class,
- const struct class_attribute *attr,
- const void *ns);
-
-static inline int __must_check class_create_file(struct class *class,
- const struct class_attribute *attr)
-{
- return class_create_file_ns(class, attr, NULL);
-}
-
-static inline void class_remove_file(struct class *class,
- const struct class_attribute *attr)
-{
- return class_remove_file_ns(class, attr, NULL);
-}
-
-/* Simple class attribute that is just a static string */
-struct class_attribute_string {
- struct class_attribute attr;
- char *str;
-};
-
-/* Currently read-only only */
-#define _CLASS_ATTR_STRING(_name, _mode, _str) \
- { __ATTR(_name, _mode, show_class_attr_string, NULL), _str }
-#define CLASS_ATTR_STRING(_name, _mode, _str) \
- struct class_attribute_string class_attr_##_name = \
- _CLASS_ATTR_STRING(_name, _mode, _str)
-
-extern ssize_t show_class_attr_string(struct class *class, struct class_attribute *attr,
- char *buf);
-
-struct class_interface {
- struct list_head node;
- struct class *class;
-
- int (*add_dev) (struct device *, struct class_interface *);
- void (*remove_dev) (struct device *, struct class_interface *);
-};
-
-extern int __must_check class_interface_register(struct class_interface *);
-extern void class_interface_unregister(struct class_interface *);
-
-extern struct class * __must_check __class_create(struct module *owner,
- const char *name,
- struct lock_class_key *key);
-extern void class_destroy(struct class *cls);
-
-/* This is a #define to keep the compiler from merging different
- * instances of the __key variable */
-#define class_create(owner, name) \
-({ \
- static struct lock_class_key __key; \
- __class_create(owner, name, &__key); \
-})
-
/*
* The type of device, "struct device" is embedded in. A class
* or bus can contain devices of different types
@@ -946,6 +240,8 @@ extern void devm_free_pages(struct device *dev, unsigned long addr);
void __iomem *devm_ioremap_resource(struct device *dev,
const struct resource *res);
+void __iomem *devm_ioremap_resource_wc(struct device *dev,
+ const struct resource *res);
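
A brief, assumed usage sketch for the new write-combined variant (resource index and variable names are illustrative):

	struct resource *res;
	void __iomem *base;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	base = devm_ioremap_resource_wc(&pdev->dev, res);	/* WC mapping */
	if (IS_ERR(base))
		return PTR_ERR(base);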
void __iomem *devm_of_iomap(struct device *dev,
struct device_node *node, int index,
@@ -1080,6 +376,7 @@ enum device_link_state {
* AUTOREMOVE_SUPPLIER: Remove the link automatically on supplier driver unbind.
* AUTOPROBE_CONSUMER: Probe consumer driver automatically after supplier binds.
* MANAGED: The core tracks presence of supplier/consumer drivers (internal).
+ * SYNC_STATE_ONLY: Link only affects sync_state() behavior.
*/
#define DL_FLAG_STATELESS BIT(0)
#define DL_FLAG_AUTOREMOVE_CONSUMER BIT(1)
@@ -1088,6 +385,7 @@ enum device_link_state {
#define DL_FLAG_AUTOREMOVE_SUPPLIER BIT(4)
#define DL_FLAG_AUTOPROBE_CONSUMER BIT(5)
#define DL_FLAG_MANAGED BIT(6)
+#define DL_FLAG_SYNC_STATE_ONLY BIT(7)
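
A hedged example of requesting such a link from a consumer's probe path (consumer_dev and supplier_dev are placeholders):

	struct device_link *link;

	/* Track the consumer for sync_state() purposes only; do not
	 * affect probe ordering or runtime PM. */
	link = device_link_add(consumer_dev, supplier_dev,
			       DL_FLAG_SYNC_STATE_ONLY);
	if (!link)
		dev_warn(consumer_dev, "failed to create device link\n");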
/**
* struct device_link - Device link representation.
@@ -1135,11 +433,18 @@ enum dl_dev_state {
* struct dev_links_info - Device data related to device links.
* @suppliers: List of links to supplier devices.
* @consumers: List of links to consumer devices.
+ * @needs_suppliers: Hook to global list of devices waiting for suppliers.
+ * @defer_sync: Hook to global list of devices that have deferred sync_state.
+ * @need_for_probe: If needs_suppliers is on a list, this indicates whether
+ * the suppliers are needed for probing or not.
* @status: Driver status information.
*/
struct dev_links_info {
struct list_head suppliers;
struct list_head consumers;
+ struct list_head needs_suppliers;
+ struct list_head defer_sync;
+ bool need_for_probe;
enum dl_dev_state status;
};
@@ -1186,8 +491,8 @@ struct dev_links_info {
* @coherent_dma_mask: Like dma_mask, but for alloc_coherent mapping as not all
* hardware supports 64-bit addresses for consistent allocations
* such descriptors.
- * @bus_dma_mask: Mask of an upstream bridge or bus which imposes a smaller DMA
- * limit than the device itself supports.
+ * @bus_dma_limit: Limit of an upstream bridge or bus which imposes a smaller
+ * DMA limit than the device itself supports.
* @dma_pfn_offset: offset of DMA memory range relatively of RAM
* @dma_parms: A low level driver may set these to teach IOMMU code about
* segment limitations.
@@ -1215,6 +520,9 @@ struct dev_links_info {
* @offline: Set after successful invocation of bus type's .offline().
* @of_node_reused: Set if the device-tree node is shared with an ancestor
* device.
+ * @state_synced: The hardware state of this device has been synced to match
+ * the software state of this device by calling the driver/bus
+ * sync_state() callback.
* @dma_coherent: this particular device is dma coherent, even if the
* architecture supports non-coherent devices.
*
@@ -1270,7 +578,7 @@ struct device {
not all hardware supports
64 bit addresses for consistent
allocations such descriptors. */
- u64 bus_dma_mask; /* upstream dma_mask constraint */
+ u64 bus_dma_limit; /* upstream dma constraint */
unsigned long dma_pfn_offset;
struct device_dma_parameters *dma_parms;
@@ -1311,6 +619,7 @@ struct device {
bool offline_disabled:1;
bool offline:1;
bool of_node_reused:1;
+ bool state_synced:1;
#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL)
@@ -1489,7 +798,16 @@ static inline struct device_node *dev_of_node(struct device *dev)
return dev->of_node;
}
-void driver_init(void);
+static inline bool dev_has_sync_state(struct device *dev)
+{
+ if (!dev)
+ return false;
+ if (dev->driver && dev->driver->sync_state)
+ return true;
+ if (dev->bus && dev->bus->sync_state)
+ return true;
+ return false;
+}
/*
* High level routines for use by the bus drivers
@@ -1633,13 +951,9 @@ extern void put_device(struct device *dev);
extern bool kill_device(struct device *dev);
#ifdef CONFIG_DEVTMPFS
-extern int devtmpfs_create_node(struct device *dev);
-extern int devtmpfs_delete_node(struct device *dev);
-extern int devtmpfs_mount(const char *mntdir);
+extern int devtmpfs_mount(void);
#else
-static inline int devtmpfs_create_node(struct device *dev) { return 0; }
-static inline int devtmpfs_delete_node(struct device *dev) { return 0; }
-static inline int devtmpfs_mount(const char *mountpoint) { return 0; }
+static inline int devtmpfs_mount(void) { return 0; }
#endif
/* drivers/base/power/shutdown.c */
@@ -1653,221 +967,8 @@ struct device_link *device_link_add(struct device *consumer,
struct device *supplier, u32 flags);
void device_link_del(struct device_link *link);
void device_link_remove(void *consumer, struct device *supplier);
-
-#ifndef dev_fmt
-#define dev_fmt(fmt) fmt
-#endif
-
-#ifdef CONFIG_PRINTK
-
-__printf(3, 0) __cold
-int dev_vprintk_emit(int level, const struct device *dev,
- const char *fmt, va_list args);
-__printf(3, 4) __cold
-int dev_printk_emit(int level, const struct device *dev, const char *fmt, ...);
-
-__printf(3, 4) __cold
-void dev_printk(const char *level, const struct device *dev,
- const char *fmt, ...);
-__printf(2, 3) __cold
-void _dev_emerg(const struct device *dev, const char *fmt, ...);
-__printf(2, 3) __cold
-void _dev_alert(const struct device *dev, const char *fmt, ...);
-__printf(2, 3) __cold
-void _dev_crit(const struct device *dev, const char *fmt, ...);
-__printf(2, 3) __cold
-void _dev_err(const struct device *dev, const char *fmt, ...);
-__printf(2, 3) __cold
-void _dev_warn(const struct device *dev, const char *fmt, ...);
-__printf(2, 3) __cold
-void _dev_notice(const struct device *dev, const char *fmt, ...);
-__printf(2, 3) __cold
-void _dev_info(const struct device *dev, const char *fmt, ...);
-
-#else
-
-static inline __printf(3, 0)
-int dev_vprintk_emit(int level, const struct device *dev,
- const char *fmt, va_list args)
-{ return 0; }
-static inline __printf(3, 4)
-int dev_printk_emit(int level, const struct device *dev, const char *fmt, ...)
-{ return 0; }
-
-static inline void __dev_printk(const char *level, const struct device *dev,
- struct va_format *vaf)
-{}
-static inline __printf(3, 4)
-void dev_printk(const char *level, const struct device *dev,
- const char *fmt, ...)
-{}
-
-static inline __printf(2, 3)
-void _dev_emerg(const struct device *dev, const char *fmt, ...)
-{}
-static inline __printf(2, 3)
-void _dev_crit(const struct device *dev, const char *fmt, ...)
-{}
-static inline __printf(2, 3)
-void _dev_alert(const struct device *dev, const char *fmt, ...)
-{}
-static inline __printf(2, 3)
-void _dev_err(const struct device *dev, const char *fmt, ...)
-{}
-static inline __printf(2, 3)
-void _dev_warn(const struct device *dev, const char *fmt, ...)
-{}
-static inline __printf(2, 3)
-void _dev_notice(const struct device *dev, const char *fmt, ...)
-{}
-static inline __printf(2, 3)
-void _dev_info(const struct device *dev, const char *fmt, ...)
-{}
-
-#endif
-
-/*
- * #defines for all the dev_<level> macros to prefix with whatever
- * possible use of #define dev_fmt(fmt) ...
- */
-
-#define dev_emerg(dev, fmt, ...) \
- _dev_emerg(dev, dev_fmt(fmt), ##__VA_ARGS__)
-#define dev_crit(dev, fmt, ...) \
- _dev_crit(dev, dev_fmt(fmt), ##__VA_ARGS__)
-#define dev_alert(dev, fmt, ...) \
- _dev_alert(dev, dev_fmt(fmt), ##__VA_ARGS__)
-#define dev_err(dev, fmt, ...) \
- _dev_err(dev, dev_fmt(fmt), ##__VA_ARGS__)
-#define dev_warn(dev, fmt, ...) \
- _dev_warn(dev, dev_fmt(fmt), ##__VA_ARGS__)
-#define dev_notice(dev, fmt, ...) \
- _dev_notice(dev, dev_fmt(fmt), ##__VA_ARGS__)
-#define dev_info(dev, fmt, ...) \
- _dev_info(dev, dev_fmt(fmt), ##__VA_ARGS__)
-
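
As an illustrative reminder of how the dev_fmt() hook above is typically used (the prefix string is an assumption):

/* Typically placed before the #includes of a driver source file. */
#define dev_fmt(fmt) "foo: " fmt

	/* Printed as "<driver> <device>: foo: transfer timed out" */
	dev_err(dev, "transfer timed out\n");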
-#if defined(CONFIG_DYNAMIC_DEBUG)
-#define dev_dbg(dev, fmt, ...) \
- dynamic_dev_dbg(dev, dev_fmt(fmt), ##__VA_ARGS__)
-#elif defined(DEBUG)
-#define dev_dbg(dev, fmt, ...) \
- dev_printk(KERN_DEBUG, dev, dev_fmt(fmt), ##__VA_ARGS__)
-#else
-#define dev_dbg(dev, fmt, ...) \
-({ \
- if (0) \
- dev_printk(KERN_DEBUG, dev, dev_fmt(fmt), ##__VA_ARGS__); \
-})
-#endif
-
-#ifdef CONFIG_PRINTK
-#define dev_level_once(dev_level, dev, fmt, ...) \
-do { \
- static bool __print_once __read_mostly; \
- \
- if (!__print_once) { \
- __print_once = true; \
- dev_level(dev, fmt, ##__VA_ARGS__); \
- } \
-} while (0)
-#else
-#define dev_level_once(dev_level, dev, fmt, ...) \
-do { \
- if (0) \
- dev_level(dev, fmt, ##__VA_ARGS__); \
-} while (0)
-#endif
-
-#define dev_emerg_once(dev, fmt, ...) \
- dev_level_once(dev_emerg, dev, fmt, ##__VA_ARGS__)
-#define dev_alert_once(dev, fmt, ...) \
- dev_level_once(dev_alert, dev, fmt, ##__VA_ARGS__)
-#define dev_crit_once(dev, fmt, ...) \
- dev_level_once(dev_crit, dev, fmt, ##__VA_ARGS__)
-#define dev_err_once(dev, fmt, ...) \
- dev_level_once(dev_err, dev, fmt, ##__VA_ARGS__)
-#define dev_warn_once(dev, fmt, ...) \
- dev_level_once(dev_warn, dev, fmt, ##__VA_ARGS__)
-#define dev_notice_once(dev, fmt, ...) \
- dev_level_once(dev_notice, dev, fmt, ##__VA_ARGS__)
-#define dev_info_once(dev, fmt, ...) \
- dev_level_once(dev_info, dev, fmt, ##__VA_ARGS__)
-#define dev_dbg_once(dev, fmt, ...) \
- dev_level_once(dev_dbg, dev, fmt, ##__VA_ARGS__)
-
-#define dev_level_ratelimited(dev_level, dev, fmt, ...) \
-do { \
- static DEFINE_RATELIMIT_STATE(_rs, \
- DEFAULT_RATELIMIT_INTERVAL, \
- DEFAULT_RATELIMIT_BURST); \
- if (__ratelimit(&_rs)) \
- dev_level(dev, fmt, ##__VA_ARGS__); \
-} while (0)
-
-#define dev_emerg_ratelimited(dev, fmt, ...) \
- dev_level_ratelimited(dev_emerg, dev, fmt, ##__VA_ARGS__)
-#define dev_alert_ratelimited(dev, fmt, ...) \
- dev_level_ratelimited(dev_alert, dev, fmt, ##__VA_ARGS__)
-#define dev_crit_ratelimited(dev, fmt, ...) \
- dev_level_ratelimited(dev_crit, dev, fmt, ##__VA_ARGS__)
-#define dev_err_ratelimited(dev, fmt, ...) \
- dev_level_ratelimited(dev_err, dev, fmt, ##__VA_ARGS__)
-#define dev_warn_ratelimited(dev, fmt, ...) \
- dev_level_ratelimited(dev_warn, dev, fmt, ##__VA_ARGS__)
-#define dev_notice_ratelimited(dev, fmt, ...) \
- dev_level_ratelimited(dev_notice, dev, fmt, ##__VA_ARGS__)
-#define dev_info_ratelimited(dev, fmt, ...) \
- dev_level_ratelimited(dev_info, dev, fmt, ##__VA_ARGS__)
-#if defined(CONFIG_DYNAMIC_DEBUG)
-/* descriptor check is first to prevent flooding with "callbacks suppressed" */
-#define dev_dbg_ratelimited(dev, fmt, ...) \
-do { \
- static DEFINE_RATELIMIT_STATE(_rs, \
- DEFAULT_RATELIMIT_INTERVAL, \
- DEFAULT_RATELIMIT_BURST); \
- DEFINE_DYNAMIC_DEBUG_METADATA(descriptor, fmt); \
- if (DYNAMIC_DEBUG_BRANCH(descriptor) && \
- __ratelimit(&_rs)) \
- __dynamic_dev_dbg(&descriptor, dev, dev_fmt(fmt), \
- ##__VA_ARGS__); \
-} while (0)
-#elif defined(DEBUG)
-#define dev_dbg_ratelimited(dev, fmt, ...) \
-do { \
- static DEFINE_RATELIMIT_STATE(_rs, \
- DEFAULT_RATELIMIT_INTERVAL, \
- DEFAULT_RATELIMIT_BURST); \
- if (__ratelimit(&_rs)) \
- dev_printk(KERN_DEBUG, dev, dev_fmt(fmt), ##__VA_ARGS__); \
-} while (0)
-#else
-#define dev_dbg_ratelimited(dev, fmt, ...) \
-do { \
- if (0) \
- dev_printk(KERN_DEBUG, dev, dev_fmt(fmt), ##__VA_ARGS__); \
-} while (0)
-#endif
-
-#ifdef VERBOSE_DEBUG
-#define dev_vdbg dev_dbg
-#else
-#define dev_vdbg(dev, fmt, ...) \
-({ \
- if (0) \
- dev_printk(KERN_DEBUG, dev, dev_fmt(fmt), ##__VA_ARGS__); \
-})
-#endif
-
-/*
- * dev_WARN*() acts like dev_printk(), but with the key difference of
- * using WARN/WARN_ONCE to include file/line information and a backtrace.
- */
-#define dev_WARN(dev, format, arg...) \
- WARN(1, "%s %s: " format, dev_driver_string(dev), dev_name(dev), ## arg);
-
-#define dev_WARN_ONCE(dev, condition, format, arg...) \
- WARN_ONCE(condition, "%s %s: " format, \
- dev_driver_string(dev), dev_name(dev), ## arg)
+void device_links_supplier_sync_state_pause(void);
+void device_links_supplier_sync_state_resume(void);
/* Create alias, so I can be autoloaded. */
#define MODULE_ALIAS_CHARDEV(major,minor) \
@@ -1881,52 +982,4 @@ extern long sysfs_deprecated;
#define sysfs_deprecated 0
#endif
-/**
- * module_driver() - Helper macro for drivers that don't do anything
- * special in module init/exit. This eliminates a lot of boilerplate.
- * Each module may only use this macro once, and calling it replaces
- * module_init() and module_exit().
- *
- * @__driver: driver name
- * @__register: register function for this driver type
- * @__unregister: unregister function for this driver type
- * @...: Additional arguments to be passed to __register and __unregister.
- *
- * Use this macro to construct bus specific macros for registering
- * drivers, and do not use it on its own.
- */
-#define module_driver(__driver, __register, __unregister, ...) \
-static int __init __driver##_init(void) \
-{ \
- return __register(&(__driver) , ##__VA_ARGS__); \
-} \
-module_init(__driver##_init); \
-static void __exit __driver##_exit(void) \
-{ \
- __unregister(&(__driver) , ##__VA_ARGS__); \
-} \
-module_exit(__driver##_exit);
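
A hedged sketch of the intended use, a bus wrapping module_driver() into its own one-line registration helper (the foo_* names are assumptions):

/* Bus-specific wrapper built from module_driver(); foo_* are placeholders. */
#define module_foo_driver(__foo_driver) \
	module_driver(__foo_driver, foo_driver_register, foo_driver_unregister)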
-
-/**
- * builtin_driver() - Helper macro for drivers that don't do anything
- * special in init and have no exit. This eliminates some boilerplate.
- * Each driver may only use this macro once, and calling it replaces
- * device_initcall (or in some cases, the legacy __initcall). This is
- * meant to be a direct parallel of module_driver() above but without
- * the __exit stuff that is not used for builtin cases.
- *
- * @__driver: driver name
- * @__register: register function for this driver type
- * @...: Additional arguments to be passed to __register
- *
- * Use this macro to construct bus specific macros for registering
- * drivers, and do not use it on its own.
- */
-#define builtin_driver(__driver, __register, ...) \
-static int __init __driver##_init(void) \
-{ \
- return __register(&(__driver) , ##__VA_ARGS__); \
-} \
-device_initcall(__driver##_init);
-
#endif /* _DEVICE_H_ */
diff --git a/include/linux/device/bus.h b/include/linux/device/bus.h
new file mode 100644
index 000000000000..1ea5e1d1545b
--- /dev/null
+++ b/include/linux/device/bus.h
@@ -0,0 +1,288 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * bus.h - the bus-specific portions of the driver model
+ *
+ * Copyright (c) 2001-2003 Patrick Mochel <mochel@osdl.org>
+ * Copyright (c) 2004-2009 Greg Kroah-Hartman <gregkh@suse.de>
+ * Copyright (c) 2008-2009 Novell Inc.
+ * Copyright (c) 2012-2019 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+ * Copyright (c) 2012-2019 Linux Foundation
+ *
+ * See Documentation/driver-api/driver-model/ for more information.
+ */
+
+#ifndef _DEVICE_BUS_H_
+#define _DEVICE_BUS_H_
+
+#include <linux/kobject.h>
+#include <linux/klist.h>
+#include <linux/pm.h>
+
+struct device_driver;
+struct fwnode_handle;
+
+/**
+ * struct bus_type - The bus type of the device
+ *
+ * @name: The name of the bus.
+ * @dev_name: Used for subsystems to enumerate devices like ("foo%u", dev->id).
+ * @dev_root: Default device to use as the parent.
+ * @bus_groups: Default attributes of the bus.
+ * @dev_groups: Default attributes of the devices on the bus.
+ * @drv_groups: Default attributes of the device drivers on the bus.
+ * @match: Called, perhaps multiple times, whenever a new device or driver
+ * is added for this bus. It should return a positive value if the
+ * given device can be handled by the given driver and zero
+ * otherwise. It may also return an error code if determining that
+ * the driver supports the device is not possible. In case of
+ * -EPROBE_DEFER it will queue the device for deferred probing.
+ * @uevent: Called when a device is added, removed, or a few other things
+ * that generate uevents to add the environment variables.
+ * @probe: Called when a new device or driver is added to this bus; it calls
+ * the driver's probe callback to initialize the matched device.
+ * @sync_state: Called to sync device state to software state after all the
+ * state tracking consumers linked to this device (present at
+ * the time of late_initcall) have successfully bound to a
+ * driver. If the device has no consumers, this function will
+ * be called at late_initcall_sync level. If the device has
+ * consumers that never bind to a driver, this function is not
+ * called until they do.
+ * @remove: Called when a device is removed from this bus.
+ * @shutdown: Called at shut-down time to quiesce the device.
+ *
+ * @online: Called to put the device back online (after offlining it).
+ * @offline: Called to put the device offline for hot-removal. May fail.
+ *
+ * @suspend: Called when a device on this bus wants to go to sleep mode.
+ * @resume: Called to bring a device on this bus out of sleep mode.
+ * @num_vf: Called to find out how many virtual functions a device on this
+ * bus supports.
+ * @dma_configure: Called to set up DMA configuration on a device on
+ * this bus.
+ * @pm: Power management operations of this bus, callback the specific
+ * device driver's pm-ops.
+ * @iommu_ops: IOMMU specific operations for this bus, used to attach IOMMU
+ * driver implementations to a bus and allow the driver to do
+ * bus-specific setup
+ * @p: The private data of the driver core, only the driver core can
+ * touch this.
+ * @lock_key: Lock class key for use by the lock validator
+ * @need_parent_lock: When probing or removing a device on this bus, the
+ * device core should lock the device's parent.
+ *
+ * A bus is a channel between the processor and one or more devices. For the
+ * purposes of the device model, all devices are connected via a bus, even if
+ * it is an internal, virtual, "platform" bus. Buses can plug into each other.
+ * A USB controller is usually a PCI device, for example. The device model
+ * represents the actual connections between buses and the devices they control.
+ * A bus is represented by the bus_type structure. It contains the name, the
+ * default attributes, the bus' methods, PM operations, and the driver core's
+ * private data.
+ */
+struct bus_type {
+ const char *name;
+ const char *dev_name;
+ struct device *dev_root;
+ const struct attribute_group **bus_groups;
+ const struct attribute_group **dev_groups;
+ const struct attribute_group **drv_groups;
+
+ int (*match)(struct device *dev, struct device_driver *drv);
+ int (*uevent)(struct device *dev, struct kobj_uevent_env *env);
+ int (*probe)(struct device *dev);
+ void (*sync_state)(struct device *dev);
+ int (*remove)(struct device *dev);
+ void (*shutdown)(struct device *dev);
+
+ int (*online)(struct device *dev);
+ int (*offline)(struct device *dev);
+
+ int (*suspend)(struct device *dev, pm_message_t state);
+ int (*resume)(struct device *dev);
+
+ int (*num_vf)(struct device *dev);
+
+ int (*dma_configure)(struct device *dev);
+
+ const struct dev_pm_ops *pm;
+
+ const struct iommu_ops *iommu_ops;
+
+ struct subsys_private *p;
+ struct lock_class_key lock_key;
+
+ bool need_parent_lock;
+};
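
A hedged sketch of a bus providing the new ->sync_state() hook described above; the foo_* names are assumptions:

static void foo_bus_sync_state(struct device *dev)
{
	/*
	 * Every consumer known at late_initcall time has now probed, so
	 * conservative boot-time settings can be dropped.
	 */
	foo_drop_boot_constraints(dev);		/* hypothetical helper */
}

static struct bus_type foo_bus_type = {
	.name		= "foo",
	.sync_state	= foo_bus_sync_state,
};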
+
+extern int __must_check bus_register(struct bus_type *bus);
+
+extern void bus_unregister(struct bus_type *bus);
+
+extern int __must_check bus_rescan_devices(struct bus_type *bus);
+
+struct bus_attribute {
+ struct attribute attr;
+ ssize_t (*show)(struct bus_type *bus, char *buf);
+ ssize_t (*store)(struct bus_type *bus, const char *buf, size_t count);
+};
+
+#define BUS_ATTR_RW(_name) \
+ struct bus_attribute bus_attr_##_name = __ATTR_RW(_name)
+#define BUS_ATTR_RO(_name) \
+ struct bus_attribute bus_attr_##_name = __ATTR_RO(_name)
+#define BUS_ATTR_WO(_name) \
+ struct bus_attribute bus_attr_##_name = __ATTR_WO(_name)
+
+extern int __must_check bus_create_file(struct bus_type *,
+ struct bus_attribute *);
+extern void bus_remove_file(struct bus_type *, struct bus_attribute *);
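
A short, assumed example of defining and registering a bus attribute with the macros above (reusing the hypothetical foo_bus_type):

static ssize_t rescan_store(struct bus_type *bus, const char *buf,
			    size_t count)
{
	int ret = bus_rescan_devices(bus);

	return ret ? ret : count;
}
static BUS_ATTR_WO(rescan);

/* at bus registration time: */
	if (bus_create_file(&foo_bus_type, &bus_attr_rescan))
		pr_warn("foo: failed to create rescan attribute\n");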
+
+/* Generic device matching functions that all buses can use for matching */
+int device_match_name(struct device *dev, const void *name);
+int device_match_of_node(struct device *dev, const void *np);
+int device_match_fwnode(struct device *dev, const void *fwnode);
+int device_match_devt(struct device *dev, const void *pdevt);
+int device_match_acpi_dev(struct device *dev, const void *adev);
+int device_match_any(struct device *dev, const void *unused);
+
+/* iterator helpers for buses */
+struct subsys_dev_iter {
+ struct klist_iter ki;
+ const struct device_type *type;
+};
+void subsys_dev_iter_init(struct subsys_dev_iter *iter,
+ struct bus_type *subsys,
+ struct device *start,
+ const struct device_type *type);
+struct device *subsys_dev_iter_next(struct subsys_dev_iter *iter);
+void subsys_dev_iter_exit(struct subsys_dev_iter *iter);
+
+int bus_for_each_dev(struct bus_type *bus, struct device *start, void *data,
+ int (*fn)(struct device *dev, void *data));
+struct device *bus_find_device(struct bus_type *bus, struct device *start,
+ const void *data,
+ int (*match)(struct device *dev, const void *data));
+/**
+ * bus_find_device_by_name - device iterator for locating a particular device
+ * of a specific name.
+ * @bus: bus type
+ * @start: Device to begin with
+ * @name: name of the device to match
+ */
+static inline struct device *bus_find_device_by_name(struct bus_type *bus,
+ struct device *start,
+ const char *name)
+{
+ return bus_find_device(bus, start, name, device_match_name);
+}
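
A brief usage sketch for the helper above; the lookup takes a reference that the caller must drop (the bus and device name are assumptions):

	struct device *dev;

	dev = bus_find_device_by_name(&foo_bus_type, NULL, "foo.0");
	if (dev) {
		/* ... use dev ... */
		put_device(dev);	/* drop the reference taken by the lookup */
	}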
+
+/**
+ * bus_find_device_by_of_node : device iterator for locating a particular device
+ * matching the of_node.
+ * @bus: bus type
+ * @np: of_node of the device to match.
+ */
+static inline struct device *
+bus_find_device_by_of_node(struct bus_type *bus, const struct device_node *np)
+{
+ return bus_find_device(bus, NULL, np, device_match_of_node);
+}
+
+/**
+ * bus_find_device_by_fwnode : device iterator for locating a particular device
+ * matching the fwnode.
+ * @bus: bus type
+ * @fwnode: fwnode of the device to match.
+ */
+static inline struct device *
+bus_find_device_by_fwnode(struct bus_type *bus, const struct fwnode_handle *fwnode)
+{
+ return bus_find_device(bus, NULL, fwnode, device_match_fwnode);
+}
+
+/**
+ * bus_find_device_by_devt : device iterator for locating a particular device
+ * matching the device type.
+ * @bus: bus type
+ * @devt: device type of the device to match.
+ */
+static inline struct device *bus_find_device_by_devt(struct bus_type *bus,
+ dev_t devt)
+{
+ return bus_find_device(bus, NULL, &devt, device_match_devt);
+}
+
+/**
+ * bus_find_next_device - Find the next device after a given device in a
+ * given bus.
+ * @bus: bus type
+ * @cur: device to begin the search with.
+ */
+static inline struct device *
+bus_find_next_device(struct bus_type *bus,struct device *cur)
+{
+ return bus_find_device(bus, cur, NULL, device_match_any);
+}
+
+#ifdef CONFIG_ACPI
+struct acpi_device;
+
+/**
+ * bus_find_device_by_acpi_dev : device iterator for locating a particular device
+ * matching the ACPI COMPANION device.
+ * @bus: bus type
+ * @adev: ACPI COMPANION device to match.
+ */
+static inline struct device *
+bus_find_device_by_acpi_dev(struct bus_type *bus, const struct acpi_device *adev)
+{
+ return bus_find_device(bus, NULL, adev, device_match_acpi_dev);
+}
+#else
+static inline struct device *
+bus_find_device_by_acpi_dev(struct bus_type *bus, const void *adev)
+{
+ return NULL;
+}
+#endif
+
+struct device *subsys_find_device_by_id(struct bus_type *bus, unsigned int id,
+ struct device *hint);
+int bus_for_each_drv(struct bus_type *bus, struct device_driver *start,
+ void *data, int (*fn)(struct device_driver *, void *));
+void bus_sort_breadthfirst(struct bus_type *bus,
+ int (*compare)(const struct device *a,
+ const struct device *b));
+/*
+ * Bus notifiers: Get notified of addition/removal of devices
+ * and binding/unbinding of drivers to devices.
+ * In the long run, it should be a replacement for the platform
+ * notify hooks.
+ */
+struct notifier_block;
+
+extern int bus_register_notifier(struct bus_type *bus,
+ struct notifier_block *nb);
+extern int bus_unregister_notifier(struct bus_type *bus,
+ struct notifier_block *nb);
+
+/* All notifiers below get called with the target struct device *
+ * as an argument. Note that those functions are likely to be called
+ * with the device lock held in the core, so be careful.
+ */
+#define BUS_NOTIFY_ADD_DEVICE 0x00000001 /* device added */
+#define BUS_NOTIFY_DEL_DEVICE 0x00000002 /* device to be removed */
+#define BUS_NOTIFY_REMOVED_DEVICE 0x00000003 /* device removed */
+#define BUS_NOTIFY_BIND_DRIVER 0x00000004 /* driver about to be
+ bound */
+#define BUS_NOTIFY_BOUND_DRIVER 0x00000005 /* driver bound to device */
+#define BUS_NOTIFY_UNBIND_DRIVER 0x00000006 /* driver about to be
+ unbound */
+#define BUS_NOTIFY_UNBOUND_DRIVER 0x00000007 /* driver is unbound
+ from the device */
+#define BUS_NOTIFY_DRIVER_NOT_BOUND 0x00000008 /* driver fails to be bound */
+
+extern struct kset *bus_get_kset(struct bus_type *bus);
+extern struct klist *bus_get_device_klist(struct bus_type *bus);
+
+#endif
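
The helpers above all funnel into bus_find_device() with one of the generic
device_match_*() predicates, and the BUS_NOTIFY_* events arrive through a
standard notifier chain. A minimal consumer sketch, assuming a hypothetical
my_bus_type and a made-up device name (error handling trimmed):

#include <linux/device.h>
#include <linux/init.h>
#include <linux/notifier.h>

extern struct bus_type my_bus_type;		/* hypothetical bus */

static int my_bus_notify(struct notifier_block *nb, unsigned long action,
			 void *data)
{
	struct device *dev = data;		/* notifiers get the target device */

	if (action == BUS_NOTIFY_BOUND_DRIVER)
		dev_info(dev, "driver bound\n");
	return NOTIFY_OK;
}

static struct notifier_block my_bus_nb = {
	.notifier_call = my_bus_notify,
};

static int __init my_consumer_init(void)
{
	struct device *dev;

	bus_register_notifier(&my_bus_type, &my_bus_nb);

	/* look a device up by name; bus_find_device() takes a reference */
	dev = bus_find_device_by_name(&my_bus_type, NULL, "my-dev0");
	if (dev)
		put_device(dev);

	return 0;
}
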
diff --git a/include/linux/device/class.h b/include/linux/device/class.h
new file mode 100644
index 000000000000..e8d470c457d1
--- /dev/null
+++ b/include/linux/device/class.h
@@ -0,0 +1,266 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * The class-specific portions of the driver model
+ *
+ * Copyright (c) 2001-2003 Patrick Mochel <mochel@osdl.org>
+ * Copyright (c) 2004-2009 Greg Kroah-Hartman <gregkh@suse.de>
+ * Copyright (c) 2008-2009 Novell Inc.
+ * Copyright (c) 2012-2019 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+ * Copyright (c) 2012-2019 Linux Foundation
+ *
+ * See Documentation/driver-api/driver-model/ for more information.
+ */
+
+#ifndef _DEVICE_CLASS_H_
+#define _DEVICE_CLASS_H_
+
+#include <linux/kobject.h>
+#include <linux/klist.h>
+#include <linux/pm.h>
+#include <linux/device/bus.h>
+
+struct device;
+struct fwnode_handle;
+
+/**
+ * struct class - device classes
+ * @name: Name of the class.
+ * @owner: The module owner.
+ * @class_groups: Default attributes of this class.
+ * @dev_groups: Default attributes of the devices that belong to the class.
+ * @dev_kobj: The kobject that represents this class and links it into the hierarchy.
+ * @dev_uevent: Called when a device is added to or removed from this class,
+ * or for a few other events that generate uevents, so the class can
+ * add environment variables.
+ * @devnode: Callback to provide the device node name in devtmpfs.
+ * @class_release: Called to release this class.
+ * @dev_release: Called to release the device.
+ * @shutdown_pre: Called at shut-down time before driver shutdown.
+ * @ns_type: Callbacks so sysfs can determine namespaces.
+ * @namespace: Namespace of the device that belongs to this class.
+ * @get_ownership: Allows class to specify uid/gid of the sysfs directories
+ * for the devices belonging to the class. Usually tied to
+ * device's namespace.
+ * @pm: The default device power management operations of this class.
+ * @p: The private data of the driver core, no one other than the
+ * driver core can touch this.
+ *
+ * A class is a higher-level view of a device that abstracts out low-level
+ * implementation details. Drivers may see a SCSI disk or an ATA disk, but,
+ * at the class level, they are all simply disks. Classes allow user space
+ * to work with devices based on what they do, rather than how they are
+ * connected or how they work.
+ */
+struct class {
+ const char *name;
+ struct module *owner;
+
+ const struct attribute_group **class_groups;
+ const struct attribute_group **dev_groups;
+ struct kobject *dev_kobj;
+
+ int (*dev_uevent)(struct device *dev, struct kobj_uevent_env *env);
+ char *(*devnode)(struct device *dev, umode_t *mode);
+
+ void (*class_release)(struct class *class);
+ void (*dev_release)(struct device *dev);
+
+ int (*shutdown_pre)(struct device *dev);
+
+ const struct kobj_ns_type_operations *ns_type;
+ const void *(*namespace)(struct device *dev);
+
+ void (*get_ownership)(struct device *dev, kuid_t *uid, kgid_t *gid);
+
+ const struct dev_pm_ops *pm;
+
+ struct subsys_private *p;
+};
+
+struct class_dev_iter {
+ struct klist_iter ki;
+ const struct device_type *type;
+};
+
+extern struct kobject *sysfs_dev_block_kobj;
+extern struct kobject *sysfs_dev_char_kobj;
+extern int __must_check __class_register(struct class *class,
+ struct lock_class_key *key);
+extern void class_unregister(struct class *class);
+
+/* This is a #define to keep the compiler from merging different
+ * instances of the __key variable */
+#define class_register(class) \
+({ \
+ static struct lock_class_key __key; \
+ __class_register(class, &__key); \
+})
+
+struct class_compat;
+struct class_compat *class_compat_register(const char *name);
+void class_compat_unregister(struct class_compat *cls);
+int class_compat_create_link(struct class_compat *cls, struct device *dev,
+ struct device *device_link);
+void class_compat_remove_link(struct class_compat *cls, struct device *dev,
+ struct device *device_link);
+
+extern void class_dev_iter_init(struct class_dev_iter *iter,
+ struct class *class,
+ struct device *start,
+ const struct device_type *type);
+extern struct device *class_dev_iter_next(struct class_dev_iter *iter);
+extern void class_dev_iter_exit(struct class_dev_iter *iter);
+
+extern int class_for_each_device(struct class *class, struct device *start,
+ void *data,
+ int (*fn)(struct device *dev, void *data));
+extern struct device *class_find_device(struct class *class,
+ struct device *start, const void *data,
+ int (*match)(struct device *, const void *));
+
+/**
+ * class_find_device_by_name - device iterator for locating a particular device
+ * of a specific name.
+ * @class: class type
+ * @name: name of the device to match
+ */
+static inline struct device *class_find_device_by_name(struct class *class,
+ const char *name)
+{
+ return class_find_device(class, NULL, name, device_match_name);
+}
+
+/**
+ * class_find_device_by_of_node : device iterator for locating a particular device
+ * matching the of_node.
+ * @class: class type
+ * @np: of_node of the device to match.
+ */
+static inline struct device *
+class_find_device_by_of_node(struct class *class, const struct device_node *np)
+{
+ return class_find_device(class, NULL, np, device_match_of_node);
+}
+
+/**
+ * class_find_device_by_fwnode : device iterator for locating a particular device
+ * matching the fwnode.
+ * @class: class type
+ * @fwnode: fwnode of the device to match.
+ */
+static inline struct device *
+class_find_device_by_fwnode(struct class *class,
+ const struct fwnode_handle *fwnode)
+{
+ return class_find_device(class, NULL, fwnode, device_match_fwnode);
+}
+
+/**
+ * class_find_device_by_devt : device iterator for locating a particular device
+ * matching the device number.
+ * @class: class type
+ * @devt: device number (dev_t) of the device to match.
+ */
+static inline struct device *class_find_device_by_devt(struct class *class,
+ dev_t devt)
+{
+ return class_find_device(class, NULL, &devt, device_match_devt);
+}
+
+#ifdef CONFIG_ACPI
+struct acpi_device;
+/**
+ * class_find_device_by_acpi_dev : device iterator for locating a particular
+ * device matching the ACPI_COMPANION device.
+ * @class: class type
+ * @adev: ACPI_COMPANION device to match.
+ */
+static inline struct device *
+class_find_device_by_acpi_dev(struct class *class, const struct acpi_device *adev)
+{
+ return class_find_device(class, NULL, adev, device_match_acpi_dev);
+}
+#else
+static inline struct device *
+class_find_device_by_acpi_dev(struct class *class, const void *adev)
+{
+ return NULL;
+}
+#endif
+
+struct class_attribute {
+ struct attribute attr;
+ ssize_t (*show)(struct class *class, struct class_attribute *attr,
+ char *buf);
+ ssize_t (*store)(struct class *class, struct class_attribute *attr,
+ const char *buf, size_t count);
+};
+
+#define CLASS_ATTR_RW(_name) \
+ struct class_attribute class_attr_##_name = __ATTR_RW(_name)
+#define CLASS_ATTR_RO(_name) \
+ struct class_attribute class_attr_##_name = __ATTR_RO(_name)
+#define CLASS_ATTR_WO(_name) \
+ struct class_attribute class_attr_##_name = __ATTR_WO(_name)
+
+extern int __must_check class_create_file_ns(struct class *class,
+ const struct class_attribute *attr,
+ const void *ns);
+extern void class_remove_file_ns(struct class *class,
+ const struct class_attribute *attr,
+ const void *ns);
+
+static inline int __must_check class_create_file(struct class *class,
+ const struct class_attribute *attr)
+{
+ return class_create_file_ns(class, attr, NULL);
+}
+
+static inline void class_remove_file(struct class *class,
+ const struct class_attribute *attr)
+{
+ return class_remove_file_ns(class, attr, NULL);
+}
+
+/* Simple class attribute that is just a static string */
+struct class_attribute_string {
+ struct class_attribute attr;
+ char *str;
+};
+
+/* Currently, only read-only string attributes are supported */
+#define _CLASS_ATTR_STRING(_name, _mode, _str) \
+ { __ATTR(_name, _mode, show_class_attr_string, NULL), _str }
+#define CLASS_ATTR_STRING(_name, _mode, _str) \
+ struct class_attribute_string class_attr_##_name = \
+ _CLASS_ATTR_STRING(_name, _mode, _str)
+
+extern ssize_t show_class_attr_string(struct class *class, struct class_attribute *attr,
+ char *buf);
+
+struct class_interface {
+ struct list_head node;
+ struct class *class;
+
+ int (*add_dev) (struct device *, struct class_interface *);
+ void (*remove_dev) (struct device *, struct class_interface *);
+};
+
+extern int __must_check class_interface_register(struct class_interface *);
+extern void class_interface_unregister(struct class_interface *);
+
+extern struct class * __must_check __class_create(struct module *owner,
+ const char *name,
+ struct lock_class_key *key);
+extern void class_destroy(struct class *cls);
+
+/* This is a #define to keep the compiler from merging different
+ * instances of the __key variable */
+#define class_create(owner, name) \
+({ \
+ static struct lock_class_key __key; \
+ __class_create(owner, name, &__key); \
+})
+
+
+#endif /* _DEVICE_CLASS_H_ */
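
For orientation, a minimal sketch of a class user under made-up "widget"
names: create the class, look one device up by name, drop the reference, and
tear the class down again.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/module.h>

static struct class *widget_class;

static int __init widget_init(void)
{
	/* class_create() expands to __class_create() with a static lockdep key */
	widget_class = class_create(THIS_MODULE, "widget");
	return PTR_ERR_OR_ZERO(widget_class);
}

static void __exit widget_exit(void)
{
	struct device *dev;

	/* class_find_device() takes a reference on the returned device */
	dev = class_find_device_by_name(widget_class, "widget0");
	if (dev)
		put_device(dev);

	class_destroy(widget_class);
}

module_init(widget_init);
module_exit(widget_exit);
MODULE_LICENSE("GPL");
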
diff --git a/include/linux/device/driver.h b/include/linux/device/driver.h
new file mode 100644
index 000000000000..ee7ba5b5417e
--- /dev/null
+++ b/include/linux/device/driver.h
@@ -0,0 +1,292 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * The driver-specific portions of the driver model
+ *
+ * Copyright (c) 2001-2003 Patrick Mochel <mochel@osdl.org>
+ * Copyright (c) 2004-2009 Greg Kroah-Hartman <gregkh@suse.de>
+ * Copyright (c) 2008-2009 Novell Inc.
+ * Copyright (c) 2012-2019 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+ * Copyright (c) 2012-2019 Linux Foundation
+ *
+ * See Documentation/driver-api/driver-model/ for more information.
+ */
+
+#ifndef _DEVICE_DRIVER_H_
+#define _DEVICE_DRIVER_H_
+
+#include <linux/kobject.h>
+#include <linux/klist.h>
+#include <linux/pm.h>
+#include <linux/device/bus.h>
+
+/**
+ * enum probe_type - device driver probe type to try
+ * Device drivers may opt in for special handling of their
+ * respective probe routines. This tells the core what to
+ * expect and prefer.
+ *
+ * @PROBE_DEFAULT_STRATEGY: Used by drivers that work equally well
+ * whether probed synchronously or asynchronously.
+ * @PROBE_PREFER_ASYNCHRONOUS: Drivers for "slow" devices whose
+ * probing order is not essential for booting the system may
+ * opt into executing their probes asynchronously.
+ * @PROBE_FORCE_SYNCHRONOUS: Use this to annotate drivers that need
+ * their probe routines to run synchronously with driver and
+ * device registration (with the exception of -EPROBE_DEFER
+ * handling - re-probing always ends up being done asynchronously).
+ *
+ * Note that the end goal is to switch the kernel to use asynchronous
+ * probing by default, so annotating drivers with
+ * %PROBE_PREFER_ASYNCHRONOUS is a temporary measure that allows us
+ * to speed up the boot process while we are validating the rest of the
+ * drivers.
+ */
+enum probe_type {
+ PROBE_DEFAULT_STRATEGY,
+ PROBE_PREFER_ASYNCHRONOUS,
+ PROBE_FORCE_SYNCHRONOUS,
+};
+
+/**
+ * struct device_driver - The basic device driver structure
+ * @name: Name of the device driver.
+ * @bus: The bus to which the devices of this driver belong.
+ * @owner: The module owner.
+ * @mod_name: Used for built-in modules.
+ * @suppress_bind_attrs: Disables bind/unbind via sysfs.
+ * @probe_type: Type of the probe (synchronous or asynchronous) to use.
+ * @of_match_table: The open firmware table.
+ * @acpi_match_table: The ACPI match table.
+ * @probe: Called to query the existence of a specific device,
+ * whether this driver can work with it, and bind the driver
+ * to a specific device.
+ * @sync_state: Called to sync device state to software state after all the
+ * state tracking consumers linked to this device (present at
+ * the time of late_initcall) have successfully bound to a
+ * driver. If the device has no consumers, this function will
+ * be called at late_initcall_sync level. If the device has
+ * consumers that are never bound to a driver, this function
+ * will never get called until they do.
+ * @remove: Called when the device is removed from the system to
+ * unbind a device from this driver.
+ * @shutdown: Called at shut-down time to quiesce the device.
+ * @suspend: Called to put the device to sleep mode. Usually to a
+ * low power state.
+ * @resume: Called to bring a device from sleep mode.
+ * @groups: Default attributes that get created by the driver core
+ * automatically.
+ * @dev_groups: Additional attributes attached to the device instance once
+ * it is bound to the driver.
+ * @pm: Power management operations of the device which matched
+ * this driver.
+ * @coredump: Called when sysfs entry is written to. The device driver
+ * is expected to call the dev_coredump API resulting in a
+ * uevent.
+ * @p: Driver core's private data, no one other than the driver
+ * core can touch this.
+ *
+ * The device driver-model tracks all of the drivers known to the system.
+ * The main reason for this tracking is to enable the driver core to match
+ * up drivers with new devices. Once drivers are known objects within the
+ * system, however, a number of other things become possible. Device drivers
+ * can export information and configuration variables that are independent
+ * of any specific device.
+ */
+struct device_driver {
+ const char *name;
+ struct bus_type *bus;
+
+ struct module *owner;
+ const char *mod_name; /* used for built-in modules */
+
+ bool suppress_bind_attrs; /* disables bind/unbind via sysfs */
+ enum probe_type probe_type;
+
+ const struct of_device_id *of_match_table;
+ const struct acpi_device_id *acpi_match_table;
+
+ int (*probe) (struct device *dev);
+ void (*sync_state)(struct device *dev);
+ int (*remove) (struct device *dev);
+ void (*shutdown) (struct device *dev);
+ int (*suspend) (struct device *dev, pm_message_t state);
+ int (*resume) (struct device *dev);
+ const struct attribute_group **groups;
+ const struct attribute_group **dev_groups;
+
+ const struct dev_pm_ops *pm;
+ void (*coredump) (struct device *dev);
+
+ struct driver_private *p;
+};
+
+
+extern int __must_check driver_register(struct device_driver *drv);
+extern void driver_unregister(struct device_driver *drv);
+
+extern struct device_driver *driver_find(const char *name,
+ struct bus_type *bus);
+extern int driver_probe_done(void);
+extern void wait_for_device_probe(void);
+
+/* sysfs interface for exporting driver attributes */
+
+struct driver_attribute {
+ struct attribute attr;
+ ssize_t (*show)(struct device_driver *driver, char *buf);
+ ssize_t (*store)(struct device_driver *driver, const char *buf,
+ size_t count);
+};
+
+#define DRIVER_ATTR_RW(_name) \
+ struct driver_attribute driver_attr_##_name = __ATTR_RW(_name)
+#define DRIVER_ATTR_RO(_name) \
+ struct driver_attribute driver_attr_##_name = __ATTR_RO(_name)
+#define DRIVER_ATTR_WO(_name) \
+ struct driver_attribute driver_attr_##_name = __ATTR_WO(_name)
+
+extern int __must_check driver_create_file(struct device_driver *driver,
+ const struct driver_attribute *attr);
+extern void driver_remove_file(struct device_driver *driver,
+ const struct driver_attribute *attr);
+
+extern int __must_check driver_for_each_device(struct device_driver *drv,
+ struct device *start,
+ void *data,
+ int (*fn)(struct device *dev,
+ void *));
+struct device *driver_find_device(struct device_driver *drv,
+ struct device *start, const void *data,
+ int (*match)(struct device *dev, const void *data));
+
+/**
+ * driver_find_device_by_name - device iterator for locating a particular device
+ * of a specific name.
+ * @drv: the driver we're iterating
+ * @name: name of the device to match
+ */
+static inline struct device *driver_find_device_by_name(struct device_driver *drv,
+ const char *name)
+{
+ return driver_find_device(drv, NULL, name, device_match_name);
+}
+
+/**
+ * driver_find_device_by_of_node - device iterator for locating a particular device
+ * by of_node pointer.
+ * @drv: the driver we're iterating
+ * @np: of_node pointer to match.
+ */
+static inline struct device *
+driver_find_device_by_of_node(struct device_driver *drv,
+ const struct device_node *np)
+{
+ return driver_find_device(drv, NULL, np, device_match_of_node);
+}
+
+/**
+ * driver_find_device_by_fwnode - device iterator for locating a particular device
+ * by fwnode pointer.
+ * @drv: the driver we're iterating
+ * @fwnode: fwnode pointer to match.
+ */
+static inline struct device *
+driver_find_device_by_fwnode(struct device_driver *drv,
+ const struct fwnode_handle *fwnode)
+{
+ return driver_find_device(drv, NULL, fwnode, device_match_fwnode);
+}
+
+/**
+ * driver_find_device_by_devt - device iterator for locating a particular device
+ * by device number (devt).
+ * @drv: the driver we're iterating
+ * @devt: device number (dev_t) of the device to match.
+ */
+static inline struct device *driver_find_device_by_devt(struct device_driver *drv,
+ dev_t devt)
+{
+ return driver_find_device(drv, NULL, &devt, device_match_devt);
+}
+
+static inline struct device *driver_find_next_device(struct device_driver *drv,
+ struct device *start)
+{
+ return driver_find_device(drv, start, NULL, device_match_any);
+}
+
+#ifdef CONFIG_ACPI
+/**
+ * driver_find_device_by_acpi_dev : device iterator for locating a particular
+ * device matching the ACPI_COMPANION device.
+ * @drv: the driver we're iterating
+ * @adev: ACPI_COMPANION device to match.
+ */
+static inline struct device *
+driver_find_device_by_acpi_dev(struct device_driver *drv,
+ const struct acpi_device *adev)
+{
+ return driver_find_device(drv, NULL, adev, device_match_acpi_dev);
+}
+#else
+static inline struct device *
+driver_find_device_by_acpi_dev(struct device_driver *drv, const void *adev)
+{
+ return NULL;
+}
+#endif
+
+extern int driver_deferred_probe_timeout;
+void driver_deferred_probe_add(struct device *dev);
+int driver_deferred_probe_check_state(struct device *dev);
+void driver_init(void);
+
+/**
+ * module_driver() - Helper macro for drivers that don't do anything
+ * special in module init/exit. This eliminates a lot of boilerplate.
+ * Each module may only use this macro once, and calling it replaces
+ * module_init() and module_exit().
+ *
+ * @__driver: driver name
+ * @__register: register function for this driver type
+ * @__unregister: unregister function for this driver type
+ * @...: Additional arguments to be passed to __register and __unregister.
+ *
+ * Use this macro to construct bus specific macros for registering
+ * drivers, and do not use it on its own.
+ */
+#define module_driver(__driver, __register, __unregister, ...) \
+static int __init __driver##_init(void) \
+{ \
+ return __register(&(__driver) , ##__VA_ARGS__); \
+} \
+module_init(__driver##_init); \
+static void __exit __driver##_exit(void) \
+{ \
+ __unregister(&(__driver) , ##__VA_ARGS__); \
+} \
+module_exit(__driver##_exit);
+
+/**
+ * builtin_driver() - Helper macro for drivers that don't do anything
+ * special in init and have no exit. This eliminates some boilerplate.
+ * Each driver may only use this macro once, and calling it replaces
+ * device_initcall (or in some cases, the legacy __initcall). This is
+ * meant to be a direct parallel of module_driver() above but without
+ * the __exit stuff that is not used for builtin cases.
+ *
+ * @__driver: driver name
+ * @__register: register function for this driver type
+ * @...: Additional arguments to be passed to __register
+ *
+ * Use this macro to construct bus specific macros for registering
+ * drivers, and do not use it on its own.
+ */
+#define builtin_driver(__driver, __register, ...) \
+static int __init __driver##_init(void) \
+{ \
+ return __register(&(__driver) , ##__VA_ARGS__); \
+} \
+device_initcall(__driver##_init);
+
+#endif /* _DEVICE_DRIVER_H_ */
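
module_driver() and builtin_driver() are intended to be wrapped by bus-specific
registration macros. A hedged sketch for a hypothetical "foo" bus; foo_device,
foo_driver, foo_driver_register() and foo_driver_unregister() are assumptions
made for illustration, not existing kernel APIs:

#include <linux/device.h>
#include <linux/module.h>

/* in the (hypothetical) foo bus framework header */
struct foo_device;

struct foo_driver {
	int (*probe)(struct foo_device *fdev);
	void (*remove)(struct foo_device *fdev);
	struct device_driver driver;
};

int foo_driver_register(struct foo_driver *drv);
void foo_driver_unregister(struct foo_driver *drv);

#define module_foo_driver(__foo_driver) \
	module_driver(__foo_driver, foo_driver_register, foo_driver_unregister)

/* in an individual driver */
static int sample_probe(struct foo_device *fdev)
{
	return 0;	/* stub */
}

static void sample_remove(struct foo_device *fdev)
{
}

static struct foo_driver sample_driver = {
	.driver = {
		.name		= "sample",
		.probe_type	= PROBE_PREFER_ASYNCHRONOUS,
	},
	.probe	= sample_probe,
	.remove	= sample_remove,
};
module_foo_driver(sample_driver);
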
diff --git a/include/linux/device_cgroup.h b/include/linux/device_cgroup.h
index 8557efe096dc..fa35b52e0002 100644
--- a/include/linux/device_cgroup.h
+++ b/include/linux/device_cgroup.h
@@ -12,26 +12,15 @@
#define DEVCG_DEV_ALL 4 /* this represents all devices */
#ifdef CONFIG_CGROUP_DEVICE
-extern int __devcgroup_check_permission(short type, u32 major, u32 minor,
- short access);
+int devcgroup_check_permission(short type, u32 major, u32 minor,
+ short access);
#else
-static inline int __devcgroup_check_permission(short type, u32 major, u32 minor,
- short access)
+static inline int devcgroup_check_permission(short type, u32 major, u32 minor,
+ short access)
{ return 0; }
#endif
#if defined(CONFIG_CGROUP_DEVICE) || defined(CONFIG_CGROUP_BPF)
-static inline int devcgroup_check_permission(short type, u32 major, u32 minor,
- short access)
-{
- int rc = BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(type, major, minor, access);
-
- if (rc)
- return -EPERM;
-
- return __devcgroup_check_permission(type, major, minor, access);
-}
-
static inline int devcgroup_inode_permission(struct inode *inode, int mask)
{
short type, access = 0;
diff --git a/include/linux/dim.h b/include/linux/dim.h
index 9fa4b3f88c39..b698266d0035 100644
--- a/include/linux/dim.h
+++ b/include/linux/dim.h
@@ -4,22 +4,26 @@
#ifndef DIM_H
#define DIM_H
+#include <linux/bits.h>
+#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
-/**
+/*
* Number of events between DIM iterations.
* Causes a moderation of the algorithm run.
*/
#define DIM_NEVENTS 64
-/**
+/*
* Is a difference between values justifies taking an action.
* We consider 10% difference as significant.
*/
#define IS_SIGNIFICANT_DIFF(val, ref) \
(((100UL * abs((val) - (ref))) / (ref)) > 10)
-/**
+/*
* Calculate the gap between two values.
* Take wrap-around and variable size into consideration.
*/
@@ -27,12 +31,13 @@
& (BIT_ULL(bits) - 1))
/**
- * Structure for CQ moderation values.
+ * struct dim_cq_moder - Structure for CQ moderation values.
* Used for communications between DIM and its consumer.
*
* @usec: CQ timer suggestion (by DIM)
* @pkts: CQ packet counter suggestion (by DIM)
- * @cq_period_mode: CQ priod count mode (from CQE/EQE)
+ * @comps: Completion counter
+ * @cq_period_mode: CQ period count mode (from CQE/EQE)
*/
struct dim_cq_moder {
u16 usec;
@@ -42,13 +47,14 @@ struct dim_cq_moder {
};
/**
- * Structure for DIM sample data.
+ * struct dim_sample - Structure for DIM sample data.
* Used for communications between DIM and its consumer.
*
* @time: Sample timestamp
* @pkt_ctr: Number of packets
* @byte_ctr: Number of bytes
* @event_ctr: Number of events
+ * @comp_ctr: Current completion counter
*/
struct dim_sample {
ktime_t time;
@@ -59,12 +65,14 @@ struct dim_sample {
};
/**
- * Structure for DIM stats.
+ * struct dim_stats - Structure for DIM stats.
* Used for holding current measured rates.
*
* @ppms: Packets per msec
* @bpms: Bytes per msec
* @epms: Events per msec
+ * @cpms: Completions per msec
+ * @cpe_ratio: Ratio of completions to events
*/
struct dim_stats {
int ppms; /* packets per msec */
@@ -75,12 +83,13 @@ struct dim_stats {
};
/**
- * Main structure for dynamic interrupt moderation (DIM).
+ * struct dim - Main structure for dynamic interrupt moderation (DIM).
* Used for holding all information about a specific DIM instance.
*
* @state: Algorithm state (see below)
* @prev_stats: Measured rates from previous iteration (for comparison)
* @start_sample: Sampled data at start of current iteration
+ * @measuring_sample: A &dim_sample that is used to update the current events
* @work: Work to perform on action required
* @priv: A pointer to the struct that points to dim
* @profile_ix: Current moderation profile
@@ -106,24 +115,21 @@ struct dim {
};
/**
- * enum dim_cq_period_mode
- *
- * These are the modes for CQ period count.
+ * enum dim_cq_period_mode - Modes for CQ period count
*
* @DIM_CQ_PERIOD_MODE_START_FROM_EQE: Start counting from EQE
* @DIM_CQ_PERIOD_MODE_START_FROM_CQE: Start counting from CQE (implies timer reset)
* @DIM_CQ_PERIOD_NUM_MODES: Number of modes
*/
-enum {
+enum dim_cq_period_mode {
DIM_CQ_PERIOD_MODE_START_FROM_EQE = 0x0,
DIM_CQ_PERIOD_MODE_START_FROM_CQE = 0x1,
DIM_CQ_PERIOD_NUM_MODES
};
/**
- * enum dim_state
+ * enum dim_state - DIM algorithm states
*
- * These are the DIM algorithm states.
* These will determine if the algorithm is in a valid state to start an iteration.
*
* @DIM_START_MEASURE: This is the first iteration (also after applying a new profile)
@@ -131,16 +137,15 @@ enum {
* need to perform an action
* @DIM_APPLY_NEW_PROFILE: DIM consumer is currently applying a profile - no need to measure
*/
-enum {
+enum dim_state {
DIM_START_MEASURE,
DIM_MEASURE_IN_PROGRESS,
DIM_APPLY_NEW_PROFILE,
};
/**
- * enum dim_tune_state
+ * enum dim_tune_state - DIM algorithm tune states
*
- * These are the DIM algorithm tune states.
* These will determine which action the algorithm should perform.
*
* @DIM_PARKING_ON_TOP: Algorithm found a local top point - exit on significant difference
@@ -148,7 +153,7 @@ enum {
* @DIM_GOING_RIGHT: Algorithm is currently trying higher moderation levels
* @DIM_GOING_LEFT: Algorithm is currently trying lower moderation levels
*/
-enum {
+enum dim_tune_state {
DIM_PARKING_ON_TOP,
DIM_PARKING_TIRED,
DIM_GOING_RIGHT,
@@ -156,25 +161,23 @@ enum {
};
/**
- * enum dim_stats_state
+ * enum dim_stats_state - DIM algorithm statistics states
*
- * These are the DIM algorithm statistics states.
* These will determine the verdict of current iteration.
*
* @DIM_STATS_WORSE: Current iteration shows worse performance than before
- * @DIM_STATS_WORSE: Current iteration shows same performance than before
- * @DIM_STATS_WORSE: Current iteration shows better performance than before
+ * @DIM_STATS_SAME: Current iteration shows the same performance as before
+ * @DIM_STATS_BETTER: Current iteration shows better performance than before
*/
-enum {
+enum dim_stats_state {
DIM_STATS_WORSE,
DIM_STATS_SAME,
DIM_STATS_BETTER,
};
/**
- * enum dim_step_result
+ * enum dim_step_result - DIM algorithm step results
*
- * These are the DIM algorithm step results.
* These describe the result of a step.
*
* @DIM_STEPPED: Performed a regular step
@@ -182,7 +185,7 @@ enum {
* tired parking
* @DIM_ON_EDGE: Stepped to the most left/right profile
*/
-enum {
+enum dim_step_result {
DIM_STEPPED,
DIM_TOO_TIRED,
DIM_ON_EDGE,
@@ -199,7 +202,7 @@ enum {
bool dim_on_top(struct dim *dim);
/**
- * dim_turn - change profile alterning direction
+ * dim_turn - change profile altering direction
* @dim: DIM context
*
* Go left if we were going right and vice-versa.
@@ -238,7 +241,7 @@ void dim_calc_stats(struct dim_sample *start, struct dim_sample *end,
struct dim_stats *curr_stats);
/**
- * dim_update_sample - set a sample's fields with give values
+ * dim_update_sample - set a sample's fields with given values
* @event_ctr: number of events to set
* @packets: number of packets to set
* @bytes: number of bytes to set
@@ -304,8 +307,8 @@ struct dim_cq_moder net_dim_get_def_tx_moderation(u8 cq_period_mode);
* @end_sample: Current data measurement
*
* Called by the consumer.
- * This is the main logic of the algorithm, where data is processed in order to decide on next
- * required action.
+ * This is the main logic of the algorithm, where data is processed in order
+ * to decide on next required action.
*/
void net_dim(struct dim *dim, struct dim_sample end_sample);
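
The usual consumer pattern, sketched here under a hypothetical foo_ring
structure (its napi, dim and counter fields are assumptions), samples the ring
counters when a NAPI poll completes and feeds them to net_dim(), which may
queue the DIM work item:

#include <linux/dim.h>
#include <linux/netdevice.h>

struct foo_ring {
	struct napi_struct napi;
	struct dim dim;
	u16 event_ctr;
	u64 packets;
	u64 bytes;
};

static int foo_process_completions(struct foo_ring *ring, int budget)
{
	return 0;	/* stub: reclaim completed descriptors */
}

static int foo_napi_poll(struct napi_struct *napi, int budget)
{
	struct foo_ring *ring = container_of(napi, struct foo_ring, napi);
	int done = foo_process_completions(ring, budget);

	if (done < budget && napi_complete_done(napi, done)) {
		struct dim_sample sample;

		/* snapshot the counters accumulated on this ring */
		dim_update_sample(ring->event_ctr, ring->packets, ring->bytes,
				  &sample);
		net_dim(&ring->dim, sample);	/* may queue ring->dim.work */
	}
	return done;
}
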
diff --git a/include/linux/dio.h b/include/linux/dio.h
index 1470d1d943b4..5abd07361eb5 100644
--- a/include/linux/dio.h
+++ b/include/linux/dio.h
@@ -247,11 +247,6 @@ extern int dio_create_sysfs_dev_files(struct dio_dev *);
/* New-style probing */
extern int dio_register_driver(struct dio_driver *);
extern void dio_unregister_driver(struct dio_driver *);
-extern const struct dio_device_id *dio_match_device(const struct dio_device_id *ids, const struct dio_dev *z);
-static inline struct dio_driver *dio_dev_driver(const struct dio_dev *d)
-{
- return d->driver;
-}
#define dio_resource_start(d) ((d)->resource.start)
#define dio_resource_end(d) ((d)->resource.end)
diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
index ec212cb27fdc..abf5459a5b9d 100644
--- a/include/linux/dma-buf.h
+++ b/include/linux/dma-buf.h
@@ -43,6 +43,18 @@ struct dma_buf_ops {
bool cache_sgt_mapping;
/**
+ * @dynamic_mapping:
+ *
+ * If true the framework makes sure that the map/unmap_dma_buf
+ * callbacks are always called with the dma_resv object locked.
+ *
+ * If false the framework makes sure that the map/unmap_dma_buf
+ * callbacks are always called without the dma_resv object locked.
+ * Mutually exclusive with @cache_sgt_mapping.
+ */
+ bool dynamic_mapping;
+
+ /**
* @attach:
*
* This is called from dma_buf_attach() to make sure that a given
@@ -109,6 +121,9 @@ struct dma_buf_ops {
* any other kind of sharing that the exporter might wish to make
* available to buffer-users.
*
+ * This is always called with the dmabuf->resv object locked when
+ * the dynamic_mapping flag is true.
+ *
* Returns:
*
* A &sg_table scatter list of or the backing storage of the DMA buffer,
@@ -234,31 +249,6 @@ struct dma_buf_ops {
*/
int (*mmap)(struct dma_buf *, struct vm_area_struct *vma);
- /**
- * @map:
- *
- * Maps a page from the buffer into kernel address space. The page is
- * specified by offset into the buffer in PAGE_SIZE units.
- *
- * This callback is optional.
- *
- * Returns:
- *
- * Virtual address pointer where requested page can be accessed. NULL
- * on error or when this function is unimplemented by the exporter.
- */
- void *(*map)(struct dma_buf *, unsigned long);
-
- /**
- * @unmap:
- *
- * Unmaps a page from the buffer. Page offset and address pointer should
- * be the same as the one passed to and returned by matching call to map.
- *
- * This callback is optional.
- */
- void (*unmap)(struct dma_buf *, unsigned long, void *);
-
void *(*vmap)(struct dma_buf *);
void (*vunmap)(struct dma_buf *, void *vaddr);
};
@@ -267,14 +257,16 @@ struct dma_buf_ops {
* struct dma_buf - shared buffer object
* @size: size of the buffer
* @file: file pointer used for sharing buffers across, and for refcounting.
- * @attachments: list of dma_buf_attachment that denotes all devices attached.
+ * @attachments: list of dma_buf_attachment that denotes all devices attached,
+ * protected by dma_resv lock.
* @ops: dma_buf_ops associated with this buffer object.
* @lock: used internally to serialize list manipulation, attach/detach and
- * vmap/unmap, and accesses to name
+ * vmap/unmap
* @vmapping_counter: used internally to refcnt the vmaps
* @vmap_ptr: the current vmap ptr if vmapping_counter > 0
* @exp_name: name of the exporter; useful for debugging.
- * @name: userspace-provided name; useful for accounting and debugging.
+ * @name: userspace-provided name; useful for accounting and debugging,
+ * protected by @resv.
* @owner: pointer to exporter module; used for refcounting when exporter is a
* kernel module.
* @list_node: node for dma_buf accounting and debugging.
@@ -323,10 +315,12 @@ struct dma_buf {
* struct dma_buf_attachment - holds device-buffer attachment data
* @dmabuf: buffer for this attachment.
* @dev: device attached to the buffer.
- * @node: list of dma_buf_attachment.
+ * @node: list of dma_buf_attachment, protected by dma_resv lock of the dmabuf.
* @sgt: cached mapping.
* @dir: direction of cached mapping.
* @priv: exporter specific attachment data.
+ * @dynamic_mapping: true if dma_buf_map/unmap_attachment() is called with the
+ * dma_resv lock held.
*
* This structure holds the attachment information between the dma_buf buffer
* and its user device(s). The list contains one attachment struct per device
@@ -343,6 +337,7 @@ struct dma_buf_attachment {
struct list_head node;
struct sg_table *sgt;
enum dma_data_direction dir;
+ bool dynamic_mapping;
void *priv;
};
@@ -394,10 +389,40 @@ static inline void get_dma_buf(struct dma_buf *dmabuf)
get_file(dmabuf->file);
}
+/**
+ * dma_buf_is_dynamic - check if a DMA-buf uses dynamic mappings.
+ * @dmabuf: the DMA-buf to check
+ *
+ * Returns true if a DMA-buf exporter wants to be called with the dma_resv
+ * locked for the map/unmap callbacks, false if it doesn't want to be called
+ * with the lock held.
+ */
+static inline bool dma_buf_is_dynamic(struct dma_buf *dmabuf)
+{
+ return dmabuf->ops->dynamic_mapping;
+}
+
+/**
+ * dma_buf_attachment_is_dynamic - check if a DMA-buf attachment uses dynamic
+ * mappings
+ * @attach: the DMA-buf attachment to check
+ *
+ * Returns true if a DMA-buf importer wants to call the map/unmap functions with
+ * the dma_resv lock held.
+ */
+static inline bool
+dma_buf_attachment_is_dynamic(struct dma_buf_attachment *attach)
+{
+ return attach->dynamic_mapping;
+}
+
struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
- struct device *dev);
+ struct device *dev);
+struct dma_buf_attachment *
+dma_buf_dynamic_attach(struct dma_buf *dmabuf, struct device *dev,
+ bool dynamic_mapping);
void dma_buf_detach(struct dma_buf *dmabuf,
- struct dma_buf_attachment *dmabuf_attach);
+ struct dma_buf_attachment *attach);
struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info);
@@ -409,12 +434,11 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *,
enum dma_data_direction);
void dma_buf_unmap_attachment(struct dma_buf_attachment *, struct sg_table *,
enum dma_data_direction);
+void dma_buf_move_notify(struct dma_buf *dma_buf);
int dma_buf_begin_cpu_access(struct dma_buf *dma_buf,
enum dma_data_direction dir);
int dma_buf_end_cpu_access(struct dma_buf *dma_buf,
enum dma_data_direction dir);
-void *dma_buf_kmap(struct dma_buf *, unsigned long);
-void dma_buf_kunmap(struct dma_buf *, unsigned long, void *);
int dma_buf_mmap(struct dma_buf *, struct vm_area_struct *,
unsigned long);
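
A sketch of the dynamic-importer side, assuming an invented foo_import()
helper and trimmed error handling; with dynamic_mapping enabled, the importer
holds the dma_resv lock around the map call:

#include <linux/dma-buf.h>
#include <linux/dma-resv.h>
#include <linux/err.h>

static struct sg_table *foo_import(struct dma_buf *dmabuf, struct device *dev)
{
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;

	/* third argument: this importer supports dynamic mapping */
	attach = dma_buf_dynamic_attach(dmabuf, dev, true);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	dma_resv_lock(dmabuf->resv, NULL);
	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	dma_resv_unlock(dmabuf->resv);

	return sgt;
}
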
diff --git a/include/linux/dma-direct.h b/include/linux/dma-direct.h
index adf993a3bd58..24b8684aa21d 100644
--- a/include/linux/dma-direct.h
+++ b/include/linux/dma-direct.h
@@ -3,8 +3,11 @@
#define _LINUX_DMA_DIRECT_H 1
#include <linux/dma-mapping.h>
+#include <linux/memblock.h> /* for min_low_pfn */
#include <linux/mem_encrypt.h>
+extern unsigned int zone_dma_bits;
+
#ifdef CONFIG_ARCH_HAS_PHYS_TO_DMA
#include <asm/dma-direct.h>
#else
@@ -21,15 +24,6 @@ static inline phys_addr_t __dma_to_phys(struct device *dev, dma_addr_t dev_addr)
return paddr + ((phys_addr_t)dev->dma_pfn_offset << PAGE_SHIFT);
}
-
-static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
-{
- if (!dev->dma_mask)
- return false;
-
- return addr + size - 1 <=
- min_not_zero(*dev->dma_mask, dev->bus_dma_mask);
-}
#endif /* !CONFIG_ARCH_HAS_PHYS_TO_DMA */
#ifdef CONFIG_ARCH_HAS_FORCE_DMA_UNENCRYPTED
@@ -57,6 +51,21 @@ static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t daddr)
return __sme_clr(__dma_to_phys(dev, daddr));
}
+static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size,
+ bool is_ram)
+{
+ dma_addr_t end = addr + size - 1;
+
+ if (!dev->dma_mask)
+ return false;
+
+ if (is_ram && !IS_ENABLED(CONFIG_ARCH_DMA_ADDR_T_64BIT) &&
+ min(addr, end) < phys_to_dma(dev, PFN_PHYS(min_low_pfn)))
+ return false;
+
+ return end <= min_not_zero(*dev->dma_mask, dev->bus_dma_limit);
+}
+
u64 dma_direct_get_required_mask(struct device *dev);
void *dma_direct_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
gfp_t gfp, unsigned long attrs);
@@ -67,7 +76,13 @@ void *dma_direct_alloc_pages(struct device *dev, size_t size,
void dma_direct_free_pages(struct device *dev, size_t size, void *cpu_addr,
dma_addr_t dma_addr, unsigned long attrs);
struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs);
-void __dma_direct_free_pages(struct device *dev, size_t size, struct page *page);
+ gfp_t gfp, unsigned long attrs);
+int dma_direct_get_sgtable(struct device *dev, struct sg_table *sgt,
+ void *cpu_addr, dma_addr_t dma_addr, size_t size,
+ unsigned long attrs);
+bool dma_direct_can_mmap(struct device *dev);
+int dma_direct_mmap(struct device *dev, struct vm_area_struct *vma,
+ void *cpu_addr, dma_addr_t dma_addr, size_t size,
+ unsigned long attrs);
int dma_direct_supported(struct device *dev, u64 mask);
#endif /* _LINUX_DMA_DIRECT_H */
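
As a rough illustration of the extended dma_capable() signature (this is only
a stub in the spirit of the direct-mapping path, not the kernel's actual
dma_direct code):

#include <linux/dma-direct.h>
#include <linux/dma-mapping.h>

static dma_addr_t foo_direct_map(struct device *dev, struct page *page,
				 unsigned long offset, size_t size)
{
	phys_addr_t phys = page_to_phys(page) + offset;
	dma_addr_t dma_addr = phys_to_dma(dev, phys);

	/* is_ram == true: also reject addresses below the first RAM page */
	if (!dma_capable(dev, dma_addr, size, true))
		return DMA_MAPPING_ERROR;	/* real code would bounce via swiotlb */

	return dma_addr;
}
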
diff --git a/include/linux/dma-heap.h b/include/linux/dma-heap.h
new file mode 100644
index 000000000000..454e354d1ffb
--- /dev/null
+++ b/include/linux/dma-heap.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * DMABUF Heaps Allocation Infrastructure
+ *
+ * Copyright (C) 2011 Google, Inc.
+ * Copyright (C) 2019 Linaro Ltd.
+ */
+
+#ifndef _DMA_HEAPS_H
+#define _DMA_HEAPS_H
+
+#include <linux/cdev.h>
+#include <linux/types.h>
+
+struct dma_heap;
+
+/**
+ * struct dma_heap_ops - ops to operate on a given heap
+ * @allocate: allocate dmabuf and return fd
+ *
+ * allocate returns dmabuf fd on success, -errno on error.
+ */
+struct dma_heap_ops {
+ int (*allocate)(struct dma_heap *heap,
+ unsigned long len,
+ unsigned long fd_flags,
+ unsigned long heap_flags);
+};
+
+/**
+ * struct dma_heap_export_info - information needed to export a new dmabuf heap
+ * @name: used for debugging/device-node name
+ * @ops: ops struct for this heap
+ * @priv: heap exporter private data
+ *
+ * Information needed to export a new dmabuf heap.
+ */
+struct dma_heap_export_info {
+ const char *name;
+ const struct dma_heap_ops *ops;
+ void *priv;
+};
+
+/**
+ * dma_heap_get_drvdata() - get per-heap driver data
+ * @heap: DMA-Heap to retrieve private data for
+ *
+ * Returns:
+ * The per-heap data for the heap.
+ */
+void *dma_heap_get_drvdata(struct dma_heap *heap);
+
+/**
+ * dma_heap_add - adds a heap to dmabuf heaps
+ * @exp_info: information needed to register this heap
+ */
+struct dma_heap *dma_heap_add(const struct dma_heap_export_info *exp_info);
+
+#endif /* _DMA_HEAPS_H */
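
A hedged registration sketch; the "example" heap and its stub allocate
callback are invented, and dma_heap_add() is assumed to return an ERR_PTR on
failure:

#include <linux/dma-heap.h>
#include <linux/err.h>
#include <linux/module.h>

static int example_heap_allocate(struct dma_heap *heap, unsigned long len,
				 unsigned long fd_flags,
				 unsigned long heap_flags)
{
	/* allocate backing memory, wrap it in a dma-buf and return its fd */
	return -ENOMEM;	/* stub */
}

static const struct dma_heap_ops example_heap_ops = {
	.allocate = example_heap_allocate,
};

static int __init example_heap_init(void)
{
	struct dma_heap_export_info exp_info = {
		.name = "example",
		.ops  = &example_heap_ops,
		.priv = NULL,
	};

	return PTR_ERR_OR_ZERO(dma_heap_add(&exp_info));
}
device_initcall(example_heap_init);
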
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index 4a1c4fca475a..330ad58fbf4d 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -15,11 +15,8 @@
/**
* List of possible attributes associated with a DMA mapping. The semantics
* of each attribute should be defined in Documentation/DMA-attributes.txt.
- *
- * DMA_ATTR_WRITE_BARRIER: DMA to a memory region with this attribute
- * forces all pending DMA writes to complete.
*/
-#define DMA_ATTR_WRITE_BARRIER (1UL << 0)
+
/*
* DMA_ATTR_WEAK_ORDERING: Specifies that reads and writes to the mapping
* may be weakly ordered, that is that reads and writes may pass each other.
@@ -162,7 +159,7 @@ int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr);
int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma,
void *cpu_addr, size_t size, int *ret);
-void *dma_alloc_from_global_coherent(ssize_t size, dma_addr_t *dma_handle);
+void *dma_alloc_from_global_coherent(struct device *dev, ssize_t size, dma_addr_t *dma_handle);
int dma_release_from_global_coherent(int order, void *vaddr);
int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *cpu_addr,
size_t size, int *ret);
@@ -172,7 +169,7 @@ int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *cpu_addr,
#define dma_release_from_dev_coherent(dev, order, vaddr) (0)
#define dma_mmap_from_dev_coherent(dev, vma, vaddr, order, ret) (0)
-static inline void *dma_alloc_from_global_coherent(ssize_t size,
+static inline void *dma_alloc_from_global_coherent(struct device *dev, ssize_t size,
dma_addr_t *dma_handle)
{
return NULL;
@@ -583,6 +580,10 @@ static inline unsigned long dma_get_merge_boundary(struct device *dev)
static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
size_t size, enum dma_data_direction dir, unsigned long attrs)
{
+ /* DMA must never operate on areas that might be remapped. */
+ if (dev_WARN_ONCE(dev, is_vmalloc_addr(ptr),
+ "rejecting DMA map of vmalloc memory\n"))
+ return DMA_MAPPING_ERROR;
debug_dma_map_single(dev, ptr, size);
return dma_map_page_attrs(dev, virt_to_page(ptr), offset_in_page(ptr),
size, dir, attrs);
@@ -693,7 +694,7 @@ static inline int dma_coerce_mask_and_coherent(struct device *dev, u64 mask)
*/
static inline bool dma_addressing_limited(struct device *dev)
{
- return min_not_zero(dma_get_mask(dev), dev->bus_dma_mask) <
+ return min_not_zero(dma_get_mask(dev), dev->bus_dma_limit) <
dma_get_required_mask(dev);
}
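
To make the vmalloc rejection in dma_map_single_attrs() concrete, here is the
kind of mapping that now fails early (sketch; dev and the buffer are
placeholders):

#include <linux/dma-mapping.h>
#include <linux/vmalloc.h>

static dma_addr_t foo_bad_map(struct device *dev)
{
	void *buf = vmalloc(PAGE_SIZE);	/* not guaranteed physically contiguous */

	/*
	 * Rejected: dma_map_single() now WARNs and returns DMA_MAPPING_ERROR
	 * for vmalloc memory.  Use kmalloc()- or dma_alloc_coherent()-backed
	 * buffers instead.
	 */
	return dma_map_single(dev, buf, PAGE_SIZE, DMA_TO_DEVICE);
}
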
diff --git a/include/linux/dma-noncoherent.h b/include/linux/dma-noncoherent.h
index dd3de6d88fc0..ca9b5770caee 100644
--- a/include/linux/dma-noncoherent.h
+++ b/include/linux/dma-noncoherent.h
@@ -41,8 +41,6 @@ void *arch_dma_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
gfp_t gfp, unsigned long attrs);
void arch_dma_free(struct device *dev, size_t size, void *cpu_addr,
dma_addr_t dma_addr, unsigned long attrs);
-long arch_dma_coherent_to_pfn(struct device *dev, void *cpu_addr,
- dma_addr_t dma_addr);
#ifdef CONFIG_MMU
/*
@@ -75,29 +73,29 @@ static inline void arch_dma_cache_sync(struct device *dev, void *vaddr,
#endif /* CONFIG_DMA_NONCOHERENT_CACHE_SYNC */
#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE
-void arch_sync_dma_for_device(struct device *dev, phys_addr_t paddr,
- size_t size, enum dma_data_direction dir);
+void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
+ enum dma_data_direction dir);
#else
-static inline void arch_sync_dma_for_device(struct device *dev,
- phys_addr_t paddr, size_t size, enum dma_data_direction dir)
+static inline void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
+ enum dma_data_direction dir)
{
}
#endif /* ARCH_HAS_SYNC_DMA_FOR_DEVICE */
#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU
-void arch_sync_dma_for_cpu(struct device *dev, phys_addr_t paddr,
- size_t size, enum dma_data_direction dir);
+void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
+ enum dma_data_direction dir);
#else
-static inline void arch_sync_dma_for_cpu(struct device *dev,
- phys_addr_t paddr, size_t size, enum dma_data_direction dir)
+static inline void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
+ enum dma_data_direction dir)
{
}
#endif /* ARCH_HAS_SYNC_DMA_FOR_CPU */
#ifdef CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL
-void arch_sync_dma_for_cpu_all(struct device *dev);
+void arch_sync_dma_for_cpu_all(void);
#else
-static inline void arch_sync_dma_for_cpu_all(struct device *dev)
+static inline void arch_sync_dma_for_cpu_all(void)
{
}
#endif /* CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL */
diff --git a/include/linux/dma/k3-psil.h b/include/linux/dma/k3-psil.h
new file mode 100644
index 000000000000..61d5cc0ad601
--- /dev/null
+++ b/include/linux/dma/k3-psil.h
@@ -0,0 +1,71 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
+ */
+
+#ifndef K3_PSIL_H_
+#define K3_PSIL_H_
+
+#include <linux/types.h>
+
+#define K3_PSIL_DST_THREAD_ID_OFFSET 0x8000
+
+struct device;
+
+/**
+ * enum udma_tp_level - Channel Throughput Levels
+ * @UDMA_TP_NORMAL: Normal channel
+ * @UDMA_TP_HIGH: High Throughput channel
+ * @UDMA_TP_ULTRAHIGH: Ultra High Throughput channel
+ */
+enum udma_tp_level {
+ UDMA_TP_NORMAL = 0,
+ UDMA_TP_HIGH,
+ UDMA_TP_ULTRAHIGH,
+ UDMA_TP_LAST,
+};
+
+/**
+ * enum psil_endpoint_type - PSI-L Endpoint type
+ * @PSIL_EP_NATIVE: Normal channel
+ * @PSIL_EP_PDMA_XY: XY mode PDMA
+ * @PSIL_EP_PDMA_MCAN: MCAN mode PDMA
+ * @PSIL_EP_PDMA_AASRC: AASRC mode PDMA
+ */
+enum psil_endpoint_type {
+ PSIL_EP_NATIVE = 0,
+ PSIL_EP_PDMA_XY,
+ PSIL_EP_PDMA_MCAN,
+ PSIL_EP_PDMA_AASRC,
+};
+
+/**
+ * struct psil_endpoint_config - PSI-L Endpoint configuration
+ * @ep_type: PSI-L endpoint type
+ * @pkt_mode: If set, the channel must be in Packet mode, otherwise in
+ * TR mode
+ * @notdpkt: TDCM must be suppressed on the TX channel
+ * @needs_epib: Endpoint needs EPIB
+ * @psd_size: If set, PSdata is used by the endpoint
+ * @channel_tpl: Desired throughput level for the channel
+ * @pdma_acc32: ACC32 must be enabled on the PDMA side
+ * @pdma_burst: BURST must be enabled on the PDMA side
+ */
+struct psil_endpoint_config {
+ enum psil_endpoint_type ep_type;
+
+ unsigned pkt_mode:1;
+ unsigned notdpkt:1;
+ unsigned needs_epib:1;
+ u32 psd_size;
+ enum udma_tp_level channel_tpl;
+
+ /* PDMA properties, valid for PSIL_EP_PDMA_* */
+ unsigned pdma_acc32:1;
+ unsigned pdma_burst:1;
+};
+
+int psil_set_new_ep_config(struct device *dev, const char *name,
+ struct psil_endpoint_config *ep_config);
+
+#endif /* K3_PSIL_H_ */
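
A hedged sketch of overriding one endpoint configuration; the thread name and
the values are placeholders:

#include <linux/dma/k3-psil.h>

static int foo_fixup_psil(struct device *dev)
{
	static struct psil_endpoint_config cfg = {
		.ep_type     = PSIL_EP_PDMA_XY,
		.pkt_mode    = 1,
		.needs_epib  = 1,
		.psd_size    = 16,
		.channel_tpl = UDMA_TP_NORMAL,
	};

	/* "my-peripheral-tx" is a placeholder PSI-L thread name */
	return psil_set_new_ep_config(dev, "my-peripheral-tx", &cfg);
}
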
diff --git a/include/linux/dma/k3-udma-glue.h b/include/linux/dma/k3-udma-glue.h
new file mode 100644
index 000000000000..caadbab1632a
--- /dev/null
+++ b/include/linux/dma/k3-udma-glue.h
@@ -0,0 +1,134 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
+ */
+
+#ifndef K3_UDMA_GLUE_H_
+#define K3_UDMA_GLUE_H_
+
+#include <linux/types.h>
+#include <linux/soc/ti/k3-ringacc.h>
+#include <linux/dma/ti-cppi5.h>
+
+struct k3_udma_glue_tx_channel_cfg {
+ struct k3_ring_cfg tx_cfg;
+ struct k3_ring_cfg txcq_cfg;
+
+ bool tx_pause_on_err;
+ bool tx_filt_einfo;
+ bool tx_filt_pswords;
+ bool tx_supr_tdpkt;
+ u32 swdata_size;
+};
+
+struct k3_udma_glue_tx_channel;
+
+struct k3_udma_glue_tx_channel *k3_udma_glue_request_tx_chn(struct device *dev,
+ const char *name, struct k3_udma_glue_tx_channel_cfg *cfg);
+
+void k3_udma_glue_release_tx_chn(struct k3_udma_glue_tx_channel *tx_chn);
+int k3_udma_glue_push_tx_chn(struct k3_udma_glue_tx_channel *tx_chn,
+ struct cppi5_host_desc_t *desc_tx,
+ dma_addr_t desc_dma);
+int k3_udma_glue_pop_tx_chn(struct k3_udma_glue_tx_channel *tx_chn,
+ dma_addr_t *desc_dma);
+int k3_udma_glue_enable_tx_chn(struct k3_udma_glue_tx_channel *tx_chn);
+void k3_udma_glue_disable_tx_chn(struct k3_udma_glue_tx_channel *tx_chn);
+void k3_udma_glue_tdown_tx_chn(struct k3_udma_glue_tx_channel *tx_chn,
+ bool sync);
+void k3_udma_glue_reset_tx_chn(struct k3_udma_glue_tx_channel *tx_chn,
+ void *data, void (*cleanup)(void *data, dma_addr_t desc_dma));
+u32 k3_udma_glue_tx_get_hdesc_size(struct k3_udma_glue_tx_channel *tx_chn);
+u32 k3_udma_glue_tx_get_txcq_id(struct k3_udma_glue_tx_channel *tx_chn);
+int k3_udma_glue_tx_get_irq(struct k3_udma_glue_tx_channel *tx_chn);
+
+enum {
+ K3_UDMA_GLUE_SRC_TAG_LO_KEEP = 0,
+ K3_UDMA_GLUE_SRC_TAG_LO_USE_FLOW_REG = 1,
+ K3_UDMA_GLUE_SRC_TAG_LO_USE_REMOTE_FLOW_ID = 2,
+ K3_UDMA_GLUE_SRC_TAG_LO_USE_REMOTE_SRC_TAG = 4,
+};
+
+/**
+ * k3_udma_glue_rx_flow_cfg - UDMA RX flow cfg
+ *
+ * @rx_cfg: RX ring configuration
+ * @rxfdq_cfg: RX free Host PD ring configuration
+ * @ring_rxq_id: RX ring id (or -1 for any)
+ * @ring_rxfdq0_id: RX free Host PD ring (FDQ) id (or -1 for any)
+ * @rx_error_handling: Rx Error Handling Mode (0 - drop, 1 - re-try)
+ * @src_tag_lo_sel: Rx Source Tag Low Byte Selector in Host PD
+ */
+struct k3_udma_glue_rx_flow_cfg {
+ struct k3_ring_cfg rx_cfg;
+ struct k3_ring_cfg rxfdq_cfg;
+ int ring_rxq_id;
+ int ring_rxfdq0_id;
+ bool rx_error_handling;
+ int src_tag_lo_sel;
+};
+
+/**
+ * k3_udma_glue_rx_channel_cfg - UDMA RX channel cfg
+ *
+ * @swdata_size: SW Data is present in Host PD of @swdata_size bytes
+ * @flow_id_base: first flow_id used by channel.
+ * if @flow_id_base = -1 - range of GP rflows will be
+ * allocated dynamically.
+ * @flow_id_num: number of RX flows used by channel
+ * @flow_id_use_rxchan_id: use RX channel id as flow id,
+ * used only if @flow_id_num = 1
+ * @remote: indication that the RX channel is remote - some remote CPU
+ * core owns and controls the RX channel. The Linux host is only
+ * allowed to attach and configure RX flows within the RX
+ * channel. If set, no RX channel operations will be
+ * performed by the K3 NAVSS DMA glue interface.
+ * @def_flow_cfg: default RX flow configuration,
+ * used only if @flow_id_num = 1
+ */
+struct k3_udma_glue_rx_channel_cfg {
+ u32 swdata_size;
+ int flow_id_base;
+ int flow_id_num;
+ bool flow_id_use_rxchan_id;
+ bool remote;
+
+ struct k3_udma_glue_rx_flow_cfg *def_flow_cfg;
+};
+
+struct k3_udma_glue_rx_channel;
+
+struct k3_udma_glue_rx_channel *k3_udma_glue_request_rx_chn(
+ struct device *dev,
+ const char *name,
+ struct k3_udma_glue_rx_channel_cfg *cfg);
+
+void k3_udma_glue_release_rx_chn(struct k3_udma_glue_rx_channel *rx_chn);
+int k3_udma_glue_enable_rx_chn(struct k3_udma_glue_rx_channel *rx_chn);
+void k3_udma_glue_disable_rx_chn(struct k3_udma_glue_rx_channel *rx_chn);
+void k3_udma_glue_tdown_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
+ bool sync);
+int k3_udma_glue_push_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
+ u32 flow_num, struct cppi5_host_desc_t *desc_tx,
+ dma_addr_t desc_dma);
+int k3_udma_glue_pop_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
+ u32 flow_num, dma_addr_t *desc_dma);
+int k3_udma_glue_rx_flow_init(struct k3_udma_glue_rx_channel *rx_chn,
+ u32 flow_idx, struct k3_udma_glue_rx_flow_cfg *flow_cfg);
+u32 k3_udma_glue_rx_flow_get_fdq_id(struct k3_udma_glue_rx_channel *rx_chn,
+ u32 flow_idx);
+u32 k3_udma_glue_rx_get_flow_id_base(struct k3_udma_glue_rx_channel *rx_chn);
+int k3_udma_glue_rx_get_irq(struct k3_udma_glue_rx_channel *rx_chn,
+ u32 flow_num);
+void k3_udma_glue_rx_put_irq(struct k3_udma_glue_rx_channel *rx_chn,
+ u32 flow_num);
+void k3_udma_glue_reset_rx_chn(struct k3_udma_glue_rx_channel *rx_chn,
+ u32 flow_num, void *data,
+ void (*cleanup)(void *data, dma_addr_t desc_dma),
+ bool skip_fdq);
+int k3_udma_glue_rx_flow_enable(struct k3_udma_glue_rx_channel *rx_chn,
+ u32 flow_idx);
+int k3_udma_glue_rx_flow_disable(struct k3_udma_glue_rx_channel *rx_chn,
+ u32 flow_idx);
+
+#endif /* K3_UDMA_GLUE_H_ */
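
A hedged sketch of the TX side; the channel name and swdata size are
placeholders, the ring configurations are left zeroed, and the request call is
assumed to return an ERR_PTR on failure:

#include <linux/dma/k3-udma-glue.h>
#include <linux/err.h>

static struct k3_udma_glue_tx_channel *foo_tx_setup(struct device *dev)
{
	struct k3_udma_glue_tx_channel_cfg cfg = {
		.swdata_size = 16,
	};
	struct k3_udma_glue_tx_channel *tx_chn;
	int ret;

	tx_chn = k3_udma_glue_request_tx_chn(dev, "tx0", &cfg);
	if (IS_ERR(tx_chn))
		return tx_chn;

	ret = k3_udma_glue_enable_tx_chn(tx_chn);
	if (ret) {
		k3_udma_glue_release_tx_chn(tx_chn);
		return ERR_PTR(ret);
	}
	return tx_chn;
}
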
diff --git a/include/linux/dma/sprd-dma.h b/include/linux/dma/sprd-dma.h
index ab82df64682a..d09c6f6f6da5 100644
--- a/include/linux/dma/sprd-dma.h
+++ b/include/linux/dma/sprd-dma.h
@@ -118,6 +118,9 @@ enum sprd_dma_int_type {
* struct sprd_dma_linklist - DMA link-list address structure
* @virt_addr: link-list virtual address to configure link-list node
* @phy_addr: link-list physical address to link DMA transfer
+ * @wrap_addr: the wrap address for link-list mode, which means once the
+ * transfer address reaches the wrap address, the next transfer address
+ * will jump to the address specified by wrap_to register.
*
* The Spreadtrum DMA controller supports the link-list mode, that means slaves
* can supply several groups configurations (each configuration represents one
@@ -181,6 +184,7 @@ enum sprd_dma_int_type {
struct sprd_dma_linklist {
unsigned long virt_addr;
phys_addr_t phy_addr;
+ phys_addr_t wrap_addr;
};
#endif
diff --git a/include/linux/dma/ti-cppi5.h b/include/linux/dma/ti-cppi5.h
new file mode 100644
index 000000000000..579356ae447e
--- /dev/null
+++ b/include/linux/dma/ti-cppi5.h
@@ -0,0 +1,1059 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * CPPI5 descriptors interface
+ *
+ * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
+ */
+
+#ifndef __TI_CPPI5_H__
+#define __TI_CPPI5_H__
+
+#include <linux/bitops.h>
+#include <linux/printk.h>
+#include <linux/bug.h>
+
+/**
+ * struct cppi5_desc_hdr_t - Descriptor header, present in all types of
+ * descriptors
+ * @pkt_info0: Packet info word 0 (n/a in Buffer desc)
+ * @pkt_info1: Packet info word 1 (n/a in Buffer desc)
+ * @pkt_info2: Packet info word 2 (n/a in Buffer desc)
+ * @src_dst_tag: Packet info word 3 (n/a in Buffer desc)
+ */
+struct cppi5_desc_hdr_t {
+ u32 pkt_info0;
+ u32 pkt_info1;
+ u32 pkt_info2;
+ u32 src_dst_tag;
+} __packed;
+
+/**
+ * struct cppi5_host_desc_t - Host-mode packet and buffer descriptor definition
+ * @hdr: Descriptor header
+ * @next_desc: word 4/5: Linking word
+ * @buf_ptr: word 6/7: Buffer pointer
+ * @buf_info1: word 8: Buffer valid data length
+ * @org_buf_len: word 9: Original buffer length
+ * @org_buf_ptr: word 10/11: Original buffer pointer
+ * @epib[0]: Extended Packet Info Data (optional, 4 words), and/or
+ * Protocol Specific Data (optional, 0-128 bytes in
+ * multiples of 4), and/or
+ * Other Software Data (0-N bytes, optional)
+ */
+struct cppi5_host_desc_t {
+ struct cppi5_desc_hdr_t hdr;
+ u64 next_desc;
+ u64 buf_ptr;
+ u32 buf_info1;
+ u32 org_buf_len;
+ u64 org_buf_ptr;
+ u32 epib[0];
+} __packed;
+
+#define CPPI5_DESC_MIN_ALIGN (16U)
+
+#define CPPI5_INFO0_HDESC_EPIB_SIZE (16U)
+#define CPPI5_INFO0_HDESC_PSDATA_MAX_SIZE (128U)
+
+#define CPPI5_INFO0_HDESC_TYPE_SHIFT (30U)
+#define CPPI5_INFO0_HDESC_TYPE_MASK GENMASK(31, 30)
+#define CPPI5_INFO0_DESC_TYPE_VAL_HOST (1U)
+#define CPPI5_INFO0_DESC_TYPE_VAL_MONO (2U)
+#define CPPI5_INFO0_DESC_TYPE_VAL_TR (3U)
+#define CPPI5_INFO0_HDESC_EPIB_PRESENT BIT(29)
+/*
+ * Protocol Specific Words location:
+ * 0 = located in the descriptor,
+ * 1 = located in the SOP Buffer immediately prior to the data.
+ */
+#define CPPI5_INFO0_HDESC_PSINFO_LOCATION BIT(28)
+#define CPPI5_INFO0_HDESC_PSINFO_SIZE_SHIFT (22U)
+#define CPPI5_INFO0_HDESC_PSINFO_SIZE_MASK GENMASK(27, 22)
+#define CPPI5_INFO0_HDESC_PKTLEN_SHIFT (0)
+#define CPPI5_INFO0_HDESC_PKTLEN_MASK GENMASK(21, 0)
+
+#define CPPI5_INFO1_DESC_PKTERROR_SHIFT (28U)
+#define CPPI5_INFO1_DESC_PKTERROR_MASK GENMASK(31, 28)
+#define CPPI5_INFO1_HDESC_PSFLGS_SHIFT (24U)
+#define CPPI5_INFO1_HDESC_PSFLGS_MASK GENMASK(27, 24)
+#define CPPI5_INFO1_DESC_PKTID_SHIFT (14U)
+#define CPPI5_INFO1_DESC_PKTID_MASK GENMASK(23, 14)
+#define CPPI5_INFO1_DESC_FLOWID_SHIFT (0)
+#define CPPI5_INFO1_DESC_FLOWID_MASK GENMASK(13, 0)
+#define CPPI5_INFO1_DESC_FLOWID_DEFAULT CPPI5_INFO1_DESC_FLOWID_MASK
+
+#define CPPI5_INFO2_HDESC_PKTTYPE_SHIFT (27U)
+#define CPPI5_INFO2_HDESC_PKTTYPE_MASK GENMASK(31, 27)
+/* Return Policy: 0 - Entire packet 1 - Each buffer */
+#define CPPI5_INFO2_HDESC_RETPOLICY BIT(18)
+/*
+ * Early Return:
+ * 0 = desc pointers should be returned after all reads have been completed
+ * 1 = desc pointers should be returned immediately upon fetching
+ * the descriptor and beginning to transfer data.
+ */
+#define CPPI5_INFO2_HDESC_EARLYRET BIT(17)
+/*
+ * Return Push Policy:
+ * 0 = Descriptor must be returned to tail of queue
+ * 1 = Descriptor must be returned to head of queue
+ */
+#define CPPI5_INFO2_DESC_RETPUSHPOLICY BIT(16)
+#define CPPI5_INFO2_DESC_RETP_MASK GENMASK(18, 16)
+
+#define CPPI5_INFO2_DESC_RETQ_SHIFT (0)
+#define CPPI5_INFO2_DESC_RETQ_MASK GENMASK(15, 0)
+
+#define CPPI5_INFO3_DESC_SRCTAG_SHIFT (16U)
+#define CPPI5_INFO3_DESC_SRCTAG_MASK GENMASK(31, 16)
+#define CPPI5_INFO3_DESC_DSTTAG_SHIFT (0)
+#define CPPI5_INFO3_DESC_DSTTAG_MASK GENMASK(15, 0)
+
+#define CPPI5_BUFINFO1_HDESC_DATA_LEN_SHIFT (0)
+#define CPPI5_BUFINFO1_HDESC_DATA_LEN_MASK GENMASK(27, 0)
+
+#define CPPI5_OBUFINFO0_HDESC_BUF_LEN_SHIFT (0)
+#define CPPI5_OBUFINFO0_HDESC_BUF_LEN_MASK GENMASK(27, 0)
+
+/**
+ * struct cppi5_desc_epib_t - Host Packet Descriptor Extended Packet Info Block
+ * @timestamp: word 0: application specific timestamp
+ * @sw_info0: word 1: Software Info 0
+ * @sw_info1: word 2: Software Info 1
+ * @sw_info2: word 3: Software Info 2
+ */
+struct cppi5_desc_epib_t {
+ u32 timestamp; /* w0: application specific timestamp */
+ u32 sw_info0; /* w1: Software Info 0 */
+ u32 sw_info1; /* w2: Software Info 1 */
+ u32 sw_info2; /* w3: Software Info 2 */
+};
+
+/**
+ * struct cppi5_monolithic_desc_t - Monolithic-mode packet descriptor
+ * @hdr: Descriptor header
+ * @epib:	Extended Packet Info Data (optional, 4 words), and/or
+ * Protocol Specific Data (optional, 0-128 bytes in
+ * multiples of 4), and/or
+ * Other Software Data (0-N bytes, optional)
+ */
+struct cppi5_monolithic_desc_t {
+ struct cppi5_desc_hdr_t hdr;
+ u32 epib[0];
+};
+
+#define CPPI5_INFO2_MDESC_DATA_OFFSET_SHIFT (18U)
+#define CPPI5_INFO2_MDESC_DATA_OFFSET_MASK GENMASK(26, 18)
+
+/*
+ * Reload Count:
+ * 0 = Finish the packet and place the descriptor back on the return queue
+ * 1-0x1ff = Vector to the Reload Index and resume processing
+ * 0x1ff indicates perpetual loop, infinite reload until the channel is stopped
+ */
+#define CPPI5_INFO0_TRDESC_RLDCNT_SHIFT (20U)
+#define CPPI5_INFO0_TRDESC_RLDCNT_MASK GENMASK(28, 20)
+#define CPPI5_INFO0_TRDESC_RLDCNT_MAX (0x1ff)
+#define CPPI5_INFO0_TRDESC_RLDCNT_INFINITE CPPI5_INFO0_TRDESC_RLDCNT_MAX
+#define CPPI5_INFO0_TRDESC_RLDIDX_SHIFT (14U)
+#define CPPI5_INFO0_TRDESC_RLDIDX_MASK GENMASK(19, 14)
+#define CPPI5_INFO0_TRDESC_RLDIDX_MAX (0x3f)
+#define CPPI5_INFO0_TRDESC_LASTIDX_SHIFT (0)
+#define CPPI5_INFO0_TRDESC_LASTIDX_MASK GENMASK(13, 0)
+
+#define CPPI5_INFO1_TRDESC_RECSIZE_SHIFT (24U)
+#define CPPI5_INFO1_TRDESC_RECSIZE_MASK GENMASK(26, 24)
+#define CPPI5_INFO1_TRDESC_RECSIZE_VAL_16B (0)
+#define CPPI5_INFO1_TRDESC_RECSIZE_VAL_32B (1U)
+#define CPPI5_INFO1_TRDESC_RECSIZE_VAL_64B (2U)
+#define CPPI5_INFO1_TRDESC_RECSIZE_VAL_128B (3U)
+
+static inline void cppi5_desc_dump(void *desc, u32 size)
+{
+ print_hex_dump(KERN_ERR, "dump udmap_desc: ", DUMP_PREFIX_NONE,
+ 32, 4, desc, size, false);
+}
+
+#define CPPI5_TDCM_MARKER (0x1)
+/**
+ * cppi5_desc_is_tdcm - check if the paddr indicates Teardown Complete Message
+ * @paddr: Physical address of the packet popped from the ring
+ *
+ * Returns true if the address indicates TDCM
+ */
+static inline bool cppi5_desc_is_tdcm(dma_addr_t paddr)
+{
+ return (paddr & CPPI5_TDCM_MARKER) ? true : false;
+}
+
+/**
+ * cppi5_desc_get_type - get descriptor type
+ * @desc_hdr: packet descriptor/TR header
+ *
+ * Returns descriptor type:
+ * CPPI5_INFO0_DESC_TYPE_VAL_HOST
+ * CPPI5_INFO0_DESC_TYPE_VAL_MONO
+ * CPPI5_INFO0_DESC_TYPE_VAL_TR
+ */
+static inline u32 cppi5_desc_get_type(struct cppi5_desc_hdr_t *desc_hdr)
+{
+ return (desc_hdr->pkt_info0 & CPPI5_INFO0_HDESC_TYPE_MASK) >>
+ CPPI5_INFO0_HDESC_TYPE_SHIFT;
+}
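A minimal sketch of how a completion path might branch on these type values; it assumes the caller has already checked the popped ring address and translated it back to a CPU pointer, which is driver-specific and not covered by this header.

static void example_handle_desc(dma_addr_t paddr, struct cppi5_desc_hdr_t *hdr)
{
	/* a teardown completion marker carries no descriptor behind it */
	if (cppi5_desc_is_tdcm(paddr))
		return;

	switch (cppi5_desc_get_type(hdr)) {
	case CPPI5_INFO0_DESC_TYPE_VAL_HOST:
		/* host packet descriptor: payload sits in linked buffers */
		break;
	case CPPI5_INFO0_DESC_TYPE_VAL_MONO:
		/* monolithic descriptor: payload follows the header */
		break;
	case CPPI5_INFO0_DESC_TYPE_VAL_TR:
		/* TR descriptor: inspect the TR response records */
		break;
	}
}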
+
+/**
+ * cppi5_desc_get_errflags - get Error Flags from Desc
+ * @desc_hdr: packet/TR descriptor header
+ *
+ * Returns Error Flags from Packet/TR Descriptor
+ */
+static inline u32 cppi5_desc_get_errflags(struct cppi5_desc_hdr_t *desc_hdr)
+{
+ return (desc_hdr->pkt_info1 & CPPI5_INFO1_DESC_PKTERROR_MASK) >>
+ CPPI5_INFO1_DESC_PKTERROR_SHIFT;
+}
+
+/**
+ * cppi5_desc_get_pktids - get Packet and Flow ids from Desc
+ * @desc_hdr: packet/TR descriptor header
+ * @pkt_id: Packet ID
+ * @flow_id: Flow ID
+ *
+ * Returns Packet and Flow ids from packet/TR descriptor
+ */
+static inline void cppi5_desc_get_pktids(struct cppi5_desc_hdr_t *desc_hdr,
+ u32 *pkt_id, u32 *flow_id)
+{
+ *pkt_id = (desc_hdr->pkt_info1 & CPPI5_INFO1_DESC_PKTID_MASK) >>
+ CPPI5_INFO1_DESC_PKTID_SHIFT;
+ *flow_id = (desc_hdr->pkt_info1 & CPPI5_INFO1_DESC_FLOWID_MASK) >>
+ CPPI5_INFO1_DESC_FLOWID_SHIFT;
+}
+
+/**
+ * cppi5_desc_set_pktids - set Packet and Flow ids in Desc
+ * @desc_hdr: packet/TR descriptor header
+ * @pkt_id: Packet ID
+ * @flow_id: Flow ID
+ */
+static inline void cppi5_desc_set_pktids(struct cppi5_desc_hdr_t *desc_hdr,
+ u32 pkt_id, u32 flow_id)
+{
+ desc_hdr->pkt_info1 &= ~(CPPI5_INFO1_DESC_PKTID_MASK |
+ CPPI5_INFO1_DESC_FLOWID_MASK);
+ desc_hdr->pkt_info1 |= (pkt_id << CPPI5_INFO1_DESC_PKTID_SHIFT) &
+ CPPI5_INFO1_DESC_PKTID_MASK;
+ desc_hdr->pkt_info1 |= (flow_id << CPPI5_INFO1_DESC_FLOWID_SHIFT) &
+ CPPI5_INFO1_DESC_FLOWID_MASK;
+}
+
+/**
+ * cppi5_desc_set_retpolicy - set Packet Return Policy in Desc
+ * @desc_hdr: packet/TR descriptor header
+ * @flags: flags, supported values
+ * CPPI5_INFO2_HDESC_RETPOLICY
+ * CPPI5_INFO2_HDESC_EARLYRET
+ * CPPI5_INFO2_DESC_RETPUSHPOLICY
+ * @return_ring_id: Packet Return Queue/Ring id, value 0xFFFF reserved
+ */
+static inline void cppi5_desc_set_retpolicy(struct cppi5_desc_hdr_t *desc_hdr,
+ u32 flags, u32 return_ring_id)
+{
+ desc_hdr->pkt_info2 &= ~(CPPI5_INFO2_DESC_RETP_MASK |
+ CPPI5_INFO2_DESC_RETQ_MASK);
+ desc_hdr->pkt_info2 |= flags & CPPI5_INFO2_DESC_RETP_MASK;
+ desc_hdr->pkt_info2 |= return_ring_id & CPPI5_INFO2_DESC_RETQ_MASK;
+}
+
+/**
+ * cppi5_desc_get_tags_ids - get Packet Src/Dst Tags from Desc
+ * @desc_hdr: packet/TR descriptor header
+ * @src_tag_id: Source Tag
+ * @dst_tag_id: Dest Tag
+ *
+ * Returns Packet Src/Dst Tags from packet/TR descriptor
+ */
+static inline void cppi5_desc_get_tags_ids(struct cppi5_desc_hdr_t *desc_hdr,
+ u32 *src_tag_id, u32 *dst_tag_id)
+{
+ if (src_tag_id)
+ *src_tag_id = (desc_hdr->src_dst_tag &
+ CPPI5_INFO3_DESC_SRCTAG_MASK) >>
+ CPPI5_INFO3_DESC_SRCTAG_SHIFT;
+ if (dst_tag_id)
+ *dst_tag_id = desc_hdr->src_dst_tag &
+ CPPI5_INFO3_DESC_DSTTAG_MASK;
+}
+
+/**
+ * cppi5_desc_set_tags_ids - set Packet Src/Dst Tags in HDesc
+ * @desc_hdr: packet/TR descriptor header
+ * @src_tag_id: Source Tag
+ * @dst_tag_id: Dest Tag
+ *
+ * Sets Packet Src/Dst Tags in the packet/TR descriptor
+ */
+static inline void cppi5_desc_set_tags_ids(struct cppi5_desc_hdr_t *desc_hdr,
+ u32 src_tag_id, u32 dst_tag_id)
+{
+ desc_hdr->src_dst_tag = (src_tag_id << CPPI5_INFO3_DESC_SRCTAG_SHIFT) &
+ CPPI5_INFO3_DESC_SRCTAG_MASK;
+ desc_hdr->src_dst_tag |= dst_tag_id & CPPI5_INFO3_DESC_DSTTAG_MASK;
+}
+
+/**
+ * cppi5_hdesc_calc_size - Calculate Host Packet Descriptor size
+ * @epib: is EPIB present
+ * @psdata_size: PSDATA size
+ * @sw_data_size: SWDATA size
+ *
+ * Returns the required Host Packet Descriptor size, or 0 if
+ * @psdata_size > CPPI5_INFO0_HDESC_PSDATA_MAX_SIZE
+ */
+static inline u32 cppi5_hdesc_calc_size(bool epib, u32 psdata_size,
+ u32 sw_data_size)
+{
+ u32 desc_size;
+
+ if (psdata_size > CPPI5_INFO0_HDESC_PSDATA_MAX_SIZE)
+ return 0;
+
+ desc_size = sizeof(struct cppi5_host_desc_t) + psdata_size +
+ sw_data_size;
+
+ if (epib)
+ desc_size += CPPI5_INFO0_HDESC_EPIB_SIZE;
+
+ return ALIGN(desc_size, CPPI5_DESC_MIN_ALIGN);
+}
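As a worked example, a host descriptor carrying the EPIB, 16 bytes of PSDATA and 8 bytes of software data sizes out as below (a sketch, assuming the usual 48-byte fixed part of struct cppi5_host_desc_t).

static inline u32 example_hdesc_size(void)
{
	/*
	 * 48 (fixed part) + 16 (PSDATA) + 8 (SWDATA) + 16 (EPIB) = 88,
	 * rounded up to CPPI5_DESC_MIN_ALIGN -> 96 bytes
	 */
	return cppi5_hdesc_calc_size(true, 16, 8);
}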
+
+/**
+ * cppi5_hdesc_init - Init Host Packet Descriptor
+ * @desc: Host packet descriptor
+ * @flags: supported values
+ * CPPI5_INFO0_HDESC_EPIB_PRESENT
+ * CPPI5_INFO0_HDESC_PSINFO_LOCATION
+ * @psdata_size: PSDATA size
+ *
+ * Initializes the Host Packet Descriptor header: sets the descriptor type,
+ * the requested flags and the PSDATA size, and clears the buffer link.
+ */
+static inline void cppi5_hdesc_init(struct cppi5_host_desc_t *desc, u32 flags,
+ u32 psdata_size)
+{
+ desc->hdr.pkt_info0 = (CPPI5_INFO0_DESC_TYPE_VAL_HOST <<
+ CPPI5_INFO0_HDESC_TYPE_SHIFT) | (flags);
+ desc->hdr.pkt_info0 |= ((psdata_size >> 2) <<
+ CPPI5_INFO0_HDESC_PSINFO_SIZE_SHIFT) &
+ CPPI5_INFO0_HDESC_PSINFO_SIZE_MASK;
+ desc->next_desc = 0;
+}
+
+/**
+ * cppi5_hdesc_update_flags - Replace descriptor flags
+ * @desc: Host packet descriptor
+ * @flags: supported values
+ * CPPI5_INFO0_HDESC_EPIB_PRESENT
+ * CPPI5_INFO0_HDESC_PSINFO_LOCATION
+ */
+static inline void cppi5_hdesc_update_flags(struct cppi5_host_desc_t *desc,
+ u32 flags)
+{
+ desc->hdr.pkt_info0 &= ~(CPPI5_INFO0_HDESC_EPIB_PRESENT |
+ CPPI5_INFO0_HDESC_PSINFO_LOCATION);
+ desc->hdr.pkt_info0 |= flags;
+}
+
+/**
+ * cppi5_hdesc_update_psdata_size - Replace PSdata size
+ * @desc: Host packet descriptor
+ * @psdata_size: PSDATA size
+ */
+static inline void
+cppi5_hdesc_update_psdata_size(struct cppi5_host_desc_t *desc, u32 psdata_size)
+{
+ desc->hdr.pkt_info0 &= ~CPPI5_INFO0_HDESC_PSINFO_SIZE_MASK;
+ desc->hdr.pkt_info0 |= ((psdata_size >> 2) <<
+ CPPI5_INFO0_HDESC_PSINFO_SIZE_SHIFT) &
+ CPPI5_INFO0_HDESC_PSINFO_SIZE_MASK;
+}
+
+/**
+ * cppi5_hdesc_get_psdata_size - get PSdata size in bytes
+ * @desc: Host packet descriptor
+ */
+static inline u32 cppi5_hdesc_get_psdata_size(struct cppi5_host_desc_t *desc)
+{
+ u32 psdata_size = 0;
+
+ if (!(desc->hdr.pkt_info0 & CPPI5_INFO0_HDESC_PSINFO_LOCATION))
+ psdata_size = (desc->hdr.pkt_info0 &
+ CPPI5_INFO0_HDESC_PSINFO_SIZE_MASK) >>
+ CPPI5_INFO0_HDESC_PSINFO_SIZE_SHIFT;
+
+ return (psdata_size << 2);
+}
+
+/**
+ * cppi5_hdesc_get_pktlen - get Packet Length from HDesc
+ * @desc: Host packet descriptor
+ *
+ * Returns Packet Length from Host Packet Descriptor
+ */
+static inline u32 cppi5_hdesc_get_pktlen(struct cppi5_host_desc_t *desc)
+{
+ return (desc->hdr.pkt_info0 & CPPI5_INFO0_HDESC_PKTLEN_MASK);
+}
+
+/**
+ * cppi5_hdesc_set_pktlen - set Packet Length in HDesc
+ * @desc: Host packet descriptor
+ * @pkt_len: Packet Length
+ */
+static inline void cppi5_hdesc_set_pktlen(struct cppi5_host_desc_t *desc,
+ u32 pkt_len)
+{
+ desc->hdr.pkt_info0 &= ~CPPI5_INFO0_HDESC_PKTLEN_MASK;
+ desc->hdr.pkt_info0 |= (pkt_len & CPPI5_INFO0_HDESC_PKTLEN_MASK);
+}
+
+/**
+ * cppi5_hdesc_get_psflags - get Protocol Specific Flags from HDesc
+ * @desc: Host packet descriptor
+ *
+ * Returns Protocol Specific Flags from Host Packet Descriptor
+ */
+static inline u32 cppi5_hdesc_get_psflags(struct cppi5_host_desc_t *desc)
+{
+ return (desc->hdr.pkt_info1 & CPPI5_INFO1_HDESC_PSFLGS_MASK) >>
+ CPPI5_INFO1_HDESC_PSFLGS_SHIFT;
+}
+
+/**
+ * cppi5_hdesc_set_psflags - set Protocol Specific Flags in HDesc
+ * @desc: Host packet descriptor
+ * @ps_flags: Protocol Specific Flags
+ */
+static inline void cppi5_hdesc_set_psflags(struct cppi5_host_desc_t *desc,
+ u32 ps_flags)
+{
+ desc->hdr.pkt_info1 &= ~CPPI5_INFO1_HDESC_PSFLGS_MASK;
+ desc->hdr.pkt_info1 |= (ps_flags <<
+ CPPI5_INFO1_HDESC_PSFLGS_SHIFT) &
+ CPPI5_INFO1_HDESC_PSFLGS_MASK;
+}
+
+/**
+ * cppi5_hdesc_get_pkttype - get Packet Type from HDesc
+ * @desc: Host packet descriptor
+ */
+static inline u32 cppi5_hdesc_get_pkttype(struct cppi5_host_desc_t *desc)
+{
+ return (desc->hdr.pkt_info2 & CPPI5_INFO2_HDESC_PKTTYPE_MASK) >>
+ CPPI5_INFO2_HDESC_PKTTYPE_SHIFT;
+}
+
+/**
+ * cppi5_hdesc_set_pkttype - set Packet Type in HDesc
+ * @desc: Host packet descriptor
+ * @pkt_type: Packet Type
+ */
+static inline void cppi5_hdesc_set_pkttype(struct cppi5_host_desc_t *desc,
+ u32 pkt_type)
+{
+ desc->hdr.pkt_info2 &= ~CPPI5_INFO2_HDESC_PKTTYPE_MASK;
+ desc->hdr.pkt_info2 |=
+ (pkt_type << CPPI5_INFO2_HDESC_PKTTYPE_SHIFT) &
+ CPPI5_INFO2_HDESC_PKTTYPE_MASK;
+}
+
+/**
+ * cppi5_hdesc_attach_buf - attach buffer to HDesc
+ * @desc: Host packet descriptor
+ * @buf: Buffer physical address
+ * @buf_data_len: Buffer length
+ * @obuf: Original Buffer physical address
+ * @obuf_len: Original Buffer length
+ *
+ * Attaches buffer to Host Packet Descriptor
+ */
+static inline void cppi5_hdesc_attach_buf(struct cppi5_host_desc_t *desc,
+ dma_addr_t buf, u32 buf_data_len,
+ dma_addr_t obuf, u32 obuf_len)
+{
+ desc->buf_ptr = buf;
+ desc->buf_info1 = buf_data_len & CPPI5_BUFINFO1_HDESC_DATA_LEN_MASK;
+ desc->org_buf_ptr = obuf;
+ desc->org_buf_len = obuf_len & CPPI5_OBUFINFO0_HDESC_BUF_LEN_MASK;
+}
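Putting the setters above together, a TX host descriptor might be populated roughly as follows; the packet type, flow id and return ring are illustrative values only, and allocating the descriptor and DMA-mapping the buffer are left to the caller.

static void example_prep_tx_hdesc(struct cppi5_host_desc_t *desc,
				  dma_addr_t buf_dma, u32 buf_len)
{
	cppi5_hdesc_init(desc, CPPI5_INFO0_HDESC_EPIB_PRESENT, 0);
	cppi5_hdesc_set_pkttype(desc, 0x7);		/* example packet type */
	cppi5_hdesc_set_pktlen(desc, buf_len);
	cppi5_desc_set_pktids(&desc->hdr, 0,
			      CPPI5_INFO1_DESC_FLOWID_DEFAULT);
	/* return the whole packet to ring 0 once transmitted */
	cppi5_desc_set_retpolicy(&desc->hdr, 0, 0);
	cppi5_hdesc_attach_buf(desc, buf_dma, buf_len, buf_dma, buf_len);
}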
+
+static inline void cppi5_hdesc_get_obuf(struct cppi5_host_desc_t *desc,
+ dma_addr_t *obuf, u32 *obuf_len)
+{
+ *obuf = desc->org_buf_ptr;
+ *obuf_len = desc->org_buf_len & CPPI5_OBUFINFO0_HDESC_BUF_LEN_MASK;
+}
+
+static inline void cppi5_hdesc_reset_to_original(struct cppi5_host_desc_t *desc)
+{
+ desc->buf_ptr = desc->org_buf_ptr;
+ desc->buf_info1 = desc->org_buf_len;
+}
+
+/**
+ * cppi5_hdesc_link_hbdesc - link Host Buffer Descriptor to HDesc
+ * @desc: Host Packet Descriptor
+ * @hbuf_desc: Host Buffer Descriptor physical address
+ *
+ * Adds and links a Host Buffer Descriptor to the HDesc
+ */
+static inline void cppi5_hdesc_link_hbdesc(struct cppi5_host_desc_t *desc,
+ dma_addr_t hbuf_desc)
+{
+ desc->next_desc = hbuf_desc;
+}
+
+static inline dma_addr_t
+cppi5_hdesc_get_next_hbdesc(struct cppi5_host_desc_t *desc)
+{
+ return (dma_addr_t)desc->next_desc;
+}
+
+static inline void cppi5_hdesc_reset_hbdesc(struct cppi5_host_desc_t *desc)
+{
+ desc->hdr = (struct cppi5_desc_hdr_t) { 0 };
+ desc->next_desc = 0;
+}
+
+/**
+ * cppi5_hdesc_epib_present - check if EPIB present
+ * @desc_hdr: packet descriptor/TR header
+ *
+ * Returns true if EPIB present in the packet
+ */
+static inline bool cppi5_hdesc_epib_present(struct cppi5_desc_hdr_t *desc_hdr)
+{
+ return !!(desc_hdr->pkt_info0 & CPPI5_INFO0_HDESC_EPIB_PRESENT);
+}
+
+/**
+ * cppi5_hdesc_get_psdata - Get pointer to PSDATA
+ * @desc: Host packet descriptor
+ *
+ * Returns a pointer to the PSDATA area in the HDesc, or
+ * NULL if the PSDATA is placed at the start of the data buffer.
+ */
+static inline void *cppi5_hdesc_get_psdata(struct cppi5_host_desc_t *desc)
+{
+ u32 psdata_size;
+ void *psdata;
+
+ if (desc->hdr.pkt_info0 & CPPI5_INFO0_HDESC_PSINFO_LOCATION)
+ return NULL;
+
+ psdata_size = (desc->hdr.pkt_info0 &
+ CPPI5_INFO0_HDESC_PSINFO_SIZE_MASK) >>
+ CPPI5_INFO0_HDESC_PSINFO_SIZE_SHIFT;
+
+ if (!psdata_size)
+ return NULL;
+
+ psdata = &desc->epib;
+
+ if (cppi5_hdesc_epib_present(&desc->hdr))
+ psdata += CPPI5_INFO0_HDESC_EPIB_SIZE;
+
+ return psdata;
+}
+
+/**
+ * cppi5_hdesc_get_swdata - Get pointer to swdata
+ * @desc: Host packet descriptor
+ *
+ * Returns a pointer to the SWDATA area in the HDesc.
+ * NOTE: it is the caller's responsibility to ensure the hdesc actually has swdata.
+ */
+static inline void *cppi5_hdesc_get_swdata(struct cppi5_host_desc_t *desc)
+{
+ u32 psdata_size = 0;
+ void *swdata;
+
+ if (!(desc->hdr.pkt_info0 & CPPI5_INFO0_HDESC_PSINFO_LOCATION))
+ psdata_size = (desc->hdr.pkt_info0 &
+ CPPI5_INFO0_HDESC_PSINFO_SIZE_MASK) >>
+ CPPI5_INFO0_HDESC_PSINFO_SIZE_SHIFT;
+
+ swdata = &desc->epib;
+
+ if (cppi5_hdesc_epib_present(&desc->hdr))
+ swdata += CPPI5_INFO0_HDESC_EPIB_SIZE;
+
+ swdata += (psdata_size << 2);
+
+ return swdata;
+}
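One common use of the software data area is to stash a driver cookie across the hardware round trip, sketched below; this is only valid when the descriptor was sized with a non-zero sw_data_size in cppi5_hdesc_calc_size().

static void example_store_cookie(struct cppi5_host_desc_t *desc, void *cookie)
{
	void **swdata = cppi5_hdesc_get_swdata(desc);

	*swdata = cookie;	/* e.g. the originating skb or request */
}

static void *example_fetch_cookie(struct cppi5_host_desc_t *desc)
{
	void **swdata = cppi5_hdesc_get_swdata(desc);

	return *swdata;
}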
+
+/* ================================== TR ================================== */
+
+#define CPPI5_TR_TYPE_SHIFT (0U)
+#define CPPI5_TR_TYPE_MASK GENMASK(3, 0)
+#define CPPI5_TR_STATIC BIT(4)
+#define CPPI5_TR_WAIT BIT(5)
+#define CPPI5_TR_EVENT_SIZE_SHIFT (6U)
+#define CPPI5_TR_EVENT_SIZE_MASK GENMASK(7, 6)
+#define CPPI5_TR_TRIGGER0_SHIFT (8U)
+#define CPPI5_TR_TRIGGER0_MASK GENMASK(9, 8)
+#define CPPI5_TR_TRIGGER0_TYPE_SHIFT (10U)
+#define CPPI5_TR_TRIGGER0_TYPE_MASK GENMASK(11, 10)
+#define CPPI5_TR_TRIGGER1_SHIFT (12U)
+#define CPPI5_TR_TRIGGER1_MASK GENMASK(13, 12)
+#define CPPI5_TR_TRIGGER1_TYPE_SHIFT (14U)
+#define CPPI5_TR_TRIGGER1_TYPE_MASK GENMASK(15, 14)
+#define CPPI5_TR_CMD_ID_SHIFT (16U)
+#define CPPI5_TR_CMD_ID_MASK GENMASK(23, 16)
+#define CPPI5_TR_CSF_FLAGS_SHIFT (24U)
+#define CPPI5_TR_CSF_FLAGS_MASK GENMASK(31, 24)
+#define CPPI5_TR_CSF_SA_INDIRECT BIT(0)
+#define CPPI5_TR_CSF_DA_INDIRECT BIT(1)
+#define CPPI5_TR_CSF_SUPR_EVT BIT(2)
+#define CPPI5_TR_CSF_EOL_ADV_SHIFT (4U)
+#define CPPI5_TR_CSF_EOL_ADV_MASK GENMASK(6, 4)
+#define CPPI5_TR_CSF_EOP BIT(7)
+
+/**
+ * enum cppi5_tr_types - TR types
+ * @CPPI5_TR_TYPE0: One dimensional data move
+ * @CPPI5_TR_TYPE1: Two dimensional data move
+ * @CPPI5_TR_TYPE2: Three dimensional data move
+ * @CPPI5_TR_TYPE3: Four dimensional data move
+ * @CPPI5_TR_TYPE4: Four dimensional data move with data formatting
+ * @CPPI5_TR_TYPE5: Four dimensional Cache Warm
+ * @CPPI5_TR_TYPE8: Four Dimensional Block Move
+ * @CPPI5_TR_TYPE9: Four Dimensional Block Move with Repacking
+ * @CPPI5_TR_TYPE10: Two Dimensional Block Move
+ * @CPPI5_TR_TYPE11: Two Dimensional Block Move with Repacking
+ * @CPPI5_TR_TYPE15: Four Dimensional Block Move with Repacking and
+ * Indirection
+ */
+enum cppi5_tr_types {
+ CPPI5_TR_TYPE0 = 0,
+ CPPI5_TR_TYPE1,
+ CPPI5_TR_TYPE2,
+ CPPI5_TR_TYPE3,
+ CPPI5_TR_TYPE4,
+ CPPI5_TR_TYPE5,
+ /* type6-7: Reserved */
+ CPPI5_TR_TYPE8 = 8,
+ CPPI5_TR_TYPE9,
+ CPPI5_TR_TYPE10,
+ CPPI5_TR_TYPE11,
+ /* type12-14: Reserved */
+ CPPI5_TR_TYPE15 = 15,
+ CPPI5_TR_TYPE_MAX
+};
+
+/**
+ * enum cppi5_tr_event_size - TR Flags EVENT_SIZE field specifies when an event
+ * is generated for each TR.
+ * @CPPI5_TR_EVENT_SIZE_COMPLETION: When TR is complete and all status for
+ * the TR has been received
+ * @CPPI5_TR_EVENT_SIZE_ICNT1_DEC: Type 0: when the last data transaction
+ * is sent for the TR
+ * Type 1-11: when ICNT1 is decremented
+ * @CPPI5_TR_EVENT_SIZE_ICNT2_DEC: Type 0-1,10-11: when the last data
+ * transaction is sent for the TR
+ * All other types: when ICNT2 is
+ * decremented
+ * @CPPI5_TR_EVENT_SIZE_ICNT3_DEC: Type 0-2,10-11: when the last data
+ * transaction is sent for the TR
+ * All other types: when ICNT3 is
+ * decremented
+ */
+enum cppi5_tr_event_size {
+ CPPI5_TR_EVENT_SIZE_COMPLETION,
+ CPPI5_TR_EVENT_SIZE_ICNT1_DEC,
+ CPPI5_TR_EVENT_SIZE_ICNT2_DEC,
+ CPPI5_TR_EVENT_SIZE_ICNT3_DEC,
+ CPPI5_TR_EVENT_SIZE_MAX
+};
+
+/**
+ * enum cppi5_tr_trigger - TR Flags TRIGGERx field specifies the type of trigger
+ * used to enable the TR to transfer data as specified
+ * by TRIGGERx_TYPE field.
+ * @CPPI5_TR_TRIGGER_NONE: No trigger
+ * @CPPI5_TR_TRIGGER_GLOBAL0: Global trigger 0
+ * @CPPI5_TR_TRIGGER_GLOBAL1: Global trigger 1
+ * @CPPI5_TR_TRIGGER_LOCAL_EVENT: Local Event
+ */
+enum cppi5_tr_trigger {
+ CPPI5_TR_TRIGGER_NONE,
+ CPPI5_TR_TRIGGER_GLOBAL0,
+ CPPI5_TR_TRIGGER_GLOBAL1,
+ CPPI5_TR_TRIGGER_LOCAL_EVENT,
+ CPPI5_TR_TRIGGER_MAX
+};
+
+/**
+ * enum cppi5_tr_trigger_type - TR Flags TRIGGERx_TYPE field specifies the type
+ * of data transfer that will be enabled by
+ * receiving a trigger as specified by TRIGGERx.
+ * @CPPI5_TR_TRIGGER_TYPE_ICNT1_DEC: The second inner most loop (ICNT1) will
+ * be decremented by 1
+ * @CPPI5_TR_TRIGGER_TYPE_ICNT2_DEC: The third inner most loop (ICNT2) will
+ * be decremented by 1
+ * @CPPI5_TR_TRIGGER_TYPE_ICNT3_DEC: The outer most loop (ICNT3) will be
+ * decremented by 1
+ * @CPPI5_TR_TRIGGER_TYPE_ALL: The entire TR will be allowed to
+ * complete
+ */
+enum cppi5_tr_trigger_type {
+ CPPI5_TR_TRIGGER_TYPE_ICNT1_DEC,
+ CPPI5_TR_TRIGGER_TYPE_ICNT2_DEC,
+ CPPI5_TR_TRIGGER_TYPE_ICNT3_DEC,
+ CPPI5_TR_TRIGGER_TYPE_ALL,
+ CPPI5_TR_TRIGGER_TYPE_MAX
+};
+
+typedef u32 cppi5_tr_flags_t;
+
+/**
+ * struct cppi5_tr_type0_t - Type 0 (One dimensional data move) TR (16 byte)
+ * @flags: TR flags (type, triggers, event, configuration)
+ * @icnt0: Total loop iteration count for level 0 (innermost)
+ * @_reserved: Not used
+ * @addr: Starting address for the source data or destination data
+ */
+struct cppi5_tr_type0_t {
+ cppi5_tr_flags_t flags;
+ u16 icnt0;
+ u16 _reserved;
+ u64 addr;
+} __aligned(16) __packed;
+
+/**
+ * struct cppi5_tr_type1_t - Type 1 (Two dimensional data move) TR (32 byte)
+ * @flags: TR flags (type, triggers, event, configuration)
+ * @icnt0: Total loop iteration count for level 0 (innermost)
+ * @icnt1: Total loop iteration count for level 1
+ * @addr: Starting address for the source data or destination data
+ * @dim1: Signed dimension for loop level 1
+ */
+struct cppi5_tr_type1_t {
+ cppi5_tr_flags_t flags;
+ u16 icnt0;
+ u16 icnt1;
+ u64 addr;
+ s32 dim1;
+} __aligned(32) __packed;
+
+/**
+ * struct cppi5_tr_type2_t - Type 2 (Three dimensional data move) TR (32 byte)
+ * @flags: TR flags (type, triggers, event, configuration)
+ * @icnt0: Total loop iteration count for level 0 (innermost)
+ * @icnt1: Total loop iteration count for level 1
+ * @addr: Starting address for the source data or destination data
+ * @dim1: Signed dimension for loop level 1
+ * @icnt2: Total loop iteration count for level 2
+ * @_reserved: Not used
+ * @dim2: Signed dimension for loop level 2
+ */
+struct cppi5_tr_type2_t {
+ cppi5_tr_flags_t flags;
+ u16 icnt0;
+ u16 icnt1;
+ u64 addr;
+ s32 dim1;
+ u16 icnt2;
+ u16 _reserved;
+ s32 dim2;
+} __aligned(32) __packed;
+
+/**
+ * struct cppi5_tr_type3_t - Type 3 (Four dimensional data move) TR (32 byte)
+ * @flags: TR flags (type, triggers, event, configuration)
+ * @icnt0: Total loop iteration count for level 0 (innermost)
+ * @icnt1: Total loop iteration count for level 1
+ * @addr: Starting address for the source data or destination data
+ * @dim1: Signed dimension for loop level 1
+ * @icnt2: Total loop iteration count for level 2
+ * @icnt3: Total loop iteration count for level 3 (outermost)
+ * @dim2: Signed dimension for loop level 2
+ * @dim3: Signed dimension for loop level 3
+ */
+struct cppi5_tr_type3_t {
+ cppi5_tr_flags_t flags;
+ u16 icnt0;
+ u16 icnt1;
+ u64 addr;
+ s32 dim1;
+ u16 icnt2;
+ u16 icnt3;
+ s32 dim2;
+ s32 dim3;
+} __aligned(32) __packed;
+
+/**
+ * struct cppi5_tr_type15_t - Type 15 (Four Dimensional Block Copy with
+ * Repacking and Indirection Support) TR (64 byte)
+ * @flags: TR flags (type, triggers, event, configuration)
+ * @icnt0: Total loop iteration count for level 0 (innermost) for
+ * source
+ * @icnt1: Total loop iteration count for level 1 for source
+ * @addr: Starting address for the source data
+ * @dim1: Signed dimension for loop level 1 for source
+ * @icnt2: Total loop iteration count for level 2 for source
+ * @icnt3: Total loop iteration count for level 3 (outermost) for
+ * source
+ * @dim2: Signed dimension for loop level 2 for source
+ * @dim3: Signed dimension for loop level 3 for source
+ * @_reserved: Not used
+ * @ddim1: Signed dimension for loop level 1 for destination
+ * @daddr: Starting address for the destination data
+ * @ddim2: Signed dimension for loop level 2 for destination
+ * @ddim3: Signed dimension for loop level 3 for destination
+ * @dicnt0: Total loop iteration count for level 0 (innermost) for
+ * destination
+ * @dicnt1: Total loop iteration count for level 1 for destination
+ * @dicnt2: Total loop iteration count for level 2 for destination
+ * @dicnt3:	Total loop iteration count for level 3 (outermost) for
+ * destination
+ */
+struct cppi5_tr_type15_t {
+ cppi5_tr_flags_t flags;
+ u16 icnt0;
+ u16 icnt1;
+ u64 addr;
+ s32 dim1;
+ u16 icnt2;
+ u16 icnt3;
+ s32 dim2;
+ s32 dim3;
+ u32 _reserved;
+ s32 ddim1;
+ u64 daddr;
+ s32 ddim2;
+ s32 ddim3;
+ u16 dicnt0;
+ u16 dicnt1;
+ u16 dicnt2;
+ u16 dicnt3;
+} __aligned(64) __packed;
+
+/**
+ * struct cppi5_tr_resp_t - TR response record
+ * @status: Status type and info
+ * @_reserved: Not used
+ * @cmd_id: Command ID for the TR for TR identification
+ * @flags: Configuration Specific Flags
+ */
+struct cppi5_tr_resp_t {
+ u8 status;
+ u8 _reserved;
+ u8 cmd_id;
+ u8 flags;
+} __packed;
+
+#define CPPI5_TR_RESPONSE_STATUS_TYPE_SHIFT (0U)
+#define CPPI5_TR_RESPONSE_STATUS_TYPE_MASK GENMASK(3, 0)
+#define CPPI5_TR_RESPONSE_STATUS_INFO_SHIFT (4U)
+#define CPPI5_TR_RESPONSE_STATUS_INFO_MASK GENMASK(7, 4)
+#define CPPI5_TR_RESPONSE_CMDID_SHIFT (16U)
+#define CPPI5_TR_RESPONSE_CMDID_MASK GENMASK(23, 16)
+#define CPPI5_TR_RESPONSE_CFG_SPECIFIC_SHIFT (24U)
+#define CPPI5_TR_RESPONSE_CFG_SPECIFIC_MASK GENMASK(31, 24)
+
+/**
+ * enum cppi5_tr_resp_status_type - TR Response Status Type field is used to
+ * determine what type of status is being
+ * returned.
+ * @CPPI5_TR_RESPONSE_STATUS_NONE: No error, completion: completed
+ * @CPPI5_TR_RESPONSE_STATUS_TRANSFER_ERR: Transfer Error, completion: none
+ * or partially completed
+ * @CPPI5_TR_RESPONSE_STATUS_ABORTED_ERR: Aborted Error, completion: none
+ * or partially completed
+ * @CPPI5_TR_RESPONSE_STATUS_SUBMISSION_ERR: Submission Error, completion:
+ * none
+ * @CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_ERR: Unsupported Error, completion:
+ * none
+ * @CPPI5_TR_RESPONSE_STATUS_TRANSFER_EXCEPTION: Transfer Exception, completion:
+ * partially completed
+ * @CPPI5_TR_RESPONSE_STATUS__TEARDOWN_FLUSH: Teardown Flush, completion: none
+ */
+enum cppi5_tr_resp_status_type {
+ CPPI5_TR_RESPONSE_STATUS_NONE,
+ CPPI5_TR_RESPONSE_STATUS_TRANSFER_ERR,
+ CPPI5_TR_RESPONSE_STATUS_ABORTED_ERR,
+ CPPI5_TR_RESPONSE_STATUS_SUBMISSION_ERR,
+ CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_ERR,
+ CPPI5_TR_RESPONSE_STATUS_TRANSFER_EXCEPTION,
+ CPPI5_TR_RESPONSE_STATUS__TEARDOWN_FLUSH,
+ CPPI5_TR_RESPONSE_STATUS_MAX
+};
+
+/**
+ * enum cppi5_tr_resp_status_submission - TR Response Status field values which
+ *					      correspond to a Submission Error
+ * @CPPI5_TR_RESPONSE_STATUS_SUBMISSION_ICNT0: ICNT0 was 0
+ * @CPPI5_TR_RESPONSE_STATUS_SUBMISSION_FIFO_FULL: Channel FIFO was full when TR
+ * received
+ * @CPPI5_TR_RESPONSE_STATUS_SUBMISSION_OWN: Channel is not owned by the
+ * submitter
+ */
+enum cppi5_tr_resp_status_submission {
+ CPPI5_TR_RESPONSE_STATUS_SUBMISSION_ICNT0,
+ CPPI5_TR_RESPONSE_STATUS_SUBMISSION_FIFO_FULL,
+ CPPI5_TR_RESPONSE_STATUS_SUBMISSION_OWN,
+ CPPI5_TR_RESPONSE_STATUS_SUBMISSION_MAX
+};
+
+/**
+ * enum cppi5_tr_resp_status_unsupported - TR Response Status field values which
+ *					       correspond to an Unsupported Error
+ * @CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_TR_TYPE: TR Type not supported
+ * @CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_STATIC: STATIC not supported
+ * @CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_EOL: EOL not supported
+ * @CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_CFG_SPECIFIC: CONFIGURATION SPECIFIC
+ * not supported
+ * @CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_AMODE: AMODE not supported
+ * @CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_ELTYPE: ELTYPE not supported
+ * @CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_DFMT: DFMT not supported
+ * @CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_SECTR: SECTR not supported
+ * @CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_AMODE_SPECIFIC: AMODE SPECIFIC field
+ * not supported
+ */
+enum cppi5_tr_resp_status_unsupported {
+ CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_TR_TYPE,
+ CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_STATIC,
+ CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_EOL,
+ CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_CFG_SPECIFIC,
+ CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_AMODE,
+ CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_ELTYPE,
+ CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_DFMT,
+ CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_SECTR,
+ CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_AMODE_SPECIFIC,
+ CPPI5_TR_RESPONSE_STATUS_UNSUPPORTED_MAX
+};
+
+/**
+ * cppi5_trdesc_calc_size - Calculate TR Descriptor size
+ * @tr_count: number of TR records
+ * @tr_size: Nominal size of TR record (max) [16, 32, 64, 128]
+ *
+ * Returns required TR Descriptor size
+ */
+static inline size_t cppi5_trdesc_calc_size(u32 tr_count, u32 tr_size)
+{
+ /*
+ * The Size of a TR descriptor is:
+	 * 1 x tr_size : the first 16 bytes are used by the packet info block +
+ * tr_count x tr_size : Transfer Request Records +
+ * tr_count x sizeof(struct cppi5_tr_resp_t) : Transfer Response Records
+ */
+ return tr_size * (tr_count + 1) +
+ sizeof(struct cppi5_tr_resp_t) * tr_count;
+}
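As a worked example, a TR descriptor holding two 32-byte TRs needs one 32-byte slot for the packet info block, two 32-byte TR records and two 4-byte response records.

static inline size_t example_trdesc_size(void)
{
	/* 32 * (2 + 1) + 4 * 2 = 104 bytes for two 32-byte TRs */
	return cppi5_trdesc_calc_size(2, 32);
}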
+
+/**
+ * cppi5_trdesc_init - Init TR Descriptor
+ * @desc_hdr: TR Descriptor header
+ * @tr_count: number of TR records
+ * @tr_size: Nominal size of TR record (max) [16, 32, 64, 128]
+ * @reload_idx: Absolute index to jump to on the 2nd and following passes
+ * through the TR packet.
+ * @reload_count: Number of times to jump from last entry to reload_idx. 0x1ff
+ * indicates infinite looping.
+ *
+ * Init TR Descriptor
+ */
+static inline void cppi5_trdesc_init(struct cppi5_desc_hdr_t *desc_hdr,
+ u32 tr_count, u32 tr_size, u32 reload_idx,
+ u32 reload_count)
+{
+ desc_hdr->pkt_info0 = CPPI5_INFO0_DESC_TYPE_VAL_TR <<
+ CPPI5_INFO0_HDESC_TYPE_SHIFT;
+ desc_hdr->pkt_info0 |=
+ (reload_count << CPPI5_INFO0_TRDESC_RLDCNT_SHIFT) &
+ CPPI5_INFO0_TRDESC_RLDCNT_MASK;
+ desc_hdr->pkt_info0 |=
+ (reload_idx << CPPI5_INFO0_TRDESC_RLDIDX_SHIFT) &
+ CPPI5_INFO0_TRDESC_RLDIDX_MASK;
+ desc_hdr->pkt_info0 |= (tr_count - 1) & CPPI5_INFO0_TRDESC_LASTIDX_MASK;
+
+ desc_hdr->pkt_info1 |= ((ffs(tr_size >> 4) - 1) <<
+ CPPI5_INFO1_TRDESC_RECSIZE_SHIFT) &
+ CPPI5_INFO1_TRDESC_RECSIZE_MASK;
+}
+
+/**
+ * cppi5_tr_init - Init TR record
+ * @flags: Pointer to the TR's flags
+ * @type: TR type
+ * @static_tr: TR is static
+ * @wait: Wait for TR completion before allowing the next TR to start
+ * @event_size: output event generation cfg
+ * @cmd_id: TR identifier (application specific)
+ *
+ * Init TR record
+ */
+static inline void cppi5_tr_init(cppi5_tr_flags_t *flags,
+ enum cppi5_tr_types type, bool static_tr,
+ bool wait, enum cppi5_tr_event_size event_size,
+ u32 cmd_id)
+{
+ *flags = type;
+ *flags |= (event_size << CPPI5_TR_EVENT_SIZE_SHIFT) &
+ CPPI5_TR_EVENT_SIZE_MASK;
+
+ *flags |= (cmd_id << CPPI5_TR_CMD_ID_SHIFT) &
+ CPPI5_TR_CMD_ID_MASK;
+
+ if (static_tr && (type == CPPI5_TR_TYPE8 || type == CPPI5_TR_TYPE9))
+ *flags |= CPPI5_TR_STATIC;
+
+ if (wait)
+ *flags |= CPPI5_TR_WAIT;
+}
+
+/**
+ * cppi5_tr_set_trigger - Configure trigger0/1 and trigger0/1_type
+ * @flags: Pointer to the TR's flags
+ * @trigger0: trigger0 selection
+ * @trigger0_type: type of data transfer that will be enabled by trigger0
+ * @trigger1: trigger1 selection
+ * @trigger1_type: type of data transfer that will be enabled by trigger1
+ *
+ * Configure the triggers for the TR
+ */
+static inline void cppi5_tr_set_trigger(cppi5_tr_flags_t *flags,
+ enum cppi5_tr_trigger trigger0,
+ enum cppi5_tr_trigger_type trigger0_type,
+ enum cppi5_tr_trigger trigger1,
+ enum cppi5_tr_trigger_type trigger1_type)
+{
+ *flags &= ~(CPPI5_TR_TRIGGER0_MASK | CPPI5_TR_TRIGGER0_TYPE_MASK |
+ CPPI5_TR_TRIGGER1_MASK | CPPI5_TR_TRIGGER1_TYPE_MASK);
+ *flags |= (trigger0 << CPPI5_TR_TRIGGER0_SHIFT) &
+ CPPI5_TR_TRIGGER0_MASK;
+ *flags |= (trigger0_type << CPPI5_TR_TRIGGER0_TYPE_SHIFT) &
+ CPPI5_TR_TRIGGER0_TYPE_MASK;
+
+ *flags |= (trigger1 << CPPI5_TR_TRIGGER1_SHIFT) &
+ CPPI5_TR_TRIGGER1_MASK;
+ *flags |= (trigger1_type << CPPI5_TR_TRIGGER1_TYPE_SHIFT) &
+ CPPI5_TR_TRIGGER1_TYPE_MASK;
+}
+
+/**
+ * cppi5_tr_csf_set - Update the Configuration Specific Flags
+ * @flags: Pointer to the TR's flags
+ * @csf: Configuration specific flags
+ *
+ * Set a bit in Configuration Specific Flags section of the TR flags.
+ */
+static inline void cppi5_tr_csf_set(cppi5_tr_flags_t *flags, u32 csf)
+{
+ *flags &= ~CPPI5_TR_CSF_FLAGS_MASK;
+ *flags |= (csf << CPPI5_TR_CSF_FLAGS_SHIFT) &
+ CPPI5_TR_CSF_FLAGS_MASK;
+}
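A sketch of how the TR helpers combine to describe a single type 1 (two-dimensional) transfer; the trigger, event and loop values are illustrative, and allocating/queuing the descriptor is the caller's responsibility.

static void example_setup_type1_tr(struct cppi5_desc_hdr_t *desc_hdr,
				   struct cppi5_tr_type1_t *tr,
				   dma_addr_t src, u16 burst_len, u16 bursts)
{
	/* one TR, 32-byte records, no reload */
	cppi5_trdesc_init(desc_hdr, 1, 32, 0, 0);

	cppi5_tr_init(&tr->flags, CPPI5_TR_TYPE1, false, false,
		      CPPI5_TR_EVENT_SIZE_COMPLETION, 0);
	cppi5_tr_set_trigger(&tr->flags,
			     CPPI5_TR_TRIGGER_NONE, CPPI5_TR_TRIGGER_TYPE_ALL,
			     CPPI5_TR_TRIGGER_NONE, CPPI5_TR_TRIGGER_TYPE_ALL);
	cppi5_tr_csf_set(&tr->flags, CPPI5_TR_CSF_EOP);	/* last TR in packet */

	tr->addr = src;
	tr->icnt0 = burst_len;	/* bytes moved per inner iteration */
	tr->icnt1 = bursts;	/* number of inner iterations */
	tr->dim1 = burst_len;	/* advance the address by one burst per loop */
}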
+
+#endif /* __TI_CPPI5_H__ */
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index 8fcdee1c0cf9..64461fc64e1b 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -219,6 +219,62 @@ typedef struct { DECLARE_BITMAP(bits, DMA_TX_TYPE_END); } dma_cap_mask_t;
* @bytes_transferred: byte counter
*/
+/**
+ * enum dma_desc_metadata_mode - per descriptor metadata mode types supported
+ * @DESC_METADATA_CLIENT - the metadata buffer is allocated/provided by the
+ * client driver and it is attached (via the dmaengine_desc_attach_metadata()
+ * helper) to the descriptor.
+ *
+ * Client drivers interested in using this mode can follow:
+ * - DMA_MEM_TO_DEV / DEV_MEM_TO_MEM:
+ * 1. prepare the descriptor (dmaengine_prep_*)
+ * construct the metadata in the client's buffer
+ * 2. use dmaengine_desc_attach_metadata() to attach the buffer to the
+ * descriptor
+ * 3. submit the transfer
+ * - DMA_DEV_TO_MEM:
+ * 1. prepare the descriptor (dmaengine_prep_*)
+ * 2. use dmaengine_desc_attach_metadata() to attach the buffer to the
+ * descriptor
+ * 3. submit the transfer
+ * 4. when the transfer is completed, the metadata should be available in the
+ * attached buffer
+ *
+ * @DESC_METADATA_ENGINE - the metadata buffer is allocated/managed by the DMA
+ * driver. The client driver can ask for the pointer, maximum size and the
+ * currently used size of the metadata and can directly update or read it.
+ * dmaengine_desc_get_metadata_ptr() and dmaengine_desc_set_metadata_len() are
+ * provided as helper functions.
+ *
+ * Note: the metadata area for the descriptor is no longer valid after the
+ * transfer has been completed (valid up to the point when the completion
+ * callback returns if used).
+ *
+ * Client drivers interested in using this mode can follow:
+ * - DMA_MEM_TO_DEV / DEV_MEM_TO_MEM:
+ * 1. prepare the descriptor (dmaengine_prep_*)
+ * 2. use dmaengine_desc_get_metadata_ptr() to get the pointer to the engine's
+ * metadata area
+ * 3. update the metadata at the pointer
+ * 4. use dmaengine_desc_set_metadata_len() to tell the DMA engine the amount
+ * of data the client has placed into the metadata buffer
+ * 5. submit the transfer
+ * - DMA_DEV_TO_MEM:
+ * 1. prepare the descriptor (dmaengine_prep_*)
+ * 2. submit the transfer
+ * 3. on transfer completion, use dmaengine_desc_get_metadata_ptr() to get the
+ * pointer to the engine's metadata area
+ * 4. Read out the metadata from the pointer
+ *
+ * Note: the two modes are not compatible and clients must use one mode per
+ * descriptor (usage sketches for both modes follow below).
+ */
+enum dma_desc_metadata_mode {
+ DESC_METADATA_NONE = 0,
+ DESC_METADATA_CLIENT = BIT(0),
+ DESC_METADATA_ENGINE = BIT(1),
+};
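A sketch of the DESC_METADATA_CLIENT flow for the MEM_TO_DEV case described above; the channel, the mapped buffer and the client-owned metadata buffer are assumed to have been prepared elsewhere, and error unwinding is trimmed.

static int example_client_metadata_tx(struct dma_chan *chan,
				      dma_addr_t buf_dma, size_t len,
				      void *md, size_t md_len)
{
	struct dma_async_tx_descriptor *desc;
	int ret;

	/* 1. prepare the descriptor; the metadata is already built in md */
	desc = dmaengine_prep_slave_single(chan, buf_dma, len,
					   DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
	if (!desc)
		return -ENOMEM;

	/* 2. attach the client-owned metadata buffer to the descriptor */
	ret = dmaengine_desc_attach_metadata(desc, md, md_len);
	if (ret)
		return ret;

	/* 3. submit the transfer */
	dmaengine_submit(desc);
	dma_async_issue_pending(chan);

	return 0;
}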
+
struct dma_chan_percpu {
/* stats */
unsigned long memcpy_count;
@@ -238,10 +294,12 @@ struct dma_router {
/**
* struct dma_chan - devices supply DMA channels, clients use them
* @device: ptr to the dma device who supplies this channel, always !%NULL
+ * @slave: ptr to the device using this channel
* @cookie: last cookie value returned to client
* @completed_cookie: last completed cookie for this channel
* @chan_id: channel ID for sysfs
* @dev: class device for sysfs
+ * @name: backlink name for sysfs
* @device_node: used to add this to the device chan list
* @local: per-cpu pointer to a struct dma_chan_percpu
* @client_count: how many clients are using this channel
@@ -252,12 +310,14 @@ struct dma_router {
*/
struct dma_chan {
struct dma_device *device;
+ struct device *slave;
dma_cookie_t cookie;
dma_cookie_t completed_cookie;
/* sysfs */
int chan_id;
struct dma_chan_dev *dev;
+ const char *name;
struct list_head device_node;
struct dma_chan_percpu __percpu *local;
@@ -475,19 +535,36 @@ struct dmaengine_unmap_data {
dma_addr_t addr[0];
};
+struct dma_async_tx_descriptor;
+
+struct dma_descriptor_metadata_ops {
+ int (*attach)(struct dma_async_tx_descriptor *desc, void *data,
+ size_t len);
+
+ void *(*get_ptr)(struct dma_async_tx_descriptor *desc,
+ size_t *payload_len, size_t *max_len);
+ int (*set_len)(struct dma_async_tx_descriptor *desc,
+ size_t payload_len);
+};
+
/**
* struct dma_async_tx_descriptor - async transaction descriptor
* ---dma generic offload fields---
* @cookie: tracking cookie for this transaction, set to -EBUSY if
* this tx is sitting on a dependency list
* @flags: flags to augment operation preparation, control completion, and
- * communicate status
+ * communicate status
* @phys: physical address of the descriptor
* @chan: target channel for this operation
* @tx_submit: accept the descriptor, assign ordered cookie and mark the
* descriptor pending. To be pushed on .issue_pending() call
* @callback: routine to call after this operation is complete
* @callback_param: general parameter to pass to the callback routine
+ * @desc_metadata_mode: core managed metadata mode to protect mixed use of
+ * DESC_METADATA_CLIENT or DESC_METADATA_ENGINE. Otherwise
+ * DESC_METADATA_NONE
+ * @metadata_ops: DMA driver provided metadata mode ops, need to be set by the
+ * DMA driver if metadata mode is supported with the descriptor
* ---async_tx api specific fields---
* @next: at completion submit this descriptor
* @parent: pointer to the next level up in the dependency chain
@@ -504,6 +581,8 @@ struct dma_async_tx_descriptor {
dma_async_tx_callback_result callback_result;
void *callback_param;
struct dmaengine_unmap_data *unmap;
+ enum dma_desc_metadata_mode desc_metadata_mode;
+ struct dma_descriptor_metadata_ops *metadata_ops;
#ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
struct dma_async_tx_descriptor *next;
struct dma_async_tx_descriptor *parent;
@@ -611,11 +690,13 @@ static inline struct dma_async_tx_descriptor *txd_next(struct dma_async_tx_descr
* @residue: the remaining number of bytes left to transmit
* on the selected transfer for states DMA_IN_PROGRESS and
* DMA_PAUSED if this is implemented in the driver, else 0
+ * @in_flight_bytes: amount of data in bytes cached by the DMA.
*/
struct dma_tx_state {
dma_cookie_t last;
dma_cookie_t used;
u32 residue;
+ u32 in_flight_bytes;
};
/**
@@ -666,6 +747,7 @@ struct dma_filter {
* @global_node: list_head for global dma_device_list
* @filter: information for device/slave to filter function/param mapping
* @cap_mask: one or more dma_capability flags
+ * @desc_metadata_modes: supported metadata modes by the DMA device
* @max_xor: maximum number of xor sources, 0 if no capability
* @max_pq: maximum number of PQ sources and PQ-continue capability
* @copy_align: alignment shift for memcpy operations
@@ -674,6 +756,7 @@ struct dma_filter {
* @fill_align: alignment shift for memset operations
* @dev_id: unique device ID
* @dev: struct device reference for dma mapping api
+ * @owner: owner module (automatically set based on the provided dev)
* @src_addr_widths: bit mask of src addr widths the device supports
* Width is specified in bytes, e.g. for a device supporting
* a width of 4 the mask should have BIT(4) set.
@@ -718,15 +801,21 @@ struct dma_filter {
* will just return a simple status code
* @device_issue_pending: push pending transactions to hardware
* @descriptor_reuse: a submitted transfer can be resubmitted after completion
+ * @device_release: called sometime after dma_async_device_unregister() is
+ *     called and there are no further references to this structure. This
+ *     must be implemented to free resources; however, many existing drivers
+ *     do not and are therefore not safe to unbind while in use.
+ *
*/
struct dma_device {
-
+ struct kref ref;
unsigned int chancnt;
unsigned int privatecnt;
struct list_head channels;
struct list_head global_node;
struct dma_filter filter;
dma_cap_mask_t cap_mask;
+ enum dma_desc_metadata_mode desc_metadata_modes;
unsigned short max_xor;
unsigned short max_pq;
enum dmaengine_alignment copy_align;
@@ -737,6 +826,7 @@ struct dma_device {
int dev_id;
struct device *dev;
+ struct module *owner;
u32 src_addr_widths;
u32 dst_addr_widths;
@@ -800,6 +890,7 @@ struct dma_device {
dma_cookie_t cookie,
struct dma_tx_state *txstate);
void (*device_issue_pending)(struct dma_chan *chan);
+ void (*device_release)(struct dma_device *dev);
};
static inline int dmaengine_slave_config(struct dma_chan *chan,
@@ -902,6 +993,41 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_memcpy(
len, flags);
}
+static inline bool dmaengine_is_metadata_mode_supported(struct dma_chan *chan,
+ enum dma_desc_metadata_mode mode)
+{
+ if (!chan)
+ return false;
+
+ return !!(chan->device->desc_metadata_modes & mode);
+}
+
+#ifdef CONFIG_DMA_ENGINE
+int dmaengine_desc_attach_metadata(struct dma_async_tx_descriptor *desc,
+ void *data, size_t len);
+void *dmaengine_desc_get_metadata_ptr(struct dma_async_tx_descriptor *desc,
+ size_t *payload_len, size_t *max_len);
+int dmaengine_desc_set_metadata_len(struct dma_async_tx_descriptor *desc,
+ size_t payload_len);
+#else /* CONFIG_DMA_ENGINE */
+static inline int dmaengine_desc_attach_metadata(
+ struct dma_async_tx_descriptor *desc, void *data, size_t len)
+{
+ return -EINVAL;
+}
+static inline void *dmaengine_desc_get_metadata_ptr(
+ struct dma_async_tx_descriptor *desc, size_t *payload_len,
+ size_t *max_len)
+{
+ return NULL;
+}
+static inline int dmaengine_desc_set_metadata_len(
+ struct dma_async_tx_descriptor *desc, size_t payload_len)
+{
+ return -EINVAL;
+}
+#endif /* CONFIG_DMA_ENGINE */
+
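And a sketch of the DESC_METADATA_ENGINE side using the helpers declared above: write into the engine-owned metadata area, then report how much metadata was placed there (descriptor preparation, submission and the ERR_PTR convention of the real implementation are assumptions of this example).

static int example_engine_metadata_fill(struct dma_chan *chan,
					struct dma_async_tx_descriptor *desc,
					const void *md, size_t md_len)
{
	size_t payload_len, max_len;
	void *ptr;

	if (!dmaengine_is_metadata_mode_supported(chan, DESC_METADATA_ENGINE))
		return -ENOTSUPP;

	ptr = dmaengine_desc_get_metadata_ptr(desc, &payload_len, &max_len);
	if (IS_ERR_OR_NULL(ptr) || md_len > max_len)
		return -EINVAL;

	memcpy(ptr, md, md_len);	/* fill the engine's metadata area */

	return dmaengine_desc_set_metadata_len(desc, md_len);
}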
/**
* dmaengine_terminate_all() - Terminate all active DMA transfers
* @chan: The channel for which to terminate the transfers
@@ -1364,8 +1490,11 @@ static inline int dma_get_slave_caps(struct dma_chan *chan,
static inline int dmaengine_desc_set_reuse(struct dma_async_tx_descriptor *tx)
{
struct dma_slave_caps caps;
+ int ret;
- dma_get_slave_caps(tx->chan, &caps);
+ ret = dma_get_slave_caps(tx->chan, &caps);
+ if (ret)
+ return ret;
if (caps.descriptor_reuse) {
tx->flags |= DMA_CTRL_REUSE;
@@ -1399,16 +1528,16 @@ static inline int dmaengine_desc_free(struct dma_async_tx_descriptor *desc)
int dma_async_device_register(struct dma_device *device);
int dmaenginem_async_device_register(struct dma_device *device);
void dma_async_device_unregister(struct dma_device *device);
+int dma_async_device_channel_register(struct dma_device *device,
+ struct dma_chan *chan);
+void dma_async_device_channel_unregister(struct dma_device *device,
+ struct dma_chan *chan);
void dma_run_dependencies(struct dma_async_tx_descriptor *tx);
-struct dma_chan *dma_get_slave_channel(struct dma_chan *chan);
-struct dma_chan *dma_get_any_slave_channel(struct dma_device *device);
#define dma_request_channel(mask, x, y) \
__dma_request_channel(&(mask), x, y, NULL)
-#define dma_request_slave_channel_compat(mask, x, y, dev, name) \
- __dma_request_slave_channel_compat(&(mask), x, y, dev, name)
static inline struct dma_chan
-*__dma_request_slave_channel_compat(const dma_cap_mask_t *mask,
+*dma_request_slave_channel_compat(const dma_cap_mask_t mask,
dma_filter_fn fn, void *fn_param,
struct device *dev, const char *name)
{
@@ -1421,6 +1550,25 @@ static inline struct dma_chan
if (!fn || !fn_param)
return NULL;
- return __dma_request_channel(mask, fn, fn_param, NULL);
+ return __dma_request_channel(&mask, fn, fn_param, NULL);
+}
+
+static inline char *
+dmaengine_get_direction_text(enum dma_transfer_direction dir)
+{
+ switch (dir) {
+ case DMA_DEV_TO_MEM:
+ return "DEV_TO_MEM";
+ case DMA_MEM_TO_DEV:
+ return "MEM_TO_DEV";
+ case DMA_MEM_TO_MEM:
+ return "MEM_TO_MEM";
+ case DMA_DEV_TO_DEV:
+ return "DEV_TO_DEV";
+ default:
+ break;
+ }
+
+ return "invalid";
}
#endif /* DMAENGINE_H */
diff --git a/include/linux/dmar.h b/include/linux/dmar.h
index a7cf3599d9a1..d7bf029df737 100644
--- a/include/linux/dmar.h
+++ b/include/linux/dmar.h
@@ -69,19 +69,23 @@ struct dmar_pci_notify_info {
extern struct rw_semaphore dmar_global_lock;
extern struct list_head dmar_drhd_units;
-#define for_each_drhd_unit(drhd) \
- list_for_each_entry_rcu(drhd, &dmar_drhd_units, list)
+#define for_each_drhd_unit(drhd) \
+ list_for_each_entry_rcu(drhd, &dmar_drhd_units, list, \
+ dmar_rcu_check())
#define for_each_active_drhd_unit(drhd) \
- list_for_each_entry_rcu(drhd, &dmar_drhd_units, list) \
+ list_for_each_entry_rcu(drhd, &dmar_drhd_units, list, \
+ dmar_rcu_check()) \
if (drhd->ignored) {} else
#define for_each_active_iommu(i, drhd) \
- list_for_each_entry_rcu(drhd, &dmar_drhd_units, list) \
+ list_for_each_entry_rcu(drhd, &dmar_drhd_units, list, \
+ dmar_rcu_check()) \
if (i=drhd->iommu, drhd->ignored) {} else
#define for_each_iommu(i, drhd) \
- list_for_each_entry_rcu(drhd, &dmar_drhd_units, list) \
+ list_for_each_entry_rcu(drhd, &dmar_drhd_units, list, \
+ dmar_rcu_check()) \
if (i=drhd->iommu, 0) {} else
static inline bool dmar_rcu_check(void)
@@ -129,6 +133,7 @@ static inline int dmar_res_noop(struct acpi_dmar_header *hdr, void *arg)
#ifdef CONFIG_INTEL_IOMMU
extern int iommu_detected, no_iommu;
extern int intel_iommu_init(void);
+extern void intel_iommu_shutdown(void);
extern int dmar_parse_one_rmrr(struct acpi_dmar_header *header, void *arg);
extern int dmar_parse_one_atsr(struct acpi_dmar_header *header, void *arg);
extern int dmar_check_one_atsr(struct acpi_dmar_header *hdr, void *arg);
@@ -137,6 +142,7 @@ extern int dmar_iommu_hotplug(struct dmar_drhd_unit *dmaru, bool insert);
extern int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info);
#else /* !CONFIG_INTEL_IOMMU: */
static inline int intel_iommu_init(void) { return -ENODEV; }
+static inline void intel_iommu_shutdown(void) { }
#define dmar_parse_one_rmrr dmar_res_noop
#define dmar_parse_one_atsr dmar_res_noop
diff --git a/include/linux/dmi.h b/include/linux/dmi.h
index 8de8c4f15163..927f8a8b7a1d 100644
--- a/include/linux/dmi.h
+++ b/include/linux/dmi.h
@@ -113,6 +113,8 @@ extern int dmi_walk(void (*decode)(const struct dmi_header *, void *),
extern bool dmi_match(enum dmi_field f, const char *str);
extern void dmi_memdev_name(u16 handle, const char **bank, const char **device);
extern u64 dmi_memdev_size(u16 handle);
+extern u8 dmi_memdev_type(u16 handle);
+extern u16 dmi_memdev_handle(int slot);
#else
@@ -142,6 +144,8 @@ static inline bool dmi_match(enum dmi_field f, const char *str)
static inline void dmi_memdev_name(u16 handle, const char **bank,
const char **device) { }
static inline u64 dmi_memdev_size(u16 handle) { return ~0ul; }
+static inline u8 dmi_memdev_type(u16 handle) { return 0x0; }
+static inline u16 dmi_memdev_handle(int slot) { return 0xffff; }
static inline const struct dmi_system_id *
dmi_first_match(const struct dmi_system_id *list) { return NULL; }
diff --git a/include/linux/dsa/8021q.h b/include/linux/dsa/8021q.h
index 0aa803c451a3..c620d9139c28 100644
--- a/include/linux/dsa/8021q.h
+++ b/include/linux/dsa/8021q.h
@@ -28,8 +28,6 @@ int dsa_8021q_rx_switch_id(u16 vid);
int dsa_8021q_rx_source_port(u16 vid);
-struct sk_buff *dsa_8021q_remove_header(struct sk_buff *skb);
-
#else
int dsa_port_setup_8021q_tagging(struct dsa_switch *ds, int index,
@@ -64,11 +62,6 @@ int dsa_8021q_rx_source_port(u16 vid)
return 0;
}
-struct sk_buff *dsa_8021q_remove_header(struct sk_buff *skb)
-{
- return NULL;
-}
-
#endif /* IS_ENABLED(CONFIG_NET_DSA_TAG_8021Q) */
#endif /* _NET_DSA_8021Q_H */
diff --git a/include/linux/dsa/sja1105.h b/include/linux/dsa/sja1105.h
index 897e799dbcb9..fa5735c353cd 100644
--- a/include/linux/dsa/sja1105.h
+++ b/include/linux/dsa/sja1105.h
@@ -37,8 +37,6 @@
* the structure defined in struct sja1105_private.
*/
struct sja1105_tagger_data {
- struct sk_buff_head skb_rxtstamp_queue;
- struct work_struct rxtstamp_work;
struct sk_buff *stampable_skb;
/* Protects concurrent access to the meta state machine
* from taggers running on multiple ports on SMP systems
@@ -55,10 +53,12 @@ struct sja1105_skb_cb {
((struct sja1105_skb_cb *)DSA_SKB_CB_PRIV(skb))
struct sja1105_port {
+ struct kthread_worker *xmit_worker;
+ struct kthread_work xmit_work;
+ struct sk_buff_head xmit_queue;
struct sja1105_tagger_data *data;
struct dsa_port *dp;
bool hwts_tx_en;
- int mgmt_slot;
};
#endif /* _NET_DSA_SJA1105_H */
diff --git a/include/linux/dw_apb_timer.h b/include/linux/dw_apb_timer.h
index 14f072edbca5..82ebf9223948 100644
--- a/include/linux/dw_apb_timer.h
+++ b/include/linux/dw_apb_timer.h
@@ -25,7 +25,6 @@ struct dw_apb_timer {
struct dw_apb_clock_event_device {
struct clock_event_device ced;
struct dw_apb_timer timer;
- struct irqaction irqaction;
void (*eoi)(struct dw_apb_timer *);
};
diff --git a/include/linux/edac.h b/include/linux/edac.h
index c19483b90079..0f20b986b0ab 100644
--- a/include/linux/edac.h
+++ b/include/linux/edac.h
@@ -362,78 +362,6 @@ struct edac_mc_layer {
*/
#define EDAC_MAX_LAYERS 3
-/**
- * EDAC_DIMM_OFF - Macro responsible to get a pointer offset inside a pointer
- * array for the element given by [layer0,layer1,layer2]
- * position
- *
- * @layers: a struct edac_mc_layer array, describing how many elements
- * were allocated for each layer
- * @nlayers: Number of layers at the @layers array
- * @layer0: layer0 position
- * @layer1: layer1 position. Unused if n_layers < 2
- * @layer2: layer2 position. Unused if n_layers < 3
- *
- * For 1 layer, this macro returns "var[layer0] - var";
- *
- * For 2 layers, this macro is similar to allocate a bi-dimensional array
- * and to return "var[layer0][layer1] - var";
- *
- * For 3 layers, this macro is similar to allocate a tri-dimensional array
- * and to return "var[layer0][layer1][layer2] - var".
- *
- * A loop could be used here to make it more generic, but, as we only have
- * 3 layers, this is a little faster.
- *
- * By design, layers can never be 0 or more than 3. If that ever happens,
- * a NULL is returned, causing an OOPS during the memory allocation routine,
- * with would point to the developer that he's doing something wrong.
- */
-#define EDAC_DIMM_OFF(layers, nlayers, layer0, layer1, layer2) ({ \
- int __i; \
- if ((nlayers) == 1) \
- __i = layer0; \
- else if ((nlayers) == 2) \
- __i = (layer1) + ((layers[1]).size * (layer0)); \
- else if ((nlayers) == 3) \
- __i = (layer2) + ((layers[2]).size * ((layer1) + \
- ((layers[1]).size * (layer0)))); \
- else \
- __i = -EINVAL; \
- __i; \
-})
-
-/**
- * EDAC_DIMM_PTR - Macro responsible to get a pointer inside a pointer array
- * for the element given by [layer0,layer1,layer2] position
- *
- * @layers: a struct edac_mc_layer array, describing how many elements
- * were allocated for each layer
- * @var: name of the var where we want to get the pointer
- * (like mci->dimms)
- * @nlayers: Number of layers at the @layers array
- * @layer0: layer0 position
- * @layer1: layer1 position. Unused if n_layers < 2
- * @layer2: layer2 position. Unused if n_layers < 3
- *
- * For 1 layer, this macro returns "var[layer0]";
- *
- * For 2 layers, this macro is similar to allocate a bi-dimensional array
- * and to return "var[layer0][layer1]";
- *
- * For 3 layers, this macro is similar to allocate a tri-dimensional array
- * and to return "var[layer0][layer1][layer2]";
- */
-#define EDAC_DIMM_PTR(layers, var, nlayers, layer0, layer1, layer2) ({ \
- typeof(*var) __p; \
- int ___i = EDAC_DIMM_OFF(layers, nlayers, layer0, layer1, layer2); \
- if (___i < 0) \
- __p = NULL; \
- else \
- __p = (var)[___i]; \
- __p; \
-})
-
struct dimm_info {
struct device dev;
@@ -443,6 +371,7 @@ struct dimm_info {
unsigned int location[EDAC_MAX_LAYERS];
struct mem_ctl_info *mci; /* the parent */
+ unsigned int idx; /* index within the parent dimm array */
u32 grain; /* granularity of reported error in bytes */
enum dev_type dtype; /* memory device type */
@@ -454,6 +383,9 @@ struct dimm_info {
unsigned int csrow, cschannel; /* Points to the old API data */
u16 smbios_handle; /* Handle for SMBIOS type 17 */
+
+ u32 ce_count;
+ u32 ue_count;
};
/**
@@ -513,6 +445,7 @@ struct errcount_attribute_data {
* struct edac_raw_error_desc - Raw error report structure
* @grain: minimum granularity for an error report, in bytes
* @error_count: number of errors of the same type
+ * @type: severity of the error (CE/UE/Fatal)
* @top_layer: top layer of the error (layer[0])
* @mid_layer: middle layer of the error (layer[1])
* @low_layer: low layer of the error (layer[2])
@@ -524,20 +457,14 @@ struct errcount_attribute_data {
* @location: location of the error
* @label: label of the affected DIMM(s)
* @other_detail: other driver-specific detail about the error
- * @enable_per_layer_report: if false, the error affects all layers
- * (typically, a memory controller error)
*/
struct edac_raw_error_desc {
- /*
- * NOTE: everything before grain won't be cleaned by
- * edac_raw_error_desc_clean()
- */
char location[LOCATION_SIZE];
char label[(EDAC_MC_LABEL_LEN + 1 + sizeof(OTHER_LABEL)) * EDAC_MAX_LABELS];
long grain;
- /* the vars below and grain will be cleaned on every new error report */
u16 error_count;
+ enum hw_event_mc_err_type type;
int top_layer;
int mid_layer;
int low_layer;
@@ -546,7 +473,6 @@ struct edac_raw_error_desc {
unsigned long syndrome;
const char *msg;
const char *other_detail;
- bool enable_per_layer_report;
};
/* MEMORY controller information structure
@@ -636,7 +562,6 @@ struct mem_ctl_info {
*/
u32 ce_noinfo_count, ue_noinfo_count;
u32 ue_mc, ce_mc;
- u32 *ce_per_layer[EDAC_MAX_LAYERS], *ue_per_layer[EDAC_MAX_LAYERS];
struct completion complete;
@@ -669,4 +594,70 @@ struct mem_ctl_info {
bool fake_inject_ue;
u16 fake_inject_count;
};
-#endif
+
+#define mci_for_each_dimm(mci, dimm) \
+ for ((dimm) = (mci)->dimms[0]; \
+ (dimm); \
+ (dimm) = (dimm)->idx + 1 < (mci)->tot_dimms \
+ ? (mci)->dimms[(dimm)->idx + 1] \
+ : NULL)
+
+/**
+ * edac_get_dimm_by_index - Get DIMM info at @index from a memory
+ * controller
+ *
+ * @mci: MC descriptor struct mem_ctl_info
+ * @index: index in the memory controller's DIMM array
+ *
+ * Returns a struct dimm_info * or NULL on failure.
+ */
+static inline struct dimm_info *
+edac_get_dimm_by_index(struct mem_ctl_info *mci, int index)
+{
+ if (index < 0 || index >= mci->tot_dimms)
+ return NULL;
+
+ if (WARN_ON_ONCE(mci->dimms[index]->idx != index))
+ return NULL;
+
+ return mci->dimms[index];
+}
+
+/**
+ * edac_get_dimm - Get DIMM info from a memory controller given by
+ * [layer0,layer1,layer2] position
+ *
+ * @mci: MC descriptor struct mem_ctl_info
+ * @layer0: layer0 position
+ * @layer1: layer1 position. Unused if n_layers < 2
+ * @layer2: layer2 position. Unused if n_layers < 3
+ *
+ * For 1 layer, this function returns "dimms[layer0]";
+ *
+ * For 2 layers, this function is similar to allocating a two-dimensional
+ * array and returning "dimms[layer0][layer1]";
+ *
+ * For 3 layers, this function is similar to allocating a three-dimensional
+ * array and returning "dimms[layer0][layer1][layer2]";
+ */
+static inline struct dimm_info *edac_get_dimm(struct mem_ctl_info *mci,
+ int layer0, int layer1, int layer2)
+{
+ int index;
+
+ if (layer0 < 0
+ || (mci->n_layers > 1 && layer1 < 0)
+ || (mci->n_layers > 2 && layer2 < 0))
+ return NULL;
+
+ index = layer0;
+
+ if (mci->n_layers > 1)
+ index = index * mci->layers[1].size + layer1;
+
+ if (mci->n_layers > 2)
+ index = index * mci->layers[2].size + layer2;
+
+ return edac_get_dimm_by_index(mci, index);
+}
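Two short sketches of how these accessors replace the old EDAC_DIMM_OFF/EDAC_DIMM_PTR lookups; the meaning of layer0/layer1 (often csrow/channel) depends on how the controller was allocated.

static inline u32 example_dimm_ce_count(struct mem_ctl_info *mci,
					int layer0, int layer1)
{
	/* equivalent of the former dimms[layer0][layer1] addressing */
	struct dimm_info *dimm = edac_get_dimm(mci, layer0, layer1, 0);

	return dimm ? dimm->ce_count : 0;
}

static inline u32 example_total_ce_count(struct mem_ctl_info *mci)
{
	struct dimm_info *dimm;
	u32 total = 0;

	mci_for_each_dimm(mci, dimm)
		total += dimm->ce_count;

	return total;
}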
+#endif /* _LINUX_EDAC_H_ */
diff --git a/include/linux/efi.h b/include/linux/efi.h
index d87acf62958e..251f1f783cdf 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -48,6 +48,14 @@ typedef u16 efi_char16_t; /* UNICODE character */
typedef u64 efi_physical_addr_t;
typedef void *efi_handle_t;
+#if defined(CONFIG_X86_64)
+#define __efiapi __attribute__((ms_abi))
+#elif defined(CONFIG_X86_32)
+#define __efiapi __attribute__((regparm(0)))
+#else
+#define __efiapi
+#endif
+
/*
* The UEFI spec and EDK2 reference implementation both define EFI_GUID as
struct { u32 a; u16 b; u16 c; u8 d[8]; }; and so the implied alignment
@@ -112,6 +120,7 @@ typedef struct {
#define EFI_MEMORY_MORE_RELIABLE \
((u64)0x0000000000010000ULL) /* higher reliability */
#define EFI_MEMORY_RO ((u64)0x0000000000020000ULL) /* read-only */
+#define EFI_MEMORY_SP ((u64)0x0000000000040000ULL) /* soft reserved */
#define EFI_MEMORY_RUNTIME ((u64)0x8000000000000000ULL) /* range requires runtime mapping */
#define EFI_MEMORY_DESCRIPTOR_VERSION 1
@@ -135,15 +144,6 @@ typedef struct {
u32 imagesize;
} efi_capsule_header_t;
-struct efi_boot_memmap {
- efi_memory_desc_t **map;
- unsigned long *map_size;
- unsigned long *desc_size;
- u32 *desc_ver;
- unsigned long *key_ptr;
- unsigned long *buff_size;
-};
-
/*
* EFI capsule flags
*/
@@ -165,14 +165,6 @@ struct capsule_info {
int __efi_capsule_setup_info(struct capsule_info *cap_info);
-/*
- * Allocation types for calls to boottime->allocate_pages.
- */
-#define EFI_ALLOCATE_ANY_PAGES 0
-#define EFI_ALLOCATE_MAX_ADDRESS 1
-#define EFI_ALLOCATE_ADDRESS 2
-#define EFI_MAX_ALLOCATE_TYPE 3
-
typedef int (*efi_freemem_callback_t) (u64 start, u64 end, void *arg);
/*
@@ -202,324 +194,7 @@ typedef struct {
u8 sets_to_zero;
} efi_time_cap_t;
-typedef struct {
- efi_table_hdr_t hdr;
- u32 raise_tpl;
- u32 restore_tpl;
- u32 allocate_pages;
- u32 free_pages;
- u32 get_memory_map;
- u32 allocate_pool;
- u32 free_pool;
- u32 create_event;
- u32 set_timer;
- u32 wait_for_event;
- u32 signal_event;
- u32 close_event;
- u32 check_event;
- u32 install_protocol_interface;
- u32 reinstall_protocol_interface;
- u32 uninstall_protocol_interface;
- u32 handle_protocol;
- u32 __reserved;
- u32 register_protocol_notify;
- u32 locate_handle;
- u32 locate_device_path;
- u32 install_configuration_table;
- u32 load_image;
- u32 start_image;
- u32 exit;
- u32 unload_image;
- u32 exit_boot_services;
- u32 get_next_monotonic_count;
- u32 stall;
- u32 set_watchdog_timer;
- u32 connect_controller;
- u32 disconnect_controller;
- u32 open_protocol;
- u32 close_protocol;
- u32 open_protocol_information;
- u32 protocols_per_handle;
- u32 locate_handle_buffer;
- u32 locate_protocol;
- u32 install_multiple_protocol_interfaces;
- u32 uninstall_multiple_protocol_interfaces;
- u32 calculate_crc32;
- u32 copy_mem;
- u32 set_mem;
- u32 create_event_ex;
-} __packed efi_boot_services_32_t;
-
-typedef struct {
- efi_table_hdr_t hdr;
- u64 raise_tpl;
- u64 restore_tpl;
- u64 allocate_pages;
- u64 free_pages;
- u64 get_memory_map;
- u64 allocate_pool;
- u64 free_pool;
- u64 create_event;
- u64 set_timer;
- u64 wait_for_event;
- u64 signal_event;
- u64 close_event;
- u64 check_event;
- u64 install_protocol_interface;
- u64 reinstall_protocol_interface;
- u64 uninstall_protocol_interface;
- u64 handle_protocol;
- u64 __reserved;
- u64 register_protocol_notify;
- u64 locate_handle;
- u64 locate_device_path;
- u64 install_configuration_table;
- u64 load_image;
- u64 start_image;
- u64 exit;
- u64 unload_image;
- u64 exit_boot_services;
- u64 get_next_monotonic_count;
- u64 stall;
- u64 set_watchdog_timer;
- u64 connect_controller;
- u64 disconnect_controller;
- u64 open_protocol;
- u64 close_protocol;
- u64 open_protocol_information;
- u64 protocols_per_handle;
- u64 locate_handle_buffer;
- u64 locate_protocol;
- u64 install_multiple_protocol_interfaces;
- u64 uninstall_multiple_protocol_interfaces;
- u64 calculate_crc32;
- u64 copy_mem;
- u64 set_mem;
- u64 create_event_ex;
-} __packed efi_boot_services_64_t;
-
-/*
- * EFI Boot Services table
- */
-typedef struct {
- efi_table_hdr_t hdr;
- void *raise_tpl;
- void *restore_tpl;
- efi_status_t (*allocate_pages)(int, int, unsigned long,
- efi_physical_addr_t *);
- efi_status_t (*free_pages)(efi_physical_addr_t, unsigned long);
- efi_status_t (*get_memory_map)(unsigned long *, void *, unsigned long *,
- unsigned long *, u32 *);
- efi_status_t (*allocate_pool)(int, unsigned long, void **);
- efi_status_t (*free_pool)(void *);
- void *create_event;
- void *set_timer;
- void *wait_for_event;
- void *signal_event;
- void *close_event;
- void *check_event;
- void *install_protocol_interface;
- void *reinstall_protocol_interface;
- void *uninstall_protocol_interface;
- efi_status_t (*handle_protocol)(efi_handle_t, efi_guid_t *, void **);
- void *__reserved;
- void *register_protocol_notify;
- efi_status_t (*locate_handle)(int, efi_guid_t *, void *,
- unsigned long *, efi_handle_t *);
- void *locate_device_path;
- efi_status_t (*install_configuration_table)(efi_guid_t *, void *);
- void *load_image;
- void *start_image;
- void *exit;
- void *unload_image;
- efi_status_t (*exit_boot_services)(efi_handle_t, unsigned long);
- void *get_next_monotonic_count;
- void *stall;
- void *set_watchdog_timer;
- void *connect_controller;
- void *disconnect_controller;
- void *open_protocol;
- void *close_protocol;
- void *open_protocol_information;
- void *protocols_per_handle;
- void *locate_handle_buffer;
- efi_status_t (*locate_protocol)(efi_guid_t *, void *, void **);
- void *install_multiple_protocol_interfaces;
- void *uninstall_multiple_protocol_interfaces;
- void *calculate_crc32;
- void *copy_mem;
- void *set_mem;
- void *create_event_ex;
-} efi_boot_services_t;
-
-typedef enum {
- EfiPciIoWidthUint8,
- EfiPciIoWidthUint16,
- EfiPciIoWidthUint32,
- EfiPciIoWidthUint64,
- EfiPciIoWidthFifoUint8,
- EfiPciIoWidthFifoUint16,
- EfiPciIoWidthFifoUint32,
- EfiPciIoWidthFifoUint64,
- EfiPciIoWidthFillUint8,
- EfiPciIoWidthFillUint16,
- EfiPciIoWidthFillUint32,
- EfiPciIoWidthFillUint64,
- EfiPciIoWidthMaximum
-} EFI_PCI_IO_PROTOCOL_WIDTH;
-
-typedef enum {
- EfiPciIoAttributeOperationGet,
- EfiPciIoAttributeOperationSet,
- EfiPciIoAttributeOperationEnable,
- EfiPciIoAttributeOperationDisable,
- EfiPciIoAttributeOperationSupported,
- EfiPciIoAttributeOperationMaximum
-} EFI_PCI_IO_PROTOCOL_ATTRIBUTE_OPERATION;
-
-typedef struct {
- u32 read;
- u32 write;
-} efi_pci_io_protocol_access_32_t;
-
-typedef struct {
- u64 read;
- u64 write;
-} efi_pci_io_protocol_access_64_t;
-
-typedef struct {
- void *read;
- void *write;
-} efi_pci_io_protocol_access_t;
-
-typedef struct {
- u32 poll_mem;
- u32 poll_io;
- efi_pci_io_protocol_access_32_t mem;
- efi_pci_io_protocol_access_32_t io;
- efi_pci_io_protocol_access_32_t pci;
- u32 copy_mem;
- u32 map;
- u32 unmap;
- u32 allocate_buffer;
- u32 free_buffer;
- u32 flush;
- u32 get_location;
- u32 attributes;
- u32 get_bar_attributes;
- u32 set_bar_attributes;
- u64 romsize;
- u32 romimage;
-} efi_pci_io_protocol_32_t;
-
-typedef struct {
- u64 poll_mem;
- u64 poll_io;
- efi_pci_io_protocol_access_64_t mem;
- efi_pci_io_protocol_access_64_t io;
- efi_pci_io_protocol_access_64_t pci;
- u64 copy_mem;
- u64 map;
- u64 unmap;
- u64 allocate_buffer;
- u64 free_buffer;
- u64 flush;
- u64 get_location;
- u64 attributes;
- u64 get_bar_attributes;
- u64 set_bar_attributes;
- u64 romsize;
- u64 romimage;
-} efi_pci_io_protocol_64_t;
-
-typedef struct {
- void *poll_mem;
- void *poll_io;
- efi_pci_io_protocol_access_t mem;
- efi_pci_io_protocol_access_t io;
- efi_pci_io_protocol_access_t pci;
- void *copy_mem;
- void *map;
- void *unmap;
- void *allocate_buffer;
- void *free_buffer;
- void *flush;
- void *get_location;
- void *attributes;
- void *get_bar_attributes;
- void *set_bar_attributes;
- uint64_t romsize;
- void *romimage;
-} efi_pci_io_protocol_t;
-
-#define EFI_PCI_IO_ATTRIBUTE_ISA_MOTHERBOARD_IO 0x0001
-#define EFI_PCI_IO_ATTRIBUTE_ISA_IO 0x0002
-#define EFI_PCI_IO_ATTRIBUTE_VGA_PALETTE_IO 0x0004
-#define EFI_PCI_IO_ATTRIBUTE_VGA_MEMORY 0x0008
-#define EFI_PCI_IO_ATTRIBUTE_VGA_IO 0x0010
-#define EFI_PCI_IO_ATTRIBUTE_IDE_PRIMARY_IO 0x0020
-#define EFI_PCI_IO_ATTRIBUTE_IDE_SECONDARY_IO 0x0040
-#define EFI_PCI_IO_ATTRIBUTE_MEMORY_WRITE_COMBINE 0x0080
-#define EFI_PCI_IO_ATTRIBUTE_IO 0x0100
-#define EFI_PCI_IO_ATTRIBUTE_MEMORY 0x0200
-#define EFI_PCI_IO_ATTRIBUTE_BUS_MASTER 0x0400
-#define EFI_PCI_IO_ATTRIBUTE_MEMORY_CACHED 0x0800
-#define EFI_PCI_IO_ATTRIBUTE_MEMORY_DISABLE 0x1000
-#define EFI_PCI_IO_ATTRIBUTE_EMBEDDED_DEVICE 0x2000
-#define EFI_PCI_IO_ATTRIBUTE_EMBEDDED_ROM 0x4000
-#define EFI_PCI_IO_ATTRIBUTE_DUAL_ADDRESS_CYCLE 0x8000
-#define EFI_PCI_IO_ATTRIBUTE_ISA_IO_16 0x10000
-#define EFI_PCI_IO_ATTRIBUTE_VGA_PALETTE_IO_16 0x20000
-#define EFI_PCI_IO_ATTRIBUTE_VGA_IO_16 0x40000
-
-typedef struct {
- u32 version;
- u32 get;
- u32 set;
- u32 del;
- u32 get_all;
-} apple_properties_protocol_32_t;
-
-typedef struct {
- u64 version;
- u64 get;
- u64 set;
- u64 del;
- u64 get_all;
-} apple_properties_protocol_64_t;
-
-typedef struct {
- u32 get_capability;
- u32 get_event_log;
- u32 hash_log_extend_event;
- u32 submit_command;
- u32 get_active_pcr_banks;
- u32 set_active_pcr_banks;
- u32 get_result_of_set_active_pcr_banks;
-} efi_tcg2_protocol_32_t;
-
-typedef struct {
- u64 get_capability;
- u64 get_event_log;
- u64 hash_log_extend_event;
- u64 submit_command;
- u64 get_active_pcr_banks;
- u64 set_active_pcr_banks;
- u64 get_result_of_set_active_pcr_banks;
-} efi_tcg2_protocol_64_t;
-
-typedef u32 efi_tcg2_event_log_format;
-
-typedef struct {
- void *get_capability;
- efi_status_t (*get_event_log)(efi_handle_t, efi_tcg2_event_log_format,
- efi_physical_addr_t *, efi_physical_addr_t *, efi_bool_t *);
- void *hash_log_extend_event;
- void *submit_command;
- void *get_active_pcr_banks;
- void *set_active_pcr_banks;
- void *get_result_of_set_active_pcr_banks;
-} efi_tcg2_protocol_t;
+typedef union efi_boot_services efi_boot_services_t;
/*
* Types and defines for EFI ResetSystem
@@ -552,24 +227,6 @@ typedef struct {
u32 query_variable_info;
} efi_runtime_services_32_t;
-typedef struct {
- efi_table_hdr_t hdr;
- u64 get_time;
- u64 set_time;
- u64 get_wakeup_time;
- u64 set_wakeup_time;
- u64 set_virtual_address_map;
- u64 convert_pointer;
- u64 get_variable;
- u64 get_next_variable;
- u64 set_variable;
- u64 get_next_high_mono_count;
- u64 reset_system;
- u64 update_capsule;
- u64 query_capsule_caps;
- u64 query_variable_info;
-} efi_runtime_services_64_t;
-
typedef efi_status_t efi_get_time_t (efi_time_t *tm, efi_time_cap_t *tc);
typedef efi_status_t efi_set_time_t (efi_time_t *tm);
typedef efi_status_t efi_get_wakeup_time_t (efi_bool_t *enabled, efi_bool_t *pending,
@@ -604,22 +261,25 @@ typedef efi_status_t efi_query_variable_store_t(u32 attributes,
unsigned long size,
bool nonblocking);
-typedef struct {
- efi_table_hdr_t hdr;
- efi_get_time_t *get_time;
- efi_set_time_t *set_time;
- efi_get_wakeup_time_t *get_wakeup_time;
- efi_set_wakeup_time_t *set_wakeup_time;
- efi_set_virtual_address_map_t *set_virtual_address_map;
- void *convert_pointer;
- efi_get_variable_t *get_variable;
- efi_get_next_variable_t *get_next_variable;
- efi_set_variable_t *set_variable;
- efi_get_next_high_mono_count_t *get_next_high_mono_count;
- efi_reset_system_t *reset_system;
- efi_update_capsule_t *update_capsule;
- efi_query_capsule_caps_t *query_capsule_caps;
- efi_query_variable_info_t *query_variable_info;
+typedef union {
+ struct {
+ efi_table_hdr_t hdr;
+ efi_get_time_t __efiapi *get_time;
+ efi_set_time_t __efiapi *set_time;
+ efi_get_wakeup_time_t __efiapi *get_wakeup_time;
+ efi_set_wakeup_time_t __efiapi *set_wakeup_time;
+ efi_set_virtual_address_map_t __efiapi *set_virtual_address_map;
+ void *convert_pointer;
+ efi_get_variable_t __efiapi *get_variable;
+ efi_get_next_variable_t __efiapi *get_next_variable;
+ efi_set_variable_t __efiapi *set_variable;
+ efi_get_next_high_mono_count_t __efiapi *get_next_high_mono_count;
+ efi_reset_system_t __efiapi *reset_system;
+ efi_update_capsule_t __efiapi *update_capsule;
+ efi_query_capsule_caps_t __efiapi *query_capsule_caps;
+ efi_query_variable_info_t __efiapi *query_variable_info;
+ };
+ efi_runtime_services_32_t mixed_mode;
} efi_runtime_services_t;
void efi_native_runtime_setup(void);
@@ -672,6 +332,9 @@ void efi_native_runtime_setup(void);
#define EFI_CONSOLE_OUT_DEVICE_GUID EFI_GUID(0xd3b36f2c, 0xd551, 0x11d4, 0x9a, 0x46, 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d)
#define APPLE_PROPERTIES_PROTOCOL_GUID EFI_GUID(0x91bd12fe, 0xf6c3, 0x44fb, 0xa5, 0xb7, 0x51, 0x22, 0xab, 0x30, 0x3a, 0xe0)
#define EFI_TCG2_PROTOCOL_GUID EFI_GUID(0x607f766c, 0x7455, 0x42be, 0x93, 0x0b, 0xe4, 0xd7, 0x6d, 0xb2, 0x72, 0x0f)
+#define EFI_LOAD_FILE_PROTOCOL_GUID EFI_GUID(0x56ec3091, 0x954c, 0x11d2, 0x8e, 0x3f, 0x00, 0xa0, 0xc9, 0x69, 0x72, 0x3b)
+#define EFI_LOAD_FILE2_PROTOCOL_GUID EFI_GUID(0x4006c0c1, 0xfcb3, 0x403e, 0x99, 0x6d, 0x4a, 0x6c, 0x87, 0x24, 0xe0, 0x6d)
+#define EFI_RT_PROPERTIES_TABLE_GUID EFI_GUID(0xeb66918a, 0x7eef, 0x402a, 0x84, 0x2e, 0x93, 0x1d, 0x21, 0xc3, 0x8a, 0xe9)
#define EFI_IMAGE_SECURITY_DATABASE_GUID EFI_GUID(0xd719b2cb, 0x3d3a, 0x4596, 0xa3, 0xbc, 0xda, 0xd0, 0x0e, 0x67, 0x65, 0x6f)
#define EFI_SHIM_LOCK_GUID EFI_GUID(0x605dab50, 0xe046, 0x4300, 0xab, 0xb6, 0x3d, 0xd8, 0x10, 0xdd, 0x8b, 0x23)
@@ -691,6 +354,7 @@ void efi_native_runtime_setup(void);
#define LINUX_EFI_TPM_EVENT_LOG_GUID EFI_GUID(0xb7799cb0, 0xeca2, 0x4943, 0x96, 0x67, 0x1f, 0xae, 0x07, 0xb7, 0x47, 0xfa)
#define LINUX_EFI_TPM_FINAL_LOG_GUID EFI_GUID(0x1e2ed096, 0x30e2, 0x4254, 0xbd, 0x89, 0x86, 0x3b, 0xbe, 0xf8, 0x23, 0x25)
#define LINUX_EFI_MEMRESERVE_TABLE_GUID EFI_GUID(0x888eb0c6, 0x8ede, 0x4ff5, 0xa8, 0xf0, 0x9a, 0xee, 0x5c, 0xb9, 0x77, 0xc2)
+#define LINUX_EFI_INITRD_MEDIA_GUID EFI_GUID(0x5568e427, 0x68fc, 0x4f3d, 0xac, 0x74, 0xca, 0x55, 0x52, 0x31, 0xcc, 0x68)
/* OEM GUIDs */
#define DELLEMC_EFI_RCI2_TABLE_GUID EFI_GUID(0x2d9f28a2, 0xa886, 0x456a, 0x97, 0xa8, 0xf1, 0x1e, 0xf2, 0x4f, 0xf4, 0x55)
@@ -705,9 +369,12 @@ typedef struct {
u32 table;
} efi_config_table_32_t;
-typedef struct {
- efi_guid_t guid;
- unsigned long table;
+typedef union {
+ struct {
+ efi_guid_t guid;
+ void *table;
+ };
+ efi_config_table_32_t mixed_mode;
} efi_config_table_t;
typedef struct {
@@ -759,32 +426,38 @@ typedef struct {
u32 tables;
} efi_system_table_32_t;
-typedef struct {
- efi_table_hdr_t hdr;
- unsigned long fw_vendor; /* physical addr of CHAR16 vendor string */
- u32 fw_revision;
- unsigned long con_in_handle;
- unsigned long con_in;
- unsigned long con_out_handle;
- unsigned long con_out;
- unsigned long stderr_handle;
- unsigned long stderr;
- efi_runtime_services_t *runtime;
- efi_boot_services_t *boottime;
- unsigned long nr_tables;
- unsigned long tables;
+typedef union efi_simple_text_output_protocol efi_simple_text_output_protocol_t;
+
+typedef union {
+ struct {
+ efi_table_hdr_t hdr;
+ unsigned long fw_vendor; /* physical addr of CHAR16 vendor string */
+ u32 fw_revision;
+ unsigned long con_in_handle;
+ unsigned long con_in;
+ unsigned long con_out_handle;
+ efi_simple_text_output_protocol_t *con_out;
+ unsigned long stderr_handle;
+ unsigned long stderr;
+ efi_runtime_services_t *runtime;
+ efi_boot_services_t *boottime;
+ unsigned long nr_tables;
+ unsigned long tables;
+ };
+ efi_system_table_32_t mixed_mode;
} efi_system_table_t;
/*
* Architecture independent structure for describing a memory map for the
- * benefit of efi_memmap_init_early(), saving us the need to pass four
- * parameters.
+ * benefit of efi_memmap_init_early(), and for passing context between
+ * efi_memmap_alloc() and efi_memmap_install().
*/
struct efi_memory_map_data {
phys_addr_t phys_map;
unsigned long size;
unsigned long desc_version;
unsigned long desc_size;
+ unsigned long flags;
};
struct efi_memory_map {
@@ -794,7 +467,10 @@ struct efi_memory_map {
int nr_map;
unsigned long desc_version;
unsigned long desc_size;
- bool late;
+#define EFI_MEMMAP_LATE (1UL << 0)
+#define EFI_MEMMAP_MEMBLOCK (1UL << 1)
+#define EFI_MEMMAP_SLAB (1UL << 2)
+ unsigned long flags;
};
struct efi_mem_range {
@@ -802,140 +478,6 @@ struct efi_mem_range {
u64 attribute;
};
-struct efi_fdt_params {
- u64 system_table;
- u64 mmap;
- u32 mmap_size;
- u32 desc_size;
- u32 desc_ver;
-};
-
-typedef struct {
- u32 revision;
- u32 parent_handle;
- u32 system_table;
- u32 device_handle;
- u32 file_path;
- u32 reserved;
- u32 load_options_size;
- u32 load_options;
- u32 image_base;
- __aligned_u64 image_size;
- unsigned int image_code_type;
- unsigned int image_data_type;
- unsigned long unload;
-} efi_loaded_image_32_t;
-
-typedef struct {
- u32 revision;
- u64 parent_handle;
- u64 system_table;
- u64 device_handle;
- u64 file_path;
- u64 reserved;
- u32 load_options_size;
- u64 load_options;
- u64 image_base;
- __aligned_u64 image_size;
- unsigned int image_code_type;
- unsigned int image_data_type;
- unsigned long unload;
-} efi_loaded_image_64_t;
-
-typedef struct {
- u32 revision;
- void *parent_handle;
- efi_system_table_t *system_table;
- void *device_handle;
- void *file_path;
- void *reserved;
- u32 load_options_size;
- void *load_options;
- void *image_base;
- __aligned_u64 image_size;
- unsigned int image_code_type;
- unsigned int image_data_type;
- unsigned long unload;
-} efi_loaded_image_t;
-
-
-typedef struct {
- u64 size;
- u64 file_size;
- u64 phys_size;
- efi_time_t create_time;
- efi_time_t last_access_time;
- efi_time_t modification_time;
- __aligned_u64 attribute;
- efi_char16_t filename[1];
-} efi_file_info_t;
-
-typedef struct {
- u64 revision;
- u32 open;
- u32 close;
- u32 delete;
- u32 read;
- u32 write;
- u32 get_position;
- u32 set_position;
- u32 get_info;
- u32 set_info;
- u32 flush;
-} efi_file_handle_32_t;
-
-typedef struct {
- u64 revision;
- u64 open;
- u64 close;
- u64 delete;
- u64 read;
- u64 write;
- u64 get_position;
- u64 set_position;
- u64 get_info;
- u64 set_info;
- u64 flush;
-} efi_file_handle_64_t;
-
-typedef struct _efi_file_handle {
- u64 revision;
- efi_status_t (*open)(struct _efi_file_handle *,
- struct _efi_file_handle **,
- efi_char16_t *, u64, u64);
- efi_status_t (*close)(struct _efi_file_handle *);
- void *delete;
- efi_status_t (*read)(struct _efi_file_handle *, unsigned long *,
- void *);
- void *write;
- void *get_position;
- void *set_position;
- efi_status_t (*get_info)(struct _efi_file_handle *, efi_guid_t *,
- unsigned long *, void *);
- void *set_info;
- void *flush;
-} efi_file_handle_t;
-
-typedef struct {
- u64 revision;
- u32 open_volume;
-} efi_file_io_interface_32_t;
-
-typedef struct {
- u64 revision;
- u64 open_volume;
-} efi_file_io_interface_64_t;
-
-typedef struct _efi_file_io_interface {
- u64 revision;
- int (*open_volume)(struct _efi_file_io_interface *,
- efi_file_handle_t **);
-} efi_file_io_interface_t;
-
-#define EFI_FILE_MODE_READ 0x0000000000000001
-#define EFI_FILE_MODE_WRITE 0x0000000000000002
-#define EFI_FILE_MODE_CREATE 0x8000000000000000
-
typedef struct {
u32 version;
u32 length;
@@ -945,6 +487,14 @@ typedef struct {
#define EFI_PROPERTIES_TABLE_VERSION 0x00010000
#define EFI_PROPERTIES_RUNTIME_MEMORY_PROTECTION_NON_EXECUTABLE_PE_DATA 0x1
+typedef struct {
+ u16 version;
+ u16 length;
+ u32 runtime_services_supported;
+} efi_rt_properties_table_t;
+
+#define EFI_RT_PROPERTIES_TABLE_VERSION 0x1
+
#define EFI_INVALID_TABLE_ADDR (~0UL)
typedef struct {
@@ -976,49 +526,63 @@ typedef struct {
efi_time_t time_of_revocation;
} efi_cert_x509_sha256_t;
+extern unsigned long __ro_after_init efi_rng_seed; /* RNG Seed table */
+
/*
* All runtime access to EFI goes through this structure:
*/
extern struct efi {
- efi_system_table_t *systab; /* EFI system table */
- unsigned int runtime_version; /* Runtime services version */
- unsigned long mps; /* MPS table */
- unsigned long acpi; /* ACPI table (IA64 ext 0.71) */
- unsigned long acpi20; /* ACPI table (ACPI 2.0) */
- unsigned long smbios; /* SMBIOS table (32 bit entry point) */
- unsigned long smbios3; /* SMBIOS table (64 bit entry point) */
- unsigned long boot_info; /* boot info table */
- unsigned long hcdp; /* HCDP table */
- unsigned long uga; /* UGA table */
- unsigned long fw_vendor; /* fw_vendor */
- unsigned long runtime; /* runtime table */
- unsigned long config_table; /* config tables */
- unsigned long esrt; /* ESRT table */
- unsigned long properties_table; /* properties table */
- unsigned long mem_attr_table; /* memory attributes table */
- unsigned long rng_seed; /* UEFI firmware random seed */
- unsigned long tpm_log; /* TPM2 Event Log table */
- unsigned long tpm_final_log; /* TPM2 Final Events Log table */
- unsigned long mem_reserve; /* Linux EFI memreserve table */
- efi_get_time_t *get_time;
- efi_set_time_t *set_time;
- efi_get_wakeup_time_t *get_wakeup_time;
- efi_set_wakeup_time_t *set_wakeup_time;
- efi_get_variable_t *get_variable;
- efi_get_next_variable_t *get_next_variable;
- efi_set_variable_t *set_variable;
- efi_set_variable_t *set_variable_nonblocking;
- efi_query_variable_info_t *query_variable_info;
- efi_query_variable_info_t *query_variable_info_nonblocking;
- efi_update_capsule_t *update_capsule;
- efi_query_capsule_caps_t *query_capsule_caps;
- efi_get_next_high_mono_count_t *get_next_high_mono_count;
- efi_reset_system_t *reset_system;
- efi_set_virtual_address_map_t *set_virtual_address_map;
- struct efi_memory_map memmap;
- unsigned long flags;
+ const efi_runtime_services_t *runtime; /* EFI runtime services table */
+ unsigned int runtime_version; /* Runtime services version */
+ unsigned int runtime_supported_mask;
+
+ unsigned long acpi; /* ACPI table (IA64 ext 0.71) */
+ unsigned long acpi20; /* ACPI table (ACPI 2.0) */
+ unsigned long smbios; /* SMBIOS table (32 bit entry point) */
+ unsigned long smbios3; /* SMBIOS table (64 bit entry point) */
+ unsigned long esrt; /* ESRT table */
+ unsigned long tpm_log; /* TPM2 Event Log table */
+ unsigned long tpm_final_log; /* TPM2 Final Events Log table */
+
+ efi_get_time_t *get_time;
+ efi_set_time_t *set_time;
+ efi_get_wakeup_time_t *get_wakeup_time;
+ efi_set_wakeup_time_t *set_wakeup_time;
+ efi_get_variable_t *get_variable;
+ efi_get_next_variable_t *get_next_variable;
+ efi_set_variable_t *set_variable;
+ efi_set_variable_t *set_variable_nonblocking;
+ efi_query_variable_info_t *query_variable_info;
+ efi_query_variable_info_t *query_variable_info_nonblocking;
+ efi_update_capsule_t *update_capsule;
+ efi_query_capsule_caps_t *query_capsule_caps;
+ efi_get_next_high_mono_count_t *get_next_high_mono_count;
+ efi_reset_system_t *reset_system;
+
+ struct efi_memory_map memmap;
+ unsigned long flags;
} efi;
+#define EFI_RT_SUPPORTED_GET_TIME 0x0001
+#define EFI_RT_SUPPORTED_SET_TIME 0x0002
+#define EFI_RT_SUPPORTED_GET_WAKEUP_TIME 0x0004
+#define EFI_RT_SUPPORTED_SET_WAKEUP_TIME 0x0008
+#define EFI_RT_SUPPORTED_GET_VARIABLE 0x0010
+#define EFI_RT_SUPPORTED_GET_NEXT_VARIABLE_NAME 0x0020
+#define EFI_RT_SUPPORTED_SET_VARIABLE 0x0040
+#define EFI_RT_SUPPORTED_SET_VIRTUAL_ADDRESS_MAP 0x0080
+#define EFI_RT_SUPPORTED_CONVERT_POINTER 0x0100
+#define EFI_RT_SUPPORTED_GET_NEXT_HIGH_MONOTONIC_COUNT 0x0200
+#define EFI_RT_SUPPORTED_RESET_SYSTEM 0x0400
+#define EFI_RT_SUPPORTED_UPDATE_CAPSULE 0x0800
+#define EFI_RT_SUPPORTED_QUERY_CAPSULE_CAPABILITIES 0x1000
+#define EFI_RT_SUPPORTED_QUERY_VARIABLE_INFO 0x2000
+
+#define EFI_RT_SUPPORTED_ALL 0x3fff
+
+#define EFI_RT_SUPPORTED_TIME_SERVICES 0x000f
+#define EFI_RT_SUPPORTED_VARIABLE_SERVICES 0x0070
+
extern struct mm_struct efi_mm;
static inline int
@@ -1044,7 +608,6 @@ extern void efi_enter_virtual_mode (void); /* switch EFI to virtual mode, if pos
extern efi_status_t efi_query_variable_store(u32 attributes,
unsigned long size,
bool nonblocking);
-extern void efi_find_mirror(void);
#else
static inline efi_status_t efi_query_variable_store(u32 attributes,
@@ -1056,24 +619,31 @@ static inline efi_status_t efi_query_variable_store(u32 attributes,
#endif
extern void __iomem *efi_lookup_mapped_addr(u64 phys_addr);
-extern phys_addr_t __init efi_memmap_alloc(unsigned int num_entries);
+extern int __init efi_memmap_alloc(unsigned int num_entries,
+ struct efi_memory_map_data *data);
+extern void __efi_memmap_free(u64 phys, unsigned long size,
+ unsigned long flags);
extern int __init efi_memmap_init_early(struct efi_memory_map_data *data);
extern int __init efi_memmap_init_late(phys_addr_t addr, unsigned long size);
extern void __init efi_memmap_unmap(void);
-extern int __init efi_memmap_install(phys_addr_t addr, unsigned int nr_map);
+extern int __init efi_memmap_install(struct efi_memory_map_data *data);
extern int __init efi_memmap_split_count(efi_memory_desc_t *md,
struct range *range);
extern void __init efi_memmap_insert(struct efi_memory_map *old_memmap,
void *buf, struct efi_mem_range *mem);
-extern int efi_config_init(efi_config_table_type_t *arch_tables);
#ifdef CONFIG_EFI_ESRT
extern void __init efi_esrt_init(void);
#else
static inline void efi_esrt_init(void) { }
#endif
-extern int efi_config_parse_tables(void *config_tables, int count, int sz,
- efi_config_table_type_t *arch_tables);
+extern int efi_config_parse_tables(const efi_config_table_t *config_tables,
+ int count,
+ const efi_config_table_type_t *arch_tables);
+extern int efi_systab_check_header(const efi_table_hdr_t *systab_hdr,
+ int min_major_version);
+extern void efi_systab_report_header(const efi_table_hdr_t *systab_hdr,
+ unsigned long fw_vendor);
extern u64 efi_get_iobase (void);
extern int efi_mem_type(unsigned long phys_addr);
extern u64 efi_mem_attributes (unsigned long phys_addr);
@@ -1085,7 +655,7 @@ extern void efi_mem_reserve(phys_addr_t addr, u64 size);
extern int efi_mem_reserve_persistent(phys_addr_t addr, u64 size);
extern void efi_initialize_iomem_resources(struct resource *code_resource,
struct resource *data_resource, struct resource *bss_resource);
-extern int efi_get_fdt_params(struct efi_fdt_params *params);
+extern u64 efi_get_fdt_params(struct efi_memory_map_data *data);
extern struct kobject *efi_kobj;
extern int efi_reboot_quirk_mode;
@@ -1097,6 +667,8 @@ extern void __init efi_fake_memmap(void);
static inline void efi_fake_memmap(void) { }
#endif
+extern unsigned long efi_mem_attr_table;
+
/*
* efi_memattr_perm_setter - arch specific callback function passed into
* efi_memattr_apply_permissions() that updates the
@@ -1202,6 +774,8 @@ extern int __init efi_setup_pcdp_console(char *);
#define EFI_DBG 8 /* Print additional debug info at runtime */
#define EFI_NX_PE_DATA 9 /* Can runtime data regions be mapped non-executable? */
#define EFI_MEM_ATTR 10 /* Did firmware publish an EFI_MEMORY_ATTRIBUTES table? */
+#define EFI_MEM_NO_SOFT_RESERVE 11 /* Is the kernel configured to ignore soft reservations? */
+#define EFI_PRESERVE_BS_REGIONS 12 /* Are EFI boot-services memory segments available? */
#ifdef CONFIG_EFI
/*
@@ -1212,6 +786,19 @@ static inline bool efi_enabled(int feature)
return test_bit(feature, &efi.flags) != 0;
}
extern void efi_reboot(enum reboot_mode reboot_mode, const char *__unused);
+
+bool __pure __efi_soft_reserve_enabled(void);
+
+static inline bool __pure efi_soft_reserve_enabled(void)
+{
+ return IS_ENABLED(CONFIG_EFI_SOFT_RESERVE)
+ && __efi_soft_reserve_enabled();
+}
+
+static inline bool efi_rt_services_supported(unsigned int mask)
+{
+ return (efi.runtime_supported_mask & mask) == mask;
+}
#else
static inline bool efi_enabled(int feature)
{
@@ -1225,6 +812,16 @@ efi_capsule_pending(int *reset_type)
{
return false;
}
+
+static inline bool efi_soft_reserve_enabled(void)
+{
+ return false;
+}
+
+static inline bool efi_rt_services_supported(unsigned int mask)
+{
+ return false;
+}
#endif
extern int efi_status_to_err(efi_status_t status);
@@ -1254,13 +851,6 @@ extern int efi_status_to_err(efi_status_t status);
#define EFI_VARIABLE_GUID_LEN UUID_STRING_LEN
/*
- * The type of search to perform when calling boottime->locate_handle
- */
-#define EFI_LOCATE_ALL_HANDLES 0
-#define EFI_LOCATE_BY_REGISTER_NOTIFY 1
-#define EFI_LOCATE_BY_PROTOCOL 2
-
-/*
* EFI Device Path information
*/
#define EFI_DEV_HW 0x01
@@ -1299,30 +889,40 @@ extern int efi_status_to_err(efi_status_t status);
#define EFI_DEV_END_ENTIRE 0xFF
struct efi_generic_dev_path {
- u8 type;
- u8 sub_type;
- u16 length;
-} __attribute ((packed));
+ u8 type;
+ u8 sub_type;
+ u16 length;
+} __packed;
+
+struct efi_acpi_dev_path {
+ struct efi_generic_dev_path header;
+ u32 hid;
+ u32 uid;
+} __packed;
+
+struct efi_pci_dev_path {
+ struct efi_generic_dev_path header;
+ u8 fn;
+ u8 dev;
+} __packed;
+
+struct efi_vendor_dev_path {
+ struct efi_generic_dev_path header;
+ efi_guid_t vendorguid;
+ u8 vendordata[];
+} __packed;
struct efi_dev_path {
- u8 type; /* can be replaced with unnamed */
- u8 sub_type; /* struct efi_generic_dev_path; */
- u16 length; /* once we've moved to -std=c11 */
union {
- struct {
- u32 hid;
- u32 uid;
- } acpi;
- struct {
- u8 fn;
- u8 dev;
- } pci;
+ struct efi_generic_dev_path header;
+ struct efi_acpi_dev_path acpi;
+ struct efi_pci_dev_path pci;
+ struct efi_vendor_dev_path vendor;
};
-} __attribute ((packed));
+} __packed;
-#if IS_ENABLED(CONFIG_EFI_DEV_PATH_PARSER)
-struct device *efi_get_device_by_path(struct efi_dev_path **node, size_t *len);
-#endif
+struct device *efi_get_device_by_path(const struct efi_dev_path **node,
+ size_t *len);
static inline void memrange_efi_to_native(u64 *addr, u64 *npages)
{
@@ -1377,98 +977,6 @@ struct efivar_entry {
bool deleting;
};
-typedef struct {
- u32 reset;
- u32 output_string;
- u32 test_string;
-} efi_simple_text_output_protocol_32_t;
-
-typedef struct {
- u64 reset;
- u64 output_string;
- u64 test_string;
-} efi_simple_text_output_protocol_64_t;
-
-struct efi_simple_text_output_protocol {
- void *reset;
- efi_status_t (*output_string)(void *, void *);
- void *test_string;
-};
-
-#define PIXEL_RGB_RESERVED_8BIT_PER_COLOR 0
-#define PIXEL_BGR_RESERVED_8BIT_PER_COLOR 1
-#define PIXEL_BIT_MASK 2
-#define PIXEL_BLT_ONLY 3
-#define PIXEL_FORMAT_MAX 4
-
-struct efi_pixel_bitmask {
- u32 red_mask;
- u32 green_mask;
- u32 blue_mask;
- u32 reserved_mask;
-};
-
-struct efi_graphics_output_mode_info {
- u32 version;
- u32 horizontal_resolution;
- u32 vertical_resolution;
- int pixel_format;
- struct efi_pixel_bitmask pixel_information;
- u32 pixels_per_scan_line;
-} __packed;
-
-struct efi_graphics_output_protocol_mode_32 {
- u32 max_mode;
- u32 mode;
- u32 info;
- u32 size_of_info;
- u64 frame_buffer_base;
- u32 frame_buffer_size;
-} __packed;
-
-struct efi_graphics_output_protocol_mode_64 {
- u32 max_mode;
- u32 mode;
- u64 info;
- u64 size_of_info;
- u64 frame_buffer_base;
- u64 frame_buffer_size;
-} __packed;
-
-struct efi_graphics_output_protocol_mode {
- u32 max_mode;
- u32 mode;
- unsigned long info;
- unsigned long size_of_info;
- u64 frame_buffer_base;
- unsigned long frame_buffer_size;
-} __packed;
-
-struct efi_graphics_output_protocol_32 {
- u32 query_mode;
- u32 set_mode;
- u32 blt;
- u32 mode;
-};
-
-struct efi_graphics_output_protocol_64 {
- u64 query_mode;
- u64 set_mode;
- u64 blt;
- u64 mode;
-};
-
-struct efi_graphics_output_protocol {
- unsigned long query_mode;
- unsigned long set_mode;
- unsigned long blt;
- struct efi_graphics_output_protocol_mode *mode;
-};
-
-typedef efi_status_t (*efi_graphics_output_protocol_query_mode)(
- struct efi_graphics_output_protocol *, u32, unsigned long *,
- struct efi_graphics_output_mode_info **);
-
extern struct list_head efivar_sysfs_list;
static inline void
@@ -1566,61 +1074,6 @@ static inline int efi_runtime_map_copy(void *buf, size_t bufsz)
#endif
-/* prototypes shared between arch specific and generic stub code */
-
-void efi_printk(efi_system_table_t *sys_table_arg, char *str);
-
-void efi_free(efi_system_table_t *sys_table_arg, unsigned long size,
- unsigned long addr);
-
-char *efi_convert_cmdline(efi_system_table_t *sys_table_arg,
- efi_loaded_image_t *image, int *cmd_line_len);
-
-efi_status_t efi_get_memory_map(efi_system_table_t *sys_table_arg,
- struct efi_boot_memmap *map);
-
-efi_status_t efi_low_alloc_above(efi_system_table_t *sys_table_arg,
- unsigned long size, unsigned long align,
- unsigned long *addr, unsigned long min);
-
-static inline
-efi_status_t efi_low_alloc(efi_system_table_t *sys_table_arg,
- unsigned long size, unsigned long align,
- unsigned long *addr)
-{
- /*
- * Don't allocate at 0x0. It will confuse code that
- * checks pointers against NULL. Skip the first 8
- * bytes so we start at a nice even number.
- */
- return efi_low_alloc_above(sys_table_arg, size, align, addr, 0x8);
-}
-
-efi_status_t efi_high_alloc(efi_system_table_t *sys_table_arg,
- unsigned long size, unsigned long align,
- unsigned long *addr, unsigned long max);
-
-efi_status_t efi_relocate_kernel(efi_system_table_t *sys_table_arg,
- unsigned long *image_addr,
- unsigned long image_size,
- unsigned long alloc_size,
- unsigned long preferred_addr,
- unsigned long alignment,
- unsigned long min_addr);
-
-efi_status_t handle_cmdline_files(efi_system_table_t *sys_table_arg,
- efi_loaded_image_t *image,
- char *cmd_line, char *option_string,
- unsigned long max_addr,
- unsigned long *load_addr,
- unsigned long *load_size);
-
-efi_status_t efi_parse_options(char const *cmdline);
-
-efi_status_t efi_setup_gop(efi_system_table_t *sys_table_arg,
- struct screen_info *si, efi_guid_t *proto,
- unsigned long size);
-
#ifdef CONFIG_EFI
extern bool efi_runtime_disabled(void);
#else
@@ -1636,16 +1089,24 @@ enum efi_secureboot_mode {
efi_secureboot_mode_disabled,
efi_secureboot_mode_enabled,
};
-enum efi_secureboot_mode efi_get_secureboot(efi_system_table_t *sys_table);
+enum efi_secureboot_mode efi_get_secureboot(void);
#ifdef CONFIG_RESET_ATTACK_MITIGATION
-void efi_enable_reset_attack_mitigation(efi_system_table_t *sys_table_arg);
+void efi_enable_reset_attack_mitigation(void);
#else
static inline void
-efi_enable_reset_attack_mitigation(efi_system_table_t *sys_table_arg) { }
+efi_enable_reset_attack_mitigation(void) { }
+#endif
+
+#ifdef CONFIG_EFI_EMBEDDED_FIRMWARE
+void efi_check_for_embedded_firmwares(void);
+#else
+static inline void efi_check_for_embedded_firmwares(void) { }
#endif
-void efi_retrieve_tpm2_eventlog(efi_system_table_t *sys_table);
+efi_status_t efi_random_get_seed(void);
+
+void efi_retrieve_tpm2_eventlog(void);
/*
* Arch code can implement the following three template macros, avoiding
@@ -1696,17 +1157,6 @@ void efi_retrieve_tpm2_eventlog(efi_system_table_t *sys_table);
arch_efi_call_virt_teardown(); \
})
-typedef efi_status_t (*efi_exit_boot_map_processing)(
- efi_system_table_t *sys_table_arg,
- struct efi_boot_memmap *map,
- void *priv);
-
-efi_status_t efi_exit_boot_services(efi_system_table_t *sys_table,
- void *handle,
- struct efi_boot_memmap *map,
- void *priv,
- efi_exit_boot_map_processing priv_func);
-
#define EFI_RANDOM_SEED_SIZE 64U
struct linux_efi_random_seed {
@@ -1793,4 +1243,6 @@ struct linux_efi_memreserve {
#define EFI_MEMRESERVE_COUNT(size) (((size) - sizeof(struct linux_efi_memreserve)) \
/ sizeof(((struct linux_efi_memreserve *)0)->entry[0]))
+void __init efi_arch_mem_reserve(phys_addr_t addr, u64 size);
+
#endif /* _LINUX_EFI_H */
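With runtime services now gated by runtime_supported_mask and soft reservations flagged via EFI_MEMORY_SP, callers are expected to probe before use. A minimal sketch (not part of this patch; caller names invented, only the helpers declared above are real) of how a driver might do that:

/*
 * Illustrative sketch only: test the relevant EFI_RT_SUPPORTED_* bit before
 * invoking a runtime service, and treat EFI_MEMORY_SP ranges as off-limits
 * when soft reservations are honoured.
 */
static int example_read_fw_clock(efi_time_t *tm)
{
	efi_time_cap_t cap;
	efi_status_t status;

	if (!efi_rt_services_supported(EFI_RT_SUPPORTED_GET_TIME))
		return -EOPNOTSUPP;	/* firmware did not advertise GetTime */

	status = efi.get_time(tm, &cap);
	return efi_status_to_err(status);
}

static bool example_range_is_soft_reserved(const efi_memory_desc_t *md)
{
	return efi_soft_reserve_enabled() && (md->attribute & EFI_MEMORY_SP);
}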
diff --git a/include/linux/efi_embedded_fw.h b/include/linux/efi_embedded_fw.h
new file mode 100644
index 000000000000..57eac5241303
--- /dev/null
+++ b/include/linux/efi_embedded_fw.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_EFI_EMBEDDED_FW_H
+#define _LINUX_EFI_EMBEDDED_FW_H
+
+#include <linux/list.h>
+#include <linux/mod_devicetable.h>
+
+#define EFI_EMBEDDED_FW_PREFIX_LEN 8
+
+/*
+ * This struct and efi_embedded_fw_list are private to the efi-embedded fw
+ * implementation they are in this header for use by lib/test_firmware.c only!
+ */
+struct efi_embedded_fw {
+ struct list_head list;
+ const char *name;
+ const u8 *data;
+ size_t length;
+};
+
+extern struct list_head efi_embedded_fw_list;
+
+/**
+ * struct efi_embedded_fw_desc - This struct is used by the EFI embedded-fw
+ * code to search for embedded firmwares.
+ *
+ * @name: Name to register the firmware with if found
+ * @prefix: First 8 bytes of the firmware
+ * @length: Length of the firmware in bytes including prefix
+ * @sha256: SHA256 of the firmware
+ */
+struct efi_embedded_fw_desc {
+ const char *name;
+ u8 prefix[EFI_EMBEDDED_FW_PREFIX_LEN];
+ u32 length;
+ u8 sha256[32];
+};
+
+extern const struct dmi_system_id touchscreen_dmi_table[];
+
+int efi_get_embedded_fw(const char *name, const u8 **dat, size_t *sz);
+
+#endif
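A hypothetical descriptor entry and lookup, to illustrate how the structure above is meant to be filled in and consumed (all names and values below are invented; only struct efi_embedded_fw_desc and efi_get_embedded_fw() come from the header):

/*
 * Hypothetical example: a descriptor a DMI quirk table could point at so the
 * boot-time scan caches the blob, and a driver later fetching it by name
 * instead of loading it from the filesystem.
 */
static const struct efi_embedded_fw_desc example_ts_fw_desc = {
	.name	= "example/ts_fw.bin",
	.prefix	= { 0x4d, 0x5a, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05 },
	.length	= 8192,		/* bytes, including the prefix */
	.sha256	= { 0, },	/* SHA256 of the whole blob */
};

static int example_fetch_fw(const u8 **data, size_t *size)
{
	/* returns -ENOENT when no matching blob was found during boot */
	return efi_get_embedded_fw("example/ts_fw.bin", data, size);
}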
diff --git a/include/linux/elfnote.h b/include/linux/elfnote.h
index f236f5b931b2..594d4e78654f 100644
--- a/include/linux/elfnote.h
+++ b/include/linux/elfnote.h
@@ -59,7 +59,7 @@
ELFNOTE_END
#else /* !__ASSEMBLER__ */
-#include <linux/elf.h>
+#include <uapi/linux/elf.h>
/*
* Use an anonymous structure which matches the shape of
* Elf{32,64}_Nhdr, but includes the name and desc data. The size and
diff --git a/include/linux/energy_model.h b/include/linux/energy_model.h
index 73f8c3cb9588..d249b88a4d5a 100644
--- a/include/linux/energy_model.h
+++ b/include/linux/energy_model.h
@@ -9,7 +9,6 @@
#include <linux/sched/topology.h>
#include <linux/types.h>
-#ifdef CONFIG_ENERGY_MODEL
/**
* em_cap_state - Capacity state of a performance domain
* @frequency: The CPU frequency in KHz, for consistency with CPUFreq
@@ -40,6 +39,7 @@ struct em_perf_domain {
unsigned long cpus[0];
};
+#ifdef CONFIG_ENERGY_MODEL
#define EM_CPU_MAX_POWER 0xFFFF
struct em_data_callback {
@@ -160,7 +160,6 @@ static inline int em_pd_nr_cap_states(struct em_perf_domain *pd)
}
#else
-struct em_perf_domain {};
struct em_data_callback {};
#define EM_DATA_CB(_active_power_cb) { }
diff --git a/include/linux/errname.h b/include/linux/errname.h
new file mode 100644
index 000000000000..e8576ad90cb7
--- /dev/null
+++ b/include/linux/errname.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_ERRNAME_H
+#define _LINUX_ERRNAME_H
+
+#include <linux/stddef.h>
+
+#ifdef CONFIG_SYMBOLIC_ERRNAME
+const char *errname(int err);
+#else
+static inline const char *errname(int err)
+{
+ return NULL;
+}
+#endif
+
+#endif /* _LINUX_ERRNAME_H */
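Since errname() returns NULL when CONFIG_SYMBOLIC_ERRNAME is off, callers keep a numeric fallback. A small illustrative sketch (caller name invented):

static void example_log_failure(int err)
{
	const char *name = errname(err);	/* symbolic name, or NULL */

	if (name)
		pr_warn("request failed: %s\n", name);
	else
		pr_warn("request failed: %d\n", err);
}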
diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h
index f6564b572d77..8801f1f986e5 100644
--- a/include/linux/etherdevice.h
+++ b/include/linux/etherdevice.h
@@ -43,7 +43,6 @@ __be16 eth_header_parse_protocol(const struct sk_buff *skb);
int eth_prepare_mac_addr_change(struct net_device *dev, void *p);
void eth_commit_mac_addr_change(struct net_device *dev, void *p);
int eth_mac_addr(struct net_device *dev, void *p);
-int eth_change_mtu(struct net_device *dev, int new_mtu);
int eth_validate_addr(struct net_device *dev);
struct net_device *alloc_etherdev_mqs(int sizeof_priv, unsigned int txqs,
diff --git a/include/linux/ethtool_netlink.h b/include/linux/ethtool_netlink.h
new file mode 100644
index 000000000000..d01b77887f82
--- /dev/null
+++ b/include/linux/ethtool_netlink.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef _LINUX_ETHTOOL_NETLINK_H_
+#define _LINUX_ETHTOOL_NETLINK_H_
+
+#include <uapi/linux/ethtool_netlink.h>
+#include <linux/ethtool.h>
+#include <linux/netdevice.h>
+
+#define __ETHTOOL_LINK_MODE_MASK_NWORDS \
+ DIV_ROUND_UP(__ETHTOOL_LINK_MODE_MASK_NBITS, 32)
+
+enum ethtool_multicast_groups {
+ ETHNL_MCGRP_MONITOR,
+};
+
+#endif /* _LINUX_ETHTOOL_NETLINK_H_ */
diff --git a/include/linux/eventfd.h b/include/linux/eventfd.h
index ffcc7724ca21..dc4fd8a6644d 100644
--- a/include/linux/eventfd.h
+++ b/include/linux/eventfd.h
@@ -12,6 +12,8 @@
#include <linux/fcntl.h>
#include <linux/wait.h>
#include <linux/err.h>
+#include <linux/percpu-defs.h>
+#include <linux/percpu.h>
/*
* CAREFUL: Check include/uapi/asm-generic/fcntl.h when defining
@@ -40,6 +42,13 @@ __u64 eventfd_signal(struct eventfd_ctx *ctx, __u64 n);
int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, wait_queue_entry_t *wait,
__u64 *cnt);
+DECLARE_PER_CPU(int, eventfd_wake_count);
+
+static inline bool eventfd_signal_count(void)
+{
+ return this_cpu_read(eventfd_wake_count);
+}
+
#else /* CONFIG_EVENTFD */
/*
@@ -68,6 +77,11 @@ static inline int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx,
return -ENOSYS;
}
+static inline bool eventfd_signal_count(void)
+{
+ return false;
+}
+
#endif
#endif /* _LINUX_EVENTFD_H */
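The per-CPU eventfd_wake_count lets code that may itself be reached from an eventfd_signal() wakeup detect that case and defer. A sketch of the intended pattern (caller and work item are invented; eventfd_signal() and eventfd_signal_count() are from the header):

static void example_notify(struct eventfd_ctx *ctx, struct work_struct *deferred)
{
	if (eventfd_signal_count())
		schedule_work(deferred);	/* signal later, outside the wakeup path */
	else
		eventfd_signal(ctx, 1);
}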
diff --git a/include/linux/eventpoll.h b/include/linux/eventpoll.h
index bc6d79b00c4e..8f000fada5a4 100644
--- a/include/linux/eventpoll.h
+++ b/include/linux/eventpoll.h
@@ -61,6 +61,15 @@ static inline void eventpoll_release(struct file *file)
eventpoll_release_file(file);
}
+int do_epoll_ctl(int epfd, int op, int fd, struct epoll_event *epds,
+ bool nonblock);
+
+/* Tells if the epoll_ctl(2) operation needs an event copy from userspace */
+static inline int ep_op_has_event(int op)
+{
+ return op != EPOLL_CTL_DEL;
+}
+
#else
static inline void eventpoll_init_file(struct file *file) {}
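A simplified sketch of how ep_op_has_event() and the new do_epoll_ctl() are expected to be used together (modelled on the epoll_ctl() syscall path; the wrapper name is invented):

static int example_epoll_ctl(int epfd, int op, int fd,
			     struct epoll_event __user *uevent)
{
	struct epoll_event ev;

	/* only EPOLL_CTL_ADD/MOD carry an event to copy in */
	if (ep_op_has_event(op) &&
	    copy_from_user(&ev, uevent, sizeof(ev)))
		return -EFAULT;

	return do_epoll_ctl(epfd, op, fd, &ev, false);
}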
diff --git a/include/linux/export.h b/include/linux/export.h
index 941d075f03d6..fceb5e855717 100644
--- a/include/linux/export.h
+++ b/include/linux/export.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef _LINUX_EXPORT_H
#define _LINUX_EXPORT_H
@@ -46,7 +47,7 @@ extern struct module __this_module;
* absolute relocations that require runtime processing on relocatable
* kernels.
*/
-#define __KSYMTAB_ENTRY_NS(sym, sec) \
+#define __KSYMTAB_ENTRY(sym, sec) \
__ADDRESSABLE(sym) \
asm(" .section \"___ksymtab" sec "+" #sym "\", \"a\" \n" \
" .balign 4 \n" \
@@ -56,33 +57,17 @@ extern struct module __this_module;
" .long __kstrtabns_" #sym "- . \n" \
" .previous \n")
-#define __KSYMTAB_ENTRY(sym, sec) \
- __ADDRESSABLE(sym) \
- asm(" .section \"___ksymtab" sec "+" #sym "\", \"a\" \n" \
- " .balign 4 \n" \
- "__ksymtab_" #sym ": \n" \
- " .long " #sym "- . \n" \
- " .long __kstrtab_" #sym "- . \n" \
- " .long 0 \n" \
- " .previous \n")
-
struct kernel_symbol {
int value_offset;
int name_offset;
int namespace_offset;
};
#else
-#define __KSYMTAB_ENTRY_NS(sym, sec) \
- static const struct kernel_symbol __ksymtab_##sym \
- __attribute__((section("___ksymtab" sec "+" #sym), used)) \
- __aligned(sizeof(void *)) \
- = { (unsigned long)&sym, __kstrtab_##sym, __kstrtabns_##sym }
-
#define __KSYMTAB_ENTRY(sym, sec) \
static const struct kernel_symbol __ksymtab_##sym \
__attribute__((section("___ksymtab" sec "+" #sym), used)) \
__aligned(sizeof(void *)) \
- = { (unsigned long)&sym, __kstrtab_##sym, NULL }
+ = { (unsigned long)&sym, __kstrtab_##sym, __kstrtabns_##sym }
struct kernel_symbol {
unsigned long value;
@@ -93,28 +78,33 @@ struct kernel_symbol {
#ifdef __GENKSYMS__
-#define ___EXPORT_SYMBOL(sym,sec) __GENKSYMS_EXPORT_SYMBOL(sym)
-#define ___EXPORT_SYMBOL_NS(sym,sec,ns) __GENKSYMS_EXPORT_SYMBOL(sym)
+#define ___EXPORT_SYMBOL(sym, sec, ns) __GENKSYMS_EXPORT_SYMBOL(sym)
#else
-#define ___export_symbol_common(sym, sec) \
- extern typeof(sym) sym; \
- __CRC_SYMBOL(sym, sec); \
- static const char __kstrtab_##sym[] \
- __attribute__((section("__ksymtab_strings"), used, aligned(1))) \
- = #sym \
-
-/* For every exported symbol, place a struct in the __ksymtab section */
-#define ___EXPORT_SYMBOL_NS(sym, sec, ns) \
- ___export_symbol_common(sym, sec); \
- static const char __kstrtabns_##sym[] \
- __attribute__((section("__ksymtab_strings"), used, aligned(1))) \
- = #ns; \
- __KSYMTAB_ENTRY_NS(sym, sec)
-
-#define ___EXPORT_SYMBOL(sym, sec) \
- ___export_symbol_common(sym, sec); \
+/*
+ * For every exported symbol, do the following:
+ *
+ * - If applicable, place a CRC entry in the __kcrctab section.
+ * - Put the name of the symbol and namespace (empty string "" for none) in
+ * __ksymtab_strings.
+ * - Place a struct kernel_symbol entry in the __ksymtab section.
+ *
+ * note on .section use: we specify progbits since usage of the "M" (SHF_MERGE)
+ * section flag requires it. Use '%progbits' instead of '@progbits' since the
+ * former apparently works on all arches according to the binutils source.
+ */
+#define ___EXPORT_SYMBOL(sym, sec, ns) \
+ extern typeof(sym) sym; \
+ extern const char __kstrtab_##sym[]; \
+ extern const char __kstrtabns_##sym[]; \
+ __CRC_SYMBOL(sym, sec); \
+ asm(" .section \"__ksymtab_strings\",\"aMS\",%progbits,1 \n" \
+ "__kstrtab_" #sym ": \n" \
+ " .asciz \"" #sym "\" \n" \
+ "__kstrtabns_" #sym ": \n" \
+ " .asciz \"" ns "\" \n" \
+ " .previous \n"); \
__KSYMTAB_ENTRY(sym, sec)
#endif
@@ -126,8 +116,7 @@ struct kernel_symbol {
* be reused in other execution contexts such as the UEFI stub or the
* decompressor.
*/
-#define __EXPORT_SYMBOL_NS(sym, sec, ns)
-#define __EXPORT_SYMBOL(sym, sec)
+#define __EXPORT_SYMBOL(sym, sec, ns)
#elif defined(CONFIG_TRIM_UNUSED_KSYMS)
@@ -143,48 +132,38 @@ struct kernel_symbol {
#define __ksym_marker(sym) \
static int __ksym_marker_##sym[0] __section(".discard.ksym") __used
-#define __EXPORT_SYMBOL(sym, sec) \
- __ksym_marker(sym); \
- __cond_export_sym(sym, sec, __is_defined(__KSYM_##sym))
-#define __cond_export_sym(sym, sec, conf) \
- ___cond_export_sym(sym, sec, conf)
-#define ___cond_export_sym(sym, sec, enabled) \
- __cond_export_sym_##enabled(sym, sec)
-#define __cond_export_sym_1(sym, sec) ___EXPORT_SYMBOL(sym, sec)
-#define __cond_export_sym_0(sym, sec) /* nothing */
-
-#define __EXPORT_SYMBOL_NS(sym, sec, ns) \
+#define __EXPORT_SYMBOL(sym, sec, ns) \
__ksym_marker(sym); \
- __cond_export_ns_sym(sym, sec, ns, __is_defined(__KSYM_##sym))
-#define __cond_export_ns_sym(sym, sec, ns, conf) \
- ___cond_export_ns_sym(sym, sec, ns, conf)
-#define ___cond_export_ns_sym(sym, sec, ns, enabled) \
- __cond_export_ns_sym_##enabled(sym, sec, ns)
-#define __cond_export_ns_sym_1(sym, sec, ns) ___EXPORT_SYMBOL_NS(sym, sec, ns)
-#define __cond_export_ns_sym_0(sym, sec, ns) /* nothing */
+ __cond_export_sym(sym, sec, ns, __is_defined(__KSYM_##sym))
+#define __cond_export_sym(sym, sec, ns, conf) \
+ ___cond_export_sym(sym, sec, ns, conf)
+#define ___cond_export_sym(sym, sec, ns, enabled) \
+ __cond_export_sym_##enabled(sym, sec, ns)
+#define __cond_export_sym_1(sym, sec, ns) ___EXPORT_SYMBOL(sym, sec, ns)
+#define __cond_export_sym_0(sym, sec, ns) /* nothing */
#else
-#define __EXPORT_SYMBOL_NS(sym,sec,ns) ___EXPORT_SYMBOL_NS(sym,sec,ns)
-#define __EXPORT_SYMBOL(sym,sec) ___EXPORT_SYMBOL(sym,sec)
+#define __EXPORT_SYMBOL(sym, sec, ns) ___EXPORT_SYMBOL(sym, sec, ns)
#endif /* CONFIG_MODULES */
#ifdef DEFAULT_SYMBOL_NAMESPACE
-#undef __EXPORT_SYMBOL
-#define __EXPORT_SYMBOL(sym, sec) \
- __EXPORT_SYMBOL_NS(sym, sec, DEFAULT_SYMBOL_NAMESPACE)
+#include <linux/stringify.h>
+#define _EXPORT_SYMBOL(sym, sec) __EXPORT_SYMBOL(sym, sec, __stringify(DEFAULT_SYMBOL_NAMESPACE))
+#else
+#define _EXPORT_SYMBOL(sym, sec) __EXPORT_SYMBOL(sym, sec, "")
#endif
-#define EXPORT_SYMBOL(sym) __EXPORT_SYMBOL(sym, "")
-#define EXPORT_SYMBOL_GPL(sym) __EXPORT_SYMBOL(sym, "_gpl")
-#define EXPORT_SYMBOL_GPL_FUTURE(sym) __EXPORT_SYMBOL(sym, "_gpl_future")
-#define EXPORT_SYMBOL_NS(sym, ns) __EXPORT_SYMBOL_NS(sym, "", ns)
-#define EXPORT_SYMBOL_NS_GPL(sym, ns) __EXPORT_SYMBOL_NS(sym, "_gpl", ns)
+#define EXPORT_SYMBOL(sym) _EXPORT_SYMBOL(sym, "")
+#define EXPORT_SYMBOL_GPL(sym) _EXPORT_SYMBOL(sym, "_gpl")
+#define EXPORT_SYMBOL_GPL_FUTURE(sym) _EXPORT_SYMBOL(sym, "_gpl_future")
+#define EXPORT_SYMBOL_NS(sym, ns) __EXPORT_SYMBOL(sym, "", #ns)
+#define EXPORT_SYMBOL_NS_GPL(sym, ns) __EXPORT_SYMBOL(sym, "_gpl", #ns)
#ifdef CONFIG_UNUSED_SYMBOLS
-#define EXPORT_UNUSED_SYMBOL(sym) __EXPORT_SYMBOL(sym, "_unused")
-#define EXPORT_UNUSED_SYMBOL_GPL(sym) __EXPORT_SYMBOL(sym, "_unused_gpl")
+#define EXPORT_UNUSED_SYMBOL(sym) _EXPORT_SYMBOL(sym, "_unused")
+#define EXPORT_UNUSED_SYMBOL_GPL(sym) _EXPORT_SYMBOL(sym, "_unused_gpl")
#else
#define EXPORT_UNUSED_SYMBOL(sym)
#define EXPORT_UNUSED_SYMBOL_GPL(sym)
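With the namespace now carried as a string through a single ___EXPORT_SYMBOL(), exporting into a namespace and importing it look as follows (symbol and namespace names invented; MODULE_IMPORT_NS() comes from linux/module.h):

int example_do_thing(void)
{
	return 0;
}
EXPORT_SYMBOL_NS_GPL(example_do_thing, EXAMPLE_NS);

/* ...and in any module that calls it, before modpost will allow the link: */
MODULE_IMPORT_NS(EXAMPLE_NS);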
diff --git a/include/linux/exportfs.h b/include/linux/exportfs.h
index cf6571fc9c01..d896b8657085 100644
--- a/include/linux/exportfs.h
+++ b/include/linux/exportfs.h
@@ -105,6 +105,11 @@ enum fid_type {
FILEID_LUSTRE = 0x97,
/*
+ * 64 bit unique kernfs id
+ */
+ FILEID_KERNFS = 0xfe,
+
+ /*
* Filesystems must not use 0xff file ID.
*/
FILEID_INVALID = 0xff,
diff --git a/include/linux/extable.h b/include/linux/extable.h
index 81ecfaa83ad3..4ab9e78f313b 100644
--- a/include/linux/extable.h
+++ b/include/linux/extable.h
@@ -33,4 +33,14 @@ search_module_extables(unsigned long addr)
}
#endif /*CONFIG_MODULES*/
+#ifdef CONFIG_BPF_JIT
+const struct exception_table_entry *search_bpf_extables(unsigned long addr);
+#else
+static inline const struct exception_table_entry *
+search_bpf_extables(unsigned long addr)
+{
+ return NULL;
+}
+#endif
+
#endif /* _LINUX_EXTABLE_H */
diff --git a/include/linux/extcon.h b/include/linux/extcon.h
index 2bdf643d8593..1b1d77ec2114 100644
--- a/include/linux/extcon.h
+++ b/include/linux/extcon.h
@@ -170,7 +170,7 @@ struct extcon_dev;
* Following APIs get the connected state of each external connector.
* The 'id' argument indicates the defined external connector.
*/
-extern int extcon_get_state(struct extcon_dev *edev, unsigned int id);
+int extcon_get_state(struct extcon_dev *edev, unsigned int id);
/*
* Following APIs get the property of each external connector.
@@ -181,10 +181,10 @@ extern int extcon_get_state(struct extcon_dev *edev, unsigned int id);
* for each external connector. They are used to get the capability of the
* property of each external connector based on the id and property.
*/
-extern int extcon_get_property(struct extcon_dev *edev, unsigned int id,
+int extcon_get_property(struct extcon_dev *edev, unsigned int id,
unsigned int prop,
union extcon_property_value *prop_val);
-extern int extcon_get_property_capability(struct extcon_dev *edev,
+int extcon_get_property_capability(struct extcon_dev *edev,
unsigned int id, unsigned int prop);
/*
@@ -196,38 +196,38 @@ extern int extcon_get_property_capability(struct extcon_dev *edev,
* extcon_register_notifier_all(*edev, *nb) : Register a notifier block
* for all supported external connectors of the extcon.
*/
-extern int extcon_register_notifier(struct extcon_dev *edev, unsigned int id,
+int extcon_register_notifier(struct extcon_dev *edev, unsigned int id,
struct notifier_block *nb);
-extern int extcon_unregister_notifier(struct extcon_dev *edev, unsigned int id,
+int extcon_unregister_notifier(struct extcon_dev *edev, unsigned int id,
struct notifier_block *nb);
-extern int devm_extcon_register_notifier(struct device *dev,
+int devm_extcon_register_notifier(struct device *dev,
struct extcon_dev *edev, unsigned int id,
struct notifier_block *nb);
-extern void devm_extcon_unregister_notifier(struct device *dev,
+void devm_extcon_unregister_notifier(struct device *dev,
struct extcon_dev *edev, unsigned int id,
struct notifier_block *nb);
-extern int extcon_register_notifier_all(struct extcon_dev *edev,
+int extcon_register_notifier_all(struct extcon_dev *edev,
struct notifier_block *nb);
-extern int extcon_unregister_notifier_all(struct extcon_dev *edev,
+int extcon_unregister_notifier_all(struct extcon_dev *edev,
struct notifier_block *nb);
-extern int devm_extcon_register_notifier_all(struct device *dev,
+int devm_extcon_register_notifier_all(struct device *dev,
struct extcon_dev *edev,
struct notifier_block *nb);
-extern void devm_extcon_unregister_notifier_all(struct device *dev,
+void devm_extcon_unregister_notifier_all(struct device *dev,
struct extcon_dev *edev,
struct notifier_block *nb);
/*
* Following APIs get the extcon_dev from devicetree or by through extcon name.
*/
-extern struct extcon_dev *extcon_get_extcon_dev(const char *extcon_name);
-extern struct extcon_dev *extcon_find_edev_by_node(struct device_node *node);
-extern struct extcon_dev *extcon_get_edev_by_phandle(struct device *dev,
+struct extcon_dev *extcon_get_extcon_dev(const char *extcon_name);
+struct extcon_dev *extcon_find_edev_by_node(struct device_node *node);
+struct extcon_dev *extcon_get_edev_by_phandle(struct device *dev,
int index);
/* Following API get the name of extcon device. */
-extern const char *extcon_get_edev_name(struct extcon_dev *edev);
+const char *extcon_get_edev_name(struct extcon_dev *edev);
#else /* CONFIG_EXTCON */
static inline int extcon_get_state(struct extcon_dev *edev, unsigned int id)
diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h
index 284738996028..ac3f4888b3df 100644
--- a/include/linux/f2fs_fs.h
+++ b/include/linux/f2fs_fs.h
@@ -23,6 +23,7 @@
#define NULL_ADDR ((block_t)0) /* used as block_t addresses */
#define NEW_ADDR ((block_t)-1) /* used as block_t addresses */
+#define COMPRESS_ADDR ((block_t)-2) /* used as compressed data flag */
#define F2FS_BYTES_TO_BLK(bytes) ((bytes) >> F2FS_BLKSIZE_BITS)
#define F2FS_BLK_TO_BYTES(blk) ((blk) << F2FS_BLKSIZE_BITS)
@@ -271,6 +272,10 @@ struct f2fs_inode {
__le32 i_inode_checksum;/* inode meta checksum */
__le64 i_crtime; /* creation time */
__le32 i_crtime_nsec; /* creation time in nano scale */
+ __le64 i_compr_blocks; /* # of compressed blocks */
+ __u8 i_compress_algorithm; /* compress algorithm */
+ __u8 i_log_cluster_size; /* log of cluster size */
+ __le16 i_padding; /* padding */
__le32 i_extra_end[0]; /* for attribute size calculation */
} __packed;
__le32 i_addr[DEF_ADDRS_PER_INODE]; /* Pointers to data blocks */
diff --git a/include/linux/falloc.h b/include/linux/falloc.h
index 674d59f4d6ce..f3f0b97b1675 100644
--- a/include/linux/falloc.h
+++ b/include/linux/falloc.h
@@ -20,7 +20,10 @@ struct space_resv {
};
#define FS_IOC_RESVSP _IOW('X', 40, struct space_resv)
+#define FS_IOC_UNRESVSP _IOW('X', 41, struct space_resv)
#define FS_IOC_RESVSP64 _IOW('X', 42, struct space_resv)
+#define FS_IOC_UNRESVSP64 _IOW('X', 43, struct space_resv)
+#define FS_IOC_ZERO_RANGE _IOW('X', 57, struct space_resv)
#define FALLOC_FL_SUPPORTED_MASK (FALLOC_FL_KEEP_SIZE | \
FALLOC_FL_PUNCH_HOLE | \
@@ -29,4 +32,25 @@ struct space_resv {
FALLOC_FL_INSERT_RANGE | \
FALLOC_FL_UNSHARE_RANGE)
+/* on ia32 l_start is on a 32-bit boundary */
+#if defined(CONFIG_X86_64)
+struct space_resv_32 {
+ __s16 l_type;
+ __s16 l_whence;
+ __s64 l_start __attribute__((packed));
+ /* len == 0 means until end of file */
+ __s64 l_len __attribute__((packed));
+ __s32 l_sysid;
+ __u32 l_pid;
+ __s32 l_pad[4]; /* reserve area */
+};
+
+#define FS_IOC_RESVSP_32 _IOW ('X', 40, struct space_resv_32)
+#define FS_IOC_UNRESVSP_32 _IOW ('X', 41, struct space_resv_32)
+#define FS_IOC_RESVSP64_32 _IOW ('X', 42, struct space_resv_32)
+#define FS_IOC_UNRESVSP64_32 _IOW ('X', 43, struct space_resv_32)
+#define FS_IOC_ZERO_RANGE_32 _IOW ('X', 57, struct space_resv_32)
+
+#endif
+
#endif /* _FALLOC_H_ */
diff --git a/include/linux/fb.h b/include/linux/fb.h
index 756706b666a1..3b4b2f0c6994 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -472,7 +472,7 @@ struct fb_info {
struct fb_deferred_io *fbdefio;
#endif
- struct fb_ops *fbops;
+ const struct fb_ops *fbops;
struct device *device; /* This is the parent */
struct device *dev; /* This is this fb device */
int class_flag; /* private sysfs flags */
@@ -606,8 +606,7 @@ extern ssize_t fb_sys_write(struct fb_info *info, const char __user *buf,
/* drivers/video/fbmem.c */
extern int register_framebuffer(struct fb_info *fb_info);
extern void unregister_framebuffer(struct fb_info *fb_info);
-extern void unlink_framebuffer(struct fb_info *fb_info);
-extern int remove_conflicting_pci_framebuffers(struct pci_dev *pdev, int res_id,
+extern int remove_conflicting_pci_framebuffers(struct pci_dev *pdev,
const char *name);
extern int remove_conflicting_framebuffers(struct apertures_struct *a,
const char *name, bool primary);
@@ -626,6 +625,7 @@ extern int fb_new_modelist(struct fb_info *info);
extern struct fb_info *registered_fb[FB_MAX];
extern int num_registered_fb;
extern bool fb_center_logo;
+extern int fb_logo_count;
extern struct class *fb_class;
#define for_each_registered_fb(i) \
diff --git a/include/linux/fcntl.h b/include/linux/fcntl.h
index d019df946cb2..7bcdcf4f6ab2 100644
--- a/include/linux/fcntl.h
+++ b/include/linux/fcntl.h
@@ -2,15 +2,29 @@
#ifndef _LINUX_FCNTL_H
#define _LINUX_FCNTL_H
+#include <linux/stat.h>
#include <uapi/linux/fcntl.h>
-/* list of all valid flags for the open/openat flags argument: */
+/* List of all valid flags for the open/openat flags argument: */
#define VALID_OPEN_FLAGS \
(O_RDONLY | O_WRONLY | O_RDWR | O_CREAT | O_EXCL | O_NOCTTY | O_TRUNC | \
O_APPEND | O_NDELAY | O_NONBLOCK | O_NDELAY | __O_SYNC | O_DSYNC | \
FASYNC | O_DIRECT | O_LARGEFILE | O_DIRECTORY | O_NOFOLLOW | \
O_NOATIME | O_CLOEXEC | O_PATH | __O_TMPFILE)
+/* List of all valid flags for the how->upgrade_mask argument: */
+#define VALID_UPGRADE_FLAGS \
+ (UPGRADE_NOWRITE | UPGRADE_NOREAD)
+
+/* List of all valid flags for the how->resolve argument: */
+#define VALID_RESOLVE_FLAGS \
+ (RESOLVE_NO_XDEV | RESOLVE_NO_MAGICLINKS | RESOLVE_NO_SYMLINKS | \
+ RESOLVE_BENEATH | RESOLVE_IN_ROOT)
+
+/* List of all open_how "versions". */
+#define OPEN_HOW_SIZE_VER0 24 /* sizeof first published struct */
+#define OPEN_HOW_SIZE_LATEST OPEN_HOW_SIZE_VER0
+
#ifndef force_o_largefile
#define force_o_largefile() (!IS_ENABLED(CONFIG_ARCH_32BIT_OFF_T))
#endif
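VALID_RESOLVE_FLAGS and the OPEN_HOW_SIZE_* values validate the extensible struct open_how taken by openat2(2). A userspace-side sketch of the interface being validated (assumes headers new enough to define __NR_openat2 and <linux/openat2.h>; the wrapper is illustrative only):

#include <fcntl.h>
#include <linux/openat2.h>
#include <sys/syscall.h>
#include <unistd.h>

static int open_beneath_root(int rootfd, const char *path)
{
	struct open_how how = {
		.flags		= O_RDONLY,
		.resolve	= RESOLVE_IN_ROOT,	/* resolve as if rootfd were "/" */
	};

	return syscall(__NR_openat2, rootfd, path, &how, sizeof(how));
}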
diff --git a/include/linux/file.h b/include/linux/file.h
index 3fcddff56bc4..142d102f285e 100644
--- a/include/linux/file.h
+++ b/include/linux/file.h
@@ -16,6 +16,7 @@ extern void fput(struct file *);
extern void fput_many(struct file *, unsigned int);
struct file_operations;
+struct task_struct;
struct vfsmount;
struct dentry;
struct inode;
@@ -47,6 +48,7 @@ static inline void fdput(struct fd fd)
extern struct file *fget(unsigned int fd);
extern struct file *fget_many(unsigned int fd, unsigned int refs);
extern struct file *fget_raw(unsigned int fd);
+extern struct file *fget_task(struct task_struct *task, unsigned int fd);
extern unsigned long __fdget(unsigned int fd);
extern unsigned long __fdget_raw(unsigned int fd);
extern unsigned long __fdget_pos(unsigned int fd);
@@ -83,6 +85,7 @@ extern int f_dupfd(unsigned int from, struct file *file, unsigned flags);
extern int replace_fd(unsigned fd, struct file *file, unsigned flags);
extern void set_close_on_exec(unsigned int fd, int flag);
extern bool get_close_on_exec(unsigned int fd);
+extern int __get_unused_fd_flags(unsigned flags, unsigned long nofile);
extern int get_unused_fd_flags(unsigned flags);
extern void put_unused_fd(unsigned int fd);
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 0367a75f873b..f349e2c0884c 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -65,6 +65,9 @@ struct ctl_table_header;
/* unused opcode to mark special call to bpf_tail_call() helper */
#define BPF_TAIL_CALL 0xf0
+/* unused opcode to mark special load instruction. Same as BPF_ABS */
+#define BPF_PROBE_MEM 0x20
+
/* unused opcode to mark call to interpreter with arguments */
#define BPF_CALL_ARGS 0xe0
@@ -417,7 +420,7 @@ static inline bool insn_is_zext(const struct bpf_insn *insn)
#define BPF_FIELD_SIZEOF(type, field) \
({ \
- const int __size = bytes_to_bpf_size(FIELD_SIZEOF(type, field)); \
+ const int __size = bytes_to_bpf_size(sizeof_field(type, field)); \
BUILD_BUG_ON(__size < 0); \
__size; \
})
@@ -464,10 +467,11 @@ static inline bool insn_is_zext(const struct bpf_insn *insn)
#define BPF_CALL_x(x, name, ...) \
static __always_inline \
u64 ____##name(__BPF_MAP(x, __BPF_DECL_ARGS, __BPF_V, __VA_ARGS__)); \
+ typedef u64 (*btf_##name)(__BPF_MAP(x, __BPF_DECL_ARGS, __BPF_V, __VA_ARGS__)); \
u64 name(__BPF_REG(x, __BPF_DECL_REGS, __BPF_N, __VA_ARGS__)); \
u64 name(__BPF_REG(x, __BPF_DECL_REGS, __BPF_N, __VA_ARGS__)) \
{ \
- return ____##name(__BPF_MAP(x,__BPF_CAST,__BPF_N,__VA_ARGS__));\
+ return ((btf_##name)____##name)(__BPF_MAP(x,__BPF_CAST,__BPF_N,__VA_ARGS__));\
} \
static __always_inline \
u64 ____##name(__BPF_MAP(x, __BPF_DECL_ARGS, __BPF_V, __VA_ARGS__))
@@ -493,7 +497,7 @@ static inline bool insn_is_zext(const struct bpf_insn *insn)
#define bpf_target_off(TYPE, MEMBER, SIZE, PTR_SIZE) \
({ \
- BUILD_BUG_ON(FIELD_SIZEOF(TYPE, MEMBER) != (SIZE)); \
+ BUILD_BUG_ON(sizeof_field(TYPE, MEMBER) != (SIZE)); \
*(PTR_SIZE) = (SIZE); \
offsetof(TYPE, MEMBER); \
})
@@ -511,10 +515,12 @@ struct sock_fprog_kern {
struct sock_filter *filter;
};
+/* Some arches need doubleword alignment for their instructions and/or data */
+#define BPF_IMAGE_ALIGNMENT 8
+
struct bpf_binary_header {
u32 pages;
- /* Some arches need word alignment for their instructions */
- u8 image[] __aligned(4);
+ u8 image[] __aligned(BPF_IMAGE_ALIGNMENT);
};
struct bpf_prog {
@@ -553,23 +559,26 @@ struct sk_filter {
DECLARE_STATIC_KEY_FALSE(bpf_stats_enabled_key);
-#define BPF_PROG_RUN(prog, ctx) ({ \
- u32 ret; \
- cant_sleep(); \
- if (static_branch_unlikely(&bpf_stats_enabled_key)) { \
- struct bpf_prog_stats *stats; \
- u64 start = sched_clock(); \
- ret = (*(prog)->bpf_func)(ctx, (prog)->insnsi); \
- stats = this_cpu_ptr(prog->aux->stats); \
- u64_stats_update_begin(&stats->syncp); \
- stats->cnt++; \
- stats->nsecs += sched_clock() - start; \
- u64_stats_update_end(&stats->syncp); \
- } else { \
- ret = (*(prog)->bpf_func)(ctx, (prog)->insnsi); \
- } \
+#define __BPF_PROG_RUN(prog, ctx, dfunc) ({ \
+ u32 ret; \
+ cant_sleep(); \
+ if (static_branch_unlikely(&bpf_stats_enabled_key)) { \
+ struct bpf_prog_stats *stats; \
+ u64 start = sched_clock(); \
+ ret = dfunc(ctx, (prog)->insnsi, (prog)->bpf_func); \
+ stats = this_cpu_ptr(prog->aux->stats); \
+ u64_stats_update_begin(&stats->syncp); \
+ stats->cnt++; \
+ stats->nsecs += sched_clock() - start; \
+ u64_stats_update_end(&stats->syncp); \
+ } else { \
+ ret = dfunc(ctx, (prog)->insnsi, (prog)->bpf_func); \
+ } \
ret; })
+#define BPF_PROG_RUN(prog, ctx) __BPF_PROG_RUN(prog, ctx, \
+ bpf_dispatcher_nopfunc)
+
#define BPF_SKB_CB_LEN QDISC_CB_PRIV_LEN
struct bpf_skb_data_end {
@@ -583,7 +592,6 @@ struct bpf_redirect_info {
u32 tgt_index;
void *tgt_value;
struct bpf_map *map;
- struct bpf_map *map_to_flush;
u32 kern_flags;
};
@@ -602,7 +610,7 @@ static inline void bpf_compute_data_pointers(struct sk_buff *skb)
{
struct bpf_skb_data_end *cb = (struct bpf_skb_data_end *)skb->cb;
- BUILD_BUG_ON(sizeof(*cb) > FIELD_SIZEOF(struct sk_buff, cb));
+ BUILD_BUG_ON(sizeof(*cb) > sizeof_field(struct sk_buff, cb));
cb->data_meta = skb->data - skb_metadata_len(skb);
cb->data_end = skb->data + skb_headlen(skb);
}
@@ -640,9 +648,9 @@ static inline u8 *bpf_skb_cb(struct sk_buff *skb)
* attached to sockets, we need to clear the bpf_skb_cb() area
* to not leak previous contents to user space.
*/
- BUILD_BUG_ON(FIELD_SIZEOF(struct __sk_buff, cb) != BPF_SKB_CB_LEN);
- BUILD_BUG_ON(FIELD_SIZEOF(struct __sk_buff, cb) !=
- FIELD_SIZEOF(struct qdisc_skb_cb, data));
+ BUILD_BUG_ON(sizeof_field(struct __sk_buff, cb) != BPF_SKB_CB_LEN);
+ BUILD_BUG_ON(sizeof_field(struct __sk_buff, cb) !=
+ sizeof_field(struct qdisc_skb_cb, data));
return qdisc_skb_cb(skb)->data;
}
@@ -693,6 +701,8 @@ static inline u32 bpf_prog_run_clear_cb(const struct bpf_prog *prog,
return res;
}
+DECLARE_BPF_DISPATCHER(bpf_dispatcher_xdp)
+
static __always_inline u32 bpf_prog_run_xdp(const struct bpf_prog *prog,
struct xdp_buff *xdp)
{
@@ -702,9 +712,12 @@ static __always_inline u32 bpf_prog_run_xdp(const struct bpf_prog *prog,
* already takes rcu_read_lock() when fetching the program, so
* it's not necessary here anymore.
*/
- return BPF_PROG_RUN(prog, xdp);
+ return __BPF_PROG_RUN(prog, xdp,
+ BPF_DISPATCHER_FUNC(bpf_dispatcher_xdp));
}
+void bpf_prog_change_xdp(struct bpf_prog *prev_prog, struct bpf_prog *prog);
+
static inline u32 bpf_prog_insn_size(const struct bpf_prog *prog)
{
return prog->len * sizeof(struct bpf_insn);
@@ -770,8 +783,12 @@ bpf_ctx_narrow_access_offset(u32 off, u32 size, u32 size_default)
static inline void bpf_prog_lock_ro(struct bpf_prog *fp)
{
- set_vm_flush_reset_perms(fp);
- set_memory_ro((unsigned long)fp, fp->pages);
+#ifndef CONFIG_BPF_JIT_ALWAYS_ON
+ if (!fp->jited) {
+ set_vm_flush_reset_perms(fp);
+ set_memory_ro((unsigned long)fp, fp->pages);
+ }
+#endif
}
static inline void bpf_jit_binary_lock_ro(struct bpf_binary_header *hdr)
@@ -826,6 +843,8 @@ int bpf_prog_create(struct bpf_prog **pfp, struct sock_fprog_kern *fprog);
int bpf_prog_create_from_user(struct bpf_prog **pfp, struct sock_fprog *fprog,
bpf_aux_classic_check_t trans, bool save_orig);
void bpf_prog_destroy(struct bpf_prog *fp);
+const struct bpf_func_proto *
+bpf_base_func_proto(enum bpf_func_id func_id);
int sk_attach_filter(struct sock_fprog *fprog, struct sock *sk);
int sk_attach_bpf(u32 ufd, struct sock *sk);
@@ -899,7 +918,7 @@ static inline int xdp_ok_fwd_dev(const struct net_device *fwd,
return 0;
}
-/* The pair of xdp_do_redirect and xdp_do_flush_map MUST be called in the
+/* The pair of xdp_do_redirect and xdp_do_flush MUST be called in the
* same cpu context. Further for best results no more than a single map
* for the do_redirect/do_flush pair should be used. This limitation is
* because we only track one map and force a flush when the map changes.
@@ -910,7 +929,13 @@ int xdp_do_generic_redirect(struct net_device *dev, struct sk_buff *skb,
int xdp_do_redirect(struct net_device *dev,
struct xdp_buff *xdp,
struct bpf_prog *prog);
-void xdp_do_flush_map(void);
+void xdp_do_flush(void);
+
+/* The xdp_do_flush_map() helper has been renamed to drop the _map suffix, as
+ * it is no longer only flushing maps. Keep this define for compatibility
+ * until all drivers are updated - do not use xdp_do_flush_map() in new code!
+ */
+#define xdp_do_flush_map xdp_do_flush
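
A hedged sketch of what the rename means for callers; the mydrv_* driver scaffolding below is hypothetical and not part of this patch:

	static int mydrv_napi_poll(struct napi_struct *napi, int budget)
	{
		int work = mydrv_clean_rx_ring(napi, budget);	/* may call xdp_do_redirect() */

		xdp_do_flush();		/* new name; xdp_do_flush_map() still compiles via the alias above */
		return work;
	}
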
void bpf_warn_invalid_xdp_action(u32 act);
@@ -946,6 +971,9 @@ void *bpf_jit_alloc_exec(unsigned long size);
void bpf_jit_free_exec(void *addr);
void bpf_jit_free(struct bpf_prog *fp);
+int bpf_jit_add_poke_descriptor(struct bpf_prog *prog,
+ struct bpf_jit_poke_descriptor *poke);
+
int bpf_jit_get_func_addr(const struct bpf_prog *prog,
const struct bpf_insn *insn, bool extra_pass,
u64 *func_addr, bool *func_addr_fixed);
@@ -1044,11 +1072,23 @@ static inline bool ebpf_jit_enabled(void)
return false;
}
+static inline bool bpf_jit_blinding_enabled(struct bpf_prog *prog)
+{
+ return false;
+}
+
static inline bool bpf_prog_ebpf_jited(const struct bpf_prog *fp)
{
return false;
}
+static inline int
+bpf_jit_add_poke_descriptor(struct bpf_prog *prog,
+ struct bpf_jit_poke_descriptor *poke)
+{
+ return -ENOTSUPP;
+}
+
static inline void bpf_jit_free(struct bpf_prog *fp)
{
bpf_prog_unlock_free(fp);
diff --git a/include/linux/firmware.h b/include/linux/firmware.h
index 2dd566c91d44..4bbd0afd91b7 100644
--- a/include/linux/firmware.h
+++ b/include/linux/firmware.h
@@ -44,6 +44,8 @@ int request_firmware(const struct firmware **fw, const char *name,
struct device *device);
int firmware_request_nowarn(const struct firmware **fw, const char *name,
struct device *device);
+int firmware_request_platform(const struct firmware **fw, const char *name,
+ struct device *device);
int request_firmware_nowait(
struct module *module, bool uevent,
const char *name, struct device *device, gfp_t gfp, void *context,
@@ -69,6 +71,13 @@ static inline int firmware_request_nowarn(const struct firmware **fw,
return -EINVAL;
}
+static inline int firmware_request_platform(const struct firmware **fw,
+ const char *name,
+ struct device *device)
+{
+ return -EINVAL;
+}
+
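
A sketch of the intended call pattern for the new helper, assuming a driver that can fall back to a platform/EFI-embedded firmware copy; the firmware name and dev pointer are placeholders:

	const struct firmware *fw;
	int err;

	/* tries /lib/firmware first, then firmware embedded in platform tables */
	err = firmware_request_platform(&fw, "mydev/config.bin", dev);
	if (err)
		return err;
	/* ... consume fw->data / fw->size ... */
	release_firmware(fw);
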
static inline int request_firmware_nowait(
struct module *module, bool uevent,
const char *name, struct device *device, gfp_t gfp, void *context,
diff --git a/include/linux/firmware/broadcom/tee_bnxt_fw.h b/include/linux/firmware/broadcom/tee_bnxt_fw.h
new file mode 100644
index 000000000000..f24c82d6ef73
--- /dev/null
+++ b/include/linux/firmware/broadcom/tee_bnxt_fw.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: BSD-2-Clause */
+/*
+ * Copyright 2019 Broadcom.
+ */
+
+#ifndef _BROADCOM_TEE_BNXT_FW_H
+#define _BROADCOM_TEE_BNXT_FW_H
+
+#include <linux/types.h>
+
+int tee_bnxt_fw_load(void);
+int tee_bnxt_copy_coredump(void *buf, u32 offset, u32 size);
+
+#endif /* _BROADCOM_TEE_BNXT_FW_H */
diff --git a/include/linux/firmware/intel/stratix10-svc-client.h b/include/linux/firmware/intel/stratix10-svc-client.h
index b6c4302a39e0..59bc6e2af693 100644
--- a/include/linux/firmware/intel/stratix10-svc-client.h
+++ b/include/linux/firmware/intel/stratix10-svc-client.h
@@ -41,6 +41,12 @@
*
* SVC_STATUS_RSU_OK:
* Secure firmware accepts the request of remote status update (RSU).
+ *
+ * SVC_STATUS_RSU_ERROR:
+ * Error encountered during remote system update.
+ *
+ * SVC_STATUS_RSU_NO_SUPPORT:
+ * Secure firmware doesn't support RSU retry or notify feature.
*/
#define SVC_STATUS_RECONFIG_REQUEST_OK 0
#define SVC_STATUS_RECONFIG_BUFFER_SUBMITTED 1
@@ -50,6 +56,8 @@
#define SVC_STATUS_RECONFIG_ERROR 5
#define SVC_STATUS_RSU_OK 6
#define SVC_STATUS_RSU_ERROR 7
+#define SVC_STATUS_RSU_NO_SUPPORT 8
+
/**
* Flag bit for COMMAND_RECONFIG
*
diff --git a/include/linux/firmware/meson/meson_sm.h b/include/linux/firmware/meson/meson_sm.h
index 7613bf7c9442..6669e2a1d5fd 100644
--- a/include/linux/firmware/meson/meson_sm.h
+++ b/include/linux/firmware/meson/meson_sm.h
@@ -16,11 +16,14 @@ enum {
struct meson_sm_firmware;
-int meson_sm_call(unsigned int cmd_index, u32 *ret, u32 arg0, u32 arg1,
- u32 arg2, u32 arg3, u32 arg4);
-int meson_sm_call_write(void *buffer, unsigned int b_size, unsigned int cmd_index,
- u32 arg0, u32 arg1, u32 arg2, u32 arg3, u32 arg4);
-int meson_sm_call_read(void *buffer, unsigned int bsize, unsigned int cmd_index,
- u32 arg0, u32 arg1, u32 arg2, u32 arg3, u32 arg4);
+int meson_sm_call(struct meson_sm_firmware *fw, unsigned int cmd_index,
+ u32 *ret, u32 arg0, u32 arg1, u32 arg2, u32 arg3, u32 arg4);
+int meson_sm_call_write(struct meson_sm_firmware *fw, void *buffer,
+ unsigned int b_size, unsigned int cmd_index, u32 arg0,
+ u32 arg1, u32 arg2, u32 arg3, u32 arg4);
+int meson_sm_call_read(struct meson_sm_firmware *fw, void *buffer,
+ unsigned int bsize, unsigned int cmd_index, u32 arg0,
+ u32 arg1, u32 arg2, u32 arg3, u32 arg4);
+struct meson_sm_firmware *meson_sm_get(struct device_node *firmware_node);
#endif /* _MESON_SM_FW_H_ */
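
For illustration, a sketch of a caller adapting to the instance-based API introduced above; fw_node and cmd_index are placeholders, not values defined by this header:

	struct meson_sm_firmware *fw = meson_sm_get(fw_node);	/* fw_node: DT node, placeholder */
	u32 result = 0;

	if (!fw)
		return -EPROBE_DEFER;
	/* every call now names the firmware instance explicitly */
	meson_sm_call(fw, cmd_index, &result, 0, 0, 0, 0, 0);
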
diff --git a/include/linux/firmware/xlnx-zynqmp.h b/include/linux/firmware/xlnx-zynqmp.h
index 778abbbc7d94..2cd12ebd6826 100644
--- a/include/linux/firmware/xlnx-zynqmp.h
+++ b/include/linux/firmware/xlnx-zynqmp.h
@@ -2,7 +2,7 @@
/*
* Xilinx Zynq MPSoC Firmware layer
*
- * Copyright (C) 2014-2018 Xilinx
+ * Copyright (C) 2014-2019 Xilinx
*
* Michal Simek <michal.simek@xilinx.com>
* Davorin Mista <davorin.mista@aggios.com>
@@ -46,6 +46,11 @@
#define ZYNQMP_PM_CAPABILITY_ACCESS 0x1U
#define ZYNQMP_PM_CAPABILITY_CONTEXT 0x2U
#define ZYNQMP_PM_CAPABILITY_WAKEUP 0x4U
+#define ZYNQMP_PM_CAPABILITY_UNUSABLE 0x8U
+
+/* Feature check status */
+#define PM_FEATURE_INVALID -1
+#define PM_FEATURE_UNCHECKED 0
/*
* Firmware FPGA Manager flags
@@ -77,21 +82,26 @@ enum pm_api_id {
PM_CLOCK_GETRATE,
PM_CLOCK_SETPARENT,
PM_CLOCK_GETPARENT,
+ PM_FEATURE_CHECK = 63,
+ PM_API_MAX,
};
/* PMU-FW return status codes */
enum pm_ret_status {
XST_PM_SUCCESS = 0,
+ XST_PM_NO_FEATURE = 19,
XST_PM_INTERNAL = 2000,
XST_PM_CONFLICT,
XST_PM_NO_ACCESS,
XST_PM_INVALID_NODE,
XST_PM_DOUBLE_REQ,
XST_PM_ABORT_SUSPEND,
+ XST_PM_MULT_USER = 2008,
};
enum pm_ioctl_id {
- IOCTL_SET_PLL_FRAC_MODE = 8,
+ IOCTL_SET_SD_TAPDELAY = 7,
+ IOCTL_SET_PLL_FRAC_MODE,
IOCTL_GET_PLL_FRAC_MODE,
IOCTL_SET_PLL_FRAC_DATA,
IOCTL_GET_PLL_FRAC_DATA,
@@ -105,6 +115,7 @@ enum pm_query_id {
PM_QID_CLOCK_GET_PARENTS,
PM_QID_CLOCK_GET_ATTRIBUTES,
PM_QID_CLOCK_GET_NUM_CLOCKS = 12,
+ PM_QID_CLOCK_GET_MAX_DIVISOR,
};
enum zynqmp_pm_reset_action {
@@ -250,6 +261,16 @@ enum zynqmp_pm_request_ack {
ZYNQMP_PM_REQUEST_ACK_NON_BLOCKING,
};
+enum pm_node_id {
+ NODE_SD_0 = 39,
+ NODE_SD_1,
+};
+
+enum tap_delay_type {
+ PM_TAPDELAY_INPUT = 0,
+ PM_TAPDELAY_OUTPUT,
+};
+
/**
* struct zynqmp_pm_query_data - PM query data
* @qid: query ID
diff --git a/include/linux/fs.h b/include/linux/fs.h
index e0d909d35763..3d69de600494 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -67,7 +67,7 @@ struct fscrypt_operations;
struct fsverity_info;
struct fsverity_operations;
struct fs_context;
-struct fs_parameter_description;
+struct fs_parameter_spec;
extern void __init inode_init(void);
extern void __init inode_init_early(void);
@@ -698,6 +698,7 @@ struct inode {
struct rcu_head i_rcu;
};
atomic64_t i_version;
+ atomic64_t i_sequence; /* see futex */
atomic_t i_count;
atomic_t i_dio_count;
atomic_t i_writecount;
@@ -855,7 +856,7 @@ static inline loff_t i_size_read(const struct inode *inode)
i_size = inode->i_size;
} while (read_seqcount_retry(&inode->i_size_seqcount, seq));
return i_size;
-#elif BITS_PER_LONG==32 && defined(CONFIG_PREEMPT)
+#elif BITS_PER_LONG==32 && defined(CONFIG_PREEMPTION)
loff_t i_size;
preempt_disable();
@@ -880,7 +881,7 @@ static inline void i_size_write(struct inode *inode, loff_t i_size)
inode->i_size = i_size;
write_seqcount_end(&inode->i_size_seqcount);
preempt_enable();
-#elif BITS_PER_LONG==32 && defined(CONFIG_PREEMPT)
+#elif BITS_PER_LONG==32 && defined(CONFIG_PREEMPTION)
preempt_disable();
inode->i_size = i_size;
preempt_enable();
@@ -1575,7 +1576,6 @@ static inline void i_gid_write(struct inode *inode, gid_t gid)
inode->i_gid = make_kgid(inode->i_sb->s_user_ns, gid);
}
-extern struct timespec64 timespec64_trunc(struct timespec64 t, unsigned gran);
extern struct timespec64 current_time(struct inode *inode);
/*
@@ -1727,6 +1727,13 @@ int vfs_mkobj(struct dentry *, umode_t,
extern long vfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
+#ifdef CONFIG_COMPAT
+extern long compat_ptr_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg);
+#else
+#define compat_ptr_ioctl NULL
+#endif
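
A sketch of how a driver whose ioctl arguments are plain user pointers can use the new generic helper; the my_* names are hypothetical:

	static const struct file_operations my_fops = {
		.owner		= THIS_MODULE,
		.unlocked_ioctl	= my_ioctl,
		.compat_ioctl	= compat_ptr_ioctl,	/* NULL when !CONFIG_COMPAT, per the #else above */
	};
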
+
/*
* VFS file helper functions.
*/
@@ -2071,6 +2078,18 @@ static inline void init_sync_kiocb(struct kiocb *kiocb, struct file *filp)
};
}
+static inline void kiocb_clone(struct kiocb *kiocb, struct kiocb *kiocb_src,
+ struct file *filp)
+{
+ *kiocb = (struct kiocb) {
+ .ki_filp = filp,
+ .ki_flags = kiocb_src->ki_flags,
+ .ki_hint = kiocb_src->ki_hint,
+ .ki_ioprio = kiocb_src->ki_ioprio,
+ .ki_pos = kiocb_src->ki_pos,
+ };
+}
+
/*
* Inode state bits. Protected by inode->i_lock
*
@@ -2217,7 +2236,7 @@ struct file_system_type {
#define FS_DISALLOW_NOTIFY_PERM 16 /* Disable fanotify permission events */
#define FS_RENAME_DOES_D_MOVE 32768 /* FS will handle d_move() during rename() internally. */
int (*init_fs_context)(struct fs_context *);
- const struct fs_parameter_description *parameters;
+ const struct fs_parameter_spec *parameters;
struct dentry *(*mount) (struct file_system_type *, int,
const char *, void *);
void (*kill_sb) (struct super_block *);
@@ -2545,10 +2564,6 @@ extern int finish_open(struct file *file, struct dentry *dentry,
int (*open)(struct inode *, struct file *));
extern int finish_no_open(struct file *file, struct dentry *dentry);
-/* fs/ioctl.c */
-
-extern int ioctl_preallocate(struct file *filp, void __user *argp);
-
/* fs/dcache.c */
extern void __init vfs_caches_init_early(void);
extern void __init vfs_caches_init(void);
@@ -2632,8 +2647,6 @@ extern void bd_finish_claiming(struct block_device *bdev,
extern void bd_abort_claiming(struct block_device *bdev,
struct block_device *whole, void *holder);
extern void blkdev_put(struct block_device *bdev, fmode_t mode);
-extern int __blkdev_reread_part(struct block_device *bdev);
-extern int blkdev_reread_part(struct block_device *bdev);
#ifdef CONFIG_SYSFS
extern int bd_link_disk_holder(struct block_device *bdev, struct gendisk *disk);
@@ -2687,7 +2700,6 @@ static inline void unregister_chrdev(unsigned int major, const char *name)
#ifdef CONFIG_BLOCK
#define BLKDEV_MAJOR_MAX 512
-extern const char *__bdevname(dev_t, char *buffer);
extern const char *bdevname(struct block_device *bdev, char *buffer);
extern struct block_device *lookup_bdev(const char *);
extern void blkdev_show(struct seq_file *,off_t);
@@ -2703,8 +2715,6 @@ extern void make_bad_inode(struct inode *);
extern bool is_bad_inode(struct inode *);
#ifdef CONFIG_BLOCK
-extern void check_disk_size_change(struct gendisk *disk,
- struct block_device *bdev, bool verbose);
extern int revalidate_disk(struct gendisk *);
extern int check_disk_change(struct block_device *);
extern int __invalidate_device(struct block_device *, bool);
@@ -2738,7 +2748,6 @@ static inline int filemap_fdatawait(struct address_space *mapping)
extern bool filemap_range_has_page(struct address_space *, loff_t lstart,
loff_t lend);
-extern int filemap_write_and_wait(struct address_space *mapping);
extern int filemap_write_and_wait_range(struct address_space *mapping,
loff_t lstart, loff_t lend);
extern int __filemap_fdatawrite_range(struct address_space *mapping,
@@ -2748,6 +2757,11 @@ extern int filemap_fdatawrite_range(struct address_space *mapping,
extern int filemap_check_errors(struct address_space *mapping);
extern void __filemap_set_wb_err(struct address_space *mapping, int err);
+static inline int filemap_write_and_wait(struct address_space *mapping)
+{
+ return filemap_write_and_wait_range(mapping, 0, LLONG_MAX);
+}
+
extern int __must_check file_fdatawait_range(struct file *file, loff_t lstart,
loff_t lend);
extern int __must_check file_check_and_advance_wb_err(struct file *file);
@@ -2862,9 +2876,16 @@ static inline ssize_t generic_write_sync(struct kiocb *iocb, ssize_t count)
extern void emergency_sync(void);
extern void emergency_remount(void);
+
#ifdef CONFIG_BLOCK
-extern sector_t bmap(struct inode *, sector_t);
+extern int bmap(struct inode *inode, sector_t *block);
+#else
+static inline int bmap(struct inode *inode, sector_t *block)
+{
+ return -EINVAL;
+}
#endif
+
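
A sketch of a caller converted to the new in/out-parameter form of bmap(); inode and iblock are placeholders and the error handling shown is only illustrative:

	sector_t block = iblock;	/* logical block in, physical block out */
	int err = bmap(inode, &block);

	if (err)
		return 0;		/* mapping unavailable */
	/* ... use the physical block number now in 'block' ... */
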
extern int notify_change(struct dentry *, struct iattr *, struct inode **);
extern int inode_permission(struct inode *, int);
extern int generic_permission(struct inode *, int);
@@ -2961,6 +2982,7 @@ extern int do_pipe_flags(int *, int);
id(UNKNOWN, unknown) \
id(FIRMWARE, firmware) \
id(FIRMWARE_PREALLOC_BUFFER, firmware) \
+ id(FIRMWARE_EFI_EMBEDDED, firmware) \
id(MODULE, kernel-module) \
id(KEXEC_IMAGE, kexec-image) \
id(KEXEC_INITRAMFS, kexec-initramfs) \
@@ -2991,6 +3013,8 @@ extern int kernel_read_file(struct file *, void **, loff_t *, loff_t,
enum kernel_read_file_id);
extern int kernel_read_file_from_path(const char *, void **, loff_t *, loff_t,
enum kernel_read_file_id);
+extern int kernel_read_file_from_path_initns(const char *, void **, loff_t *, loff_t,
+ enum kernel_read_file_id);
extern int kernel_read_file_from_fd(int, void **, loff_t *, loff_t,
enum kernel_read_file_id);
extern ssize_t kernel_read(struct file *, void *, size_t, loff_t *);
@@ -3105,6 +3129,10 @@ ssize_t vfs_iter_read(struct file *file, struct iov_iter *iter, loff_t *ppos,
rwf_t flags);
ssize_t vfs_iter_write(struct file *file, struct iov_iter *iter, loff_t *ppos,
rwf_t flags);
+ssize_t vfs_iocb_iter_read(struct file *file, struct kiocb *iocb,
+ struct iov_iter *iter);
+ssize_t vfs_iocb_iter_write(struct file *file, struct kiocb *iocb,
+ struct iov_iter *iter);
/* fs/block_dev.c */
extern ssize_t blkdev_read_iter(struct kiocb *iocb, struct iov_iter *to);
@@ -3153,7 +3181,6 @@ enum {
};
void dio_end_io(struct bio *bio);
-void dio_warn_stale_pagecache(struct file *filp);
ssize_t __blockdev_direct_IO(struct kiocb *iocb, struct inode *inode,
struct block_device *bdev, struct iov_iter *iter,
@@ -3198,6 +3225,11 @@ static inline void inode_dio_end(struct inode *inode)
wake_up_bit(&inode->i_state, __I_DIO_WAKEUP);
}
+/*
+ * Warn about a page cache invalidation failure during a direct I/O write.
+ */
+void dio_warn_stale_pagecache(struct file *filp);
+
extern void inode_set_flags(struct inode *inode, unsigned int flags,
unsigned int mask);
@@ -3296,6 +3328,8 @@ extern int simple_unlink(struct inode *, struct dentry *);
extern int simple_rmdir(struct inode *, struct dentry *);
extern int simple_rename(struct inode *, struct dentry *,
struct inode *, struct dentry *, unsigned int);
+extern void simple_recursive_removal(struct dentry *,
+ void (*callback)(struct dentry *));
extern int noop_fsync(struct file *, loff_t, loff_t, int);
extern int noop_set_page_dirty(struct page *page);
extern void noop_invalidatepage(struct page *page, unsigned int offset,
diff --git a/include/linux/fs_context.h b/include/linux/fs_context.h
index e5c14e2c53d3..e6c3e4c61dad 100644
--- a/include/linux/fs_context.h
+++ b/include/linux/fs_context.h
@@ -54,7 +54,6 @@ enum fs_value_type {
fs_value_is_string, /* Value is a string */
fs_value_is_blob, /* Value is a binary blob */
fs_value_is_filename, /* Value is a filename* + dirfd */
- fs_value_is_filename_empty, /* Value is a filename* + dirfd + AT_EMPTY_PATH */
fs_value_is_file, /* Value is a file* */
};
@@ -74,6 +73,11 @@ struct fs_parameter {
int dirfd;
};
+struct p_log {
+ const char *prefix;
+ struct fc_log *log;
+};
+
/*
* Filesystem context for holding the parameters used in the creation or
* reconfiguration of a superblock.
@@ -93,7 +97,7 @@ struct fs_context {
struct user_namespace *user_ns; /* The user namespace for this mount */
struct net *net_ns; /* The network namespace for this mount */
const struct cred *cred; /* The mounter's credentials */
- struct fc_log *log; /* Logging buffer */
+ struct p_log log; /* Logging buffer */
const char *source; /* The source name (eg. dev path) */
void *security; /* Linux S&M options */
void *s_fs_info; /* Proposed s_fs_info */
@@ -182,9 +186,13 @@ struct fc_log {
char *buffer[8];
};
-extern __attribute__((format(printf, 2, 3)))
-void logfc(struct fs_context *fc, const char *fmt, ...);
+extern __attribute__((format(printf, 4, 5)))
+void logfc(struct fc_log *log, const char *prefix, char level, const char *fmt, ...);
+#define __logfc(fc, l, fmt, ...) logfc((fc)->log.log, NULL, \
+ l, fmt, ## __VA_ARGS__)
+#define __plog(p, l, fmt, ...) logfc((p)->log, (p)->prefix, \
+ l, fmt, ## __VA_ARGS__)
/**
* infof - Store supplementary informational message
* @fc: The context in which to log the informational message
@@ -193,7 +201,9 @@ void logfc(struct fs_context *fc, const char *fmt, ...);
* Store the supplementary informational message for the process if the process
* has enabled the facility.
*/
-#define infof(fc, fmt, ...) ({ logfc(fc, "i "fmt, ## __VA_ARGS__); })
+#define infof(fc, fmt, ...) __logfc(fc, 'i', fmt, ## __VA_ARGS__)
+#define info_plog(p, fmt, ...) __plog(p, 'i', fmt, ## __VA_ARGS__)
+#define infofc(fc, fmt, ...) __plog((&(fc)->log), 'i', fmt, ## __VA_ARGS__)
/**
* warnf - Store supplementary warning message
@@ -203,7 +213,9 @@ void logfc(struct fs_context *fc, const char *fmt, ...);
* Store the supplementary warning message for the process if the process has
* enabled the facility.
*/
-#define warnf(fc, fmt, ...) ({ logfc(fc, "w "fmt, ## __VA_ARGS__); })
+#define warnf(fc, fmt, ...) __logfc(fc, 'w', fmt, ## __VA_ARGS__)
+#define warn_plog(p, fmt, ...) __plog(p, 'w', fmt, ## __VA_ARGS__)
+#define warnfc(fc, fmt, ...) __plog((&(fc)->log), 'w', fmt, ## __VA_ARGS__)
/**
* errorf - Store supplementary error message
@@ -213,7 +225,9 @@ void logfc(struct fs_context *fc, const char *fmt, ...);
* Store the supplementary error message for the process if the process has
* enabled the facility.
*/
-#define errorf(fc, fmt, ...) ({ logfc(fc, "e "fmt, ## __VA_ARGS__); })
+#define errorf(fc, fmt, ...) __logfc(fc, 'e', fmt, ## __VA_ARGS__)
+#define error_plog(p, fmt, ...) __plog(p, 'e', fmt, ## __VA_ARGS__)
+#define errorfc(fc, fmt, ...) __plog((&(fc)->log), 'e', fmt, ## __VA_ARGS__)
/**
* invalf - Store supplementary invalid argument error message
@@ -223,6 +237,8 @@ void logfc(struct fs_context *fc, const char *fmt, ...);
* Store the supplementary error message for the process if the process has
* enabled the facility and return -EINVAL.
*/
-#define invalf(fc, fmt, ...) ({ errorf(fc, fmt, ## __VA_ARGS__); -EINVAL; })
+#define invalf(fc, fmt, ...) (errorf(fc, fmt, ## __VA_ARGS__), -EINVAL)
+#define inval_plog(p, fmt, ...) (error_plog(p, fmt, ## __VA_ARGS__), -EINVAL)
+#define invalfc(fc, fmt, ...) (errorfc(fc, fmt, ## __VA_ARGS__), -EINVAL)
#endif /* _LINUX_FS_CONTEXT_H */
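
A sketch of the new fc-based logging helpers as they might appear in a filesystem's option parsing; all myfs_*/Opt_* names are hypothetical (a matching parameter-table sketch follows the fs_parser.h hunk below):

	static int myfs_parse_param(struct fs_context *fc, struct fs_parameter *param)
	{
		struct fs_parse_result result;
		int opt = fs_parse(fc, myfs_fs_parameters, param, &result);

		if (opt < 0)
			return opt;

		switch (opt) {
		case Opt_legacy:
			warnfc(fc, "'%s' is deprecated", param->key);
			return 0;
		default:
			return invalfc(fc, "unhandled option '%s'", param->key);
		}
	}
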
diff --git a/include/linux/fs_parser.h b/include/linux/fs_parser.h
index dee140db6240..2eab6d5f6736 100644
--- a/include/linux/fs_parser.h
+++ b/include/linux/fs_parser.h
@@ -17,26 +17,18 @@ struct constant_table {
int value;
};
+struct fs_parameter_spec;
+struct fs_parse_result;
+typedef int fs_param_type(struct p_log *,
+ const struct fs_parameter_spec *,
+ struct fs_parameter *,
+ struct fs_parse_result *);
/*
* The type of parameter expected.
*/
-enum fs_parameter_type {
- __fs_param_wasnt_defined,
- fs_param_is_flag,
- fs_param_is_bool,
- fs_param_is_u32,
- fs_param_is_u32_octal,
- fs_param_is_u32_hex,
- fs_param_is_s32,
- fs_param_is_u64,
- fs_param_is_enum,
- fs_param_is_string,
- fs_param_is_blob,
- fs_param_is_blockdev,
- fs_param_is_path,
- fs_param_is_fd,
- nr__fs_parameter_type,
-};
+fs_param_type fs_param_is_bool, fs_param_is_u32, fs_param_is_s32, fs_param_is_u64,
+ fs_param_is_enum, fs_param_is_string, fs_param_is_blob, fs_param_is_blockdev,
+ fs_param_is_path, fs_param_is_fd;
/*
* Specification of the type of value a parameter wants.
@@ -46,25 +38,13 @@ enum fs_parameter_type {
*/
struct fs_parameter_spec {
const char *name;
+ fs_param_type *type; /* The desired parameter type */
u8 opt; /* Option number (returned by fs_parse()) */
- enum fs_parameter_type type:8; /* The desired parameter type */
unsigned short flags;
-#define fs_param_v_optional 0x0001 /* The value is optional */
#define fs_param_neg_with_no 0x0002 /* "noxxx" is negative param */
#define fs_param_neg_with_empty 0x0004 /* "xxx=" is negative param */
#define fs_param_deprecated 0x0008 /* The param is deprecated */
-};
-
-struct fs_parameter_enum {
- u8 opt; /* Option number (as fs_parameter_spec::opt) */
- char name[14];
- u8 value;
-};
-
-struct fs_parameter_description {
- const char name[16]; /* Name for logging purposes */
- const struct fs_parameter_spec *specs; /* List of param specifications */
- const struct fs_parameter_enum *enums; /* Enum values */
+ const void *data;
};
/*
@@ -72,7 +52,6 @@ struct fs_parameter_description {
*/
struct fs_parse_result {
bool negated; /* T if param was "noxxx" */
- bool has_value; /* T if value supplied to param */
union {
bool boolean; /* For spec_bool */
int int_32; /* For spec_s32/spec_enum */
@@ -81,28 +60,37 @@ struct fs_parse_result {
};
};
-extern int fs_parse(struct fs_context *fc,
- const struct fs_parameter_description *desc,
+extern int __fs_parse(struct p_log *log,
+ const struct fs_parameter_spec *desc,
struct fs_parameter *value,
struct fs_parse_result *result);
+
+static inline int fs_parse(struct fs_context *fc,
+ const struct fs_parameter_spec *desc,
+ struct fs_parameter *param,
+ struct fs_parse_result *result)
+{
+ return __fs_parse(&fc->log, desc, param, result);
+}
+
extern int fs_lookup_param(struct fs_context *fc,
struct fs_parameter *param,
bool want_bdev,
struct path *_path);
-extern int __lookup_constant(const struct constant_table tbl[], size_t tbl_size,
- const char *name, int not_found);
-#define lookup_constant(t, n, nf) __lookup_constant(t, ARRAY_SIZE(t), (n), (nf))
+extern int lookup_constant(const struct constant_table tbl[], const char *name, int not_found);
#ifdef CONFIG_VALIDATE_FS_PARSER
extern bool validate_constant_table(const struct constant_table *tbl, size_t tbl_size,
int low, int high, int special);
-extern bool fs_validate_description(const struct fs_parameter_description *desc);
+extern bool fs_validate_description(const char *name,
+ const struct fs_parameter_spec *desc);
#else
static inline bool validate_constant_table(const struct constant_table *tbl, size_t tbl_size,
int low, int high, int special)
{ return true; }
-static inline bool fs_validate_description(const struct fs_parameter_description *desc)
+static inline bool fs_validate_description(const char *name,
+ const struct fs_parameter_spec *desc)
{ return true; }
#endif
@@ -115,33 +103,32 @@ static inline bool fs_validate_description(const struct fs_parameter_description
* work, but any such case is probably a sign that new helper is needed.
* Helpers will remain stable; low-level implementation may change.
*/
-#define __fsparam(TYPE, NAME, OPT, FLAGS) \
+#define __fsparam(TYPE, NAME, OPT, FLAGS, DATA) \
{ \
.name = NAME, \
.opt = OPT, \
.type = TYPE, \
- .flags = FLAGS \
+ .flags = FLAGS, \
+ .data = DATA \
}
-#define fsparam_flag(NAME, OPT) __fsparam(fs_param_is_flag, NAME, OPT, 0)
+#define fsparam_flag(NAME, OPT) __fsparam(NULL, NAME, OPT, 0, NULL)
#define fsparam_flag_no(NAME, OPT) \
- __fsparam(fs_param_is_flag, NAME, OPT, \
- fs_param_neg_with_no)
-#define fsparam_bool(NAME, OPT) __fsparam(fs_param_is_bool, NAME, OPT, 0)
-#define fsparam_u32(NAME, OPT) __fsparam(fs_param_is_u32, NAME, OPT, 0)
+ __fsparam(NULL, NAME, OPT, fs_param_neg_with_no, NULL)
+#define fsparam_bool(NAME, OPT) __fsparam(fs_param_is_bool, NAME, OPT, 0, NULL)
+#define fsparam_u32(NAME, OPT) __fsparam(fs_param_is_u32, NAME, OPT, 0, NULL)
#define fsparam_u32oct(NAME, OPT) \
- __fsparam(fs_param_is_u32_octal, NAME, OPT, 0)
+ __fsparam(fs_param_is_u32, NAME, OPT, 0, (void *)8)
#define fsparam_u32hex(NAME, OPT) \
- __fsparam(fs_param_is_u32_hex, NAME, OPT, 0)
-#define fsparam_s32(NAME, OPT) __fsparam(fs_param_is_s32, NAME, OPT, 0)
-#define fsparam_u64(NAME, OPT) __fsparam(fs_param_is_u64, NAME, OPT, 0)
-#define fsparam_enum(NAME, OPT)	__fsparam(fs_param_is_enum, NAME, OPT, 0)
+			__fsparam(fs_param_is_u32_hex, NAME, OPT, 0, (void *)16)
+#define fsparam_s32(NAME, OPT) __fsparam(fs_param_is_s32, NAME, OPT, 0, NULL)
+#define fsparam_u64(NAME, OPT) __fsparam(fs_param_is_u64, NAME, OPT, 0, NULL)
+#define fsparam_enum(NAME, OPT, array) __fsparam(fs_param_is_enum, NAME, OPT, 0, array)
#define fsparam_string(NAME, OPT) \
- __fsparam(fs_param_is_string, NAME, OPT, 0)
-#define fsparam_blob(NAME, OPT) __fsparam(fs_param_is_blob, NAME, OPT, 0)
-#define fsparam_bdev(NAME, OPT) __fsparam(fs_param_is_blockdev, NAME, OPT, 0)
-#define fsparam_path(NAME, OPT) __fsparam(fs_param_is_path, NAME, OPT, 0)
-#define fsparam_fd(NAME, OPT) __fsparam(fs_param_is_fd, NAME, OPT, 0)
-
+ __fsparam(fs_param_is_string, NAME, OPT, 0, NULL)
+#define fsparam_blob(NAME, OPT) __fsparam(fs_param_is_blob, NAME, OPT, 0, NULL)
+#define fsparam_bdev(NAME, OPT) __fsparam(fs_param_is_blockdev, NAME, OPT, 0, NULL)
+#define fsparam_path(NAME, OPT) __fsparam(fs_param_is_path, NAME, OPT, 0, NULL)
+#define fsparam_fd(NAME, OPT) __fsparam(fs_param_is_fd, NAME, OPT, 0, NULL)
#endif /* _LINUX_FS_PARSER_H */
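
A sketch of a parameter table under the reworked API, where enum value tables now ride in ->data via fsparam_enum(); all myfs_*/Opt_* names are hypothetical:

	enum { Opt_mode, Opt_timeout, Opt_legacy };

	static const struct constant_table myfs_param_mode[] = {
		{ "relaxed",	0 },
		{ "strict",	1 },
		{}
	};

	static const struct fs_parameter_spec myfs_fs_parameters[] = {
		fsparam_enum("mode",	Opt_mode,	myfs_param_mode),
		fsparam_u32("timeout",	Opt_timeout),
		fsparam_flag("legacy",	Opt_legacy),
		{}
	};
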
diff --git a/include/linux/fscrypt.h b/include/linux/fscrypt.h
index f622f7460ed8..e3c2d2a15525 100644
--- a/include/linux/fscrypt.h
+++ b/include/linux/fscrypt.h
@@ -20,7 +20,6 @@
#define FS_CRYPTO_BLOCK_SIZE 16
-struct fscrypt_ctx;
struct fscrypt_info;
struct fscrypt_str {
@@ -62,18 +61,9 @@ struct fscrypt_operations {
bool (*dummy_context)(struct inode *);
bool (*empty_dir)(struct inode *);
unsigned int max_namelen;
-};
-
-/* Decryption work */
-struct fscrypt_ctx {
- union {
- struct {
- struct bio *bio;
- struct work_struct work;
- };
- struct list_head free_list; /* Free list */
- };
- u8 flags; /* Flags */
+ bool (*has_stable_inodes)(struct super_block *sb);
+ void (*get_ino_and_lblk_bits)(struct super_block *sb,
+ int *ino_bits_ret, int *lblk_bits_ret);
};
static inline bool fscrypt_has_encryption_key(const struct inode *inode)
@@ -82,6 +72,21 @@ static inline bool fscrypt_has_encryption_key(const struct inode *inode)
return READ_ONCE(inode->i_crypt_info) != NULL;
}
+/**
+ * fscrypt_needs_contents_encryption() - check whether an inode needs
+ * contents encryption
+ *
+ * Return: %true iff the inode is an encrypted regular file and the kernel was
+ * built with fscrypt support.
+ *
+ * If you need to know whether the encrypt bit is set even when the kernel was
+ * built without fscrypt support, you must use IS_ENCRYPTED() directly instead.
+ */
+static inline bool fscrypt_needs_contents_encryption(const struct inode *inode)
+{
+ return IS_ENCRYPTED(inode) && S_ISREG(inode->i_mode);
+}
+
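
A sketch of the intended call site in a filesystem's read path; everything other than the helper itself is illustrative:

	if (fscrypt_needs_contents_encryption(inode))
		return myfs_read_encrypted(inode, page);	/* decrypting read path */
	return myfs_read_plain(inode, page);
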
static inline bool fscrypt_dummy_context_enabled(struct inode *inode)
{
return inode->i_sb->s_cop->dummy_context &&
@@ -102,8 +107,6 @@ static inline void fscrypt_handle_d_move(struct dentry *dentry)
/* crypto.c */
extern void fscrypt_enqueue_decrypt_work(struct work_struct *);
-extern struct fscrypt_ctx *fscrypt_get_ctx(gfp_t);
-extern void fscrypt_release_ctx(struct fscrypt_ctx *);
extern struct page *fscrypt_encrypt_pagecache_blocks(struct page *page,
unsigned int len,
@@ -136,6 +139,7 @@ extern void fscrypt_free_bounce_page(struct page *bounce_page);
extern int fscrypt_ioctl_set_policy(struct file *, const void __user *);
extern int fscrypt_ioctl_get_policy(struct file *, void __user *);
extern int fscrypt_ioctl_get_policy_ex(struct file *, void __user *);
+extern int fscrypt_ioctl_get_nonce(struct file *filp, void __user *arg);
extern int fscrypt_has_permitted_context(struct inode *, struct inode *);
extern int fscrypt_inherit_context(struct inode *, struct inode *,
void *, bool);
@@ -165,87 +169,17 @@ static inline void fscrypt_free_filename(struct fscrypt_name *fname)
extern int fscrypt_fname_alloc_buffer(const struct inode *, u32,
struct fscrypt_str *);
extern void fscrypt_fname_free_buffer(struct fscrypt_str *);
-extern int fscrypt_fname_disk_to_usr(struct inode *, u32, u32,
- const struct fscrypt_str *, struct fscrypt_str *);
-
-#define FSCRYPT_FNAME_MAX_UNDIGESTED_SIZE 32
-
-/* Extracts the second-to-last ciphertext block; see explanation below */
-#define FSCRYPT_FNAME_DIGEST(name, len) \
- ((name) + round_down((len) - FS_CRYPTO_BLOCK_SIZE - 1, \
- FS_CRYPTO_BLOCK_SIZE))
-
-#define FSCRYPT_FNAME_DIGEST_SIZE FS_CRYPTO_BLOCK_SIZE
-
-/**
- * fscrypt_digested_name - alternate identifier for an on-disk filename
- *
- * When userspace lists an encrypted directory without access to the key,
- * filenames whose ciphertext is longer than FSCRYPT_FNAME_MAX_UNDIGESTED_SIZE
- * bytes are shown in this abbreviated form (base64-encoded) rather than as the
- * full ciphertext (base64-encoded). This is necessary to allow supporting
- * filenames up to NAME_MAX bytes, since base64 encoding expands the length.
- *
- * To make it possible for filesystems to still find the correct directory entry
- * despite not knowing the full on-disk name, we encode any filesystem-specific
- * 'hash' and/or 'minor_hash' which the filesystem may need for its lookups,
- * followed by the second-to-last ciphertext block of the filename. Due to the
- * use of the CBC-CTS encryption mode, the second-to-last ciphertext block
- * depends on the full plaintext. (Note that ciphertext stealing causes the
- * last two blocks to appear "flipped".) This makes accidental collisions very
- * unlikely: just a 1 in 2^128 chance for two filenames to collide even if they
- * share the same filesystem-specific hashes.
- *
- * However, this scheme isn't immune to intentional collisions, which can be
- * created by anyone able to create arbitrary plaintext filenames and view them
- * without the key. Making the "digest" be a real cryptographic hash like
- * SHA-256 over the full ciphertext would prevent this, although it would be
- * less efficient and harder to implement, especially since the filesystem would
- * need to calculate it for each directory entry examined during a search.
- */
-struct fscrypt_digested_name {
- u32 hash;
- u32 minor_hash;
- u8 digest[FSCRYPT_FNAME_DIGEST_SIZE];
-};
-
-/**
- * fscrypt_match_name() - test whether the given name matches a directory entry
- * @fname: the name being searched for
- * @de_name: the name from the directory entry
- * @de_name_len: the length of @de_name in bytes
- *
- * Normally @fname->disk_name will be set, and in that case we simply compare
- * that to the name stored in the directory entry. The only exception is that
- * if we don't have the key for an encrypted directory and a filename in it is
- * very long, then we won't have the full disk_name and we'll instead need to
- * match against the fscrypt_digested_name.
- *
- * Return: %true if the name matches, otherwise %false.
- */
-static inline bool fscrypt_match_name(const struct fscrypt_name *fname,
- const u8 *de_name, u32 de_name_len)
-{
- if (unlikely(!fname->disk_name.name)) {
- const struct fscrypt_digested_name *n =
- (const void *)fname->crypto_buf.name;
- if (WARN_ON_ONCE(fname->usr_fname->name[0] != '_'))
- return false;
- if (de_name_len <= FSCRYPT_FNAME_MAX_UNDIGESTED_SIZE)
- return false;
- return !memcmp(FSCRYPT_FNAME_DIGEST(de_name, de_name_len),
- n->digest, FSCRYPT_FNAME_DIGEST_SIZE);
- }
-
- if (de_name_len != fname->disk_name.len)
- return false;
- return !memcmp(de_name, fname->disk_name.name, fname->disk_name.len);
-}
+extern int fscrypt_fname_disk_to_usr(const struct inode *inode,
+ u32 hash, u32 minor_hash,
+ const struct fscrypt_str *iname,
+ struct fscrypt_str *oname);
+extern bool fscrypt_match_name(const struct fscrypt_name *fname,
+ const u8 *de_name, u32 de_name_len);
+extern u64 fscrypt_fname_siphash(const struct inode *dir,
+ const struct qstr *name);
/* bio.c */
extern void fscrypt_decrypt_bio(struct bio *);
-extern void fscrypt_enqueue_decrypt_bio(struct fscrypt_ctx *ctx,
- struct bio *bio);
extern int fscrypt_zeroout_range(const struct inode *, pgoff_t, sector_t,
unsigned int);
@@ -260,6 +194,8 @@ extern int __fscrypt_prepare_rename(struct inode *old_dir,
unsigned int flags);
extern int __fscrypt_prepare_lookup(struct inode *dir, struct dentry *dentry,
struct fscrypt_name *fname);
+extern int fscrypt_prepare_setflags(struct inode *inode,
+ unsigned int oldflags, unsigned int flags);
extern int __fscrypt_prepare_symlink(struct inode *dir, unsigned int len,
unsigned int max_len,
struct fscrypt_str *disk_link);
@@ -281,6 +217,11 @@ static inline bool fscrypt_has_encryption_key(const struct inode *inode)
return false;
}
+static inline bool fscrypt_needs_contents_encryption(const struct inode *inode)
+{
+ return false;
+}
+
static inline bool fscrypt_dummy_context_enabled(struct inode *inode)
{
return false;
@@ -295,16 +236,6 @@ static inline void fscrypt_enqueue_decrypt_work(struct work_struct *work)
{
}
-static inline struct fscrypt_ctx *fscrypt_get_ctx(gfp_t gfp_flags)
-{
- return ERR_PTR(-EOPNOTSUPP);
-}
-
-static inline void fscrypt_release_ctx(struct fscrypt_ctx *ctx)
-{
- return;
-}
-
static inline struct page *fscrypt_encrypt_pagecache_blocks(struct page *page,
unsigned int len,
unsigned int offs,
@@ -370,6 +301,11 @@ static inline int fscrypt_ioctl_get_policy_ex(struct file *filp,
return -EOPNOTSUPP;
}
+static inline int fscrypt_ioctl_get_nonce(struct file *filp, void __user *arg)
+{
+ return -EOPNOTSUPP;
+}
+
static inline int fscrypt_has_permitted_context(struct inode *parent,
struct inode *child)
{
@@ -462,7 +398,7 @@ static inline void fscrypt_fname_free_buffer(struct fscrypt_str *crypto_str)
return;
}
-static inline int fscrypt_fname_disk_to_usr(struct inode *inode,
+static inline int fscrypt_fname_disk_to_usr(const struct inode *inode,
u32 hash, u32 minor_hash,
const struct fscrypt_str *iname,
struct fscrypt_str *oname)
@@ -479,13 +415,15 @@ static inline bool fscrypt_match_name(const struct fscrypt_name *fname,
return !memcmp(de_name, fname->disk_name.name, fname->disk_name.len);
}
-/* bio.c */
-static inline void fscrypt_decrypt_bio(struct bio *bio)
+static inline u64 fscrypt_fname_siphash(const struct inode *dir,
+ const struct qstr *name)
{
+ WARN_ON_ONCE(1);
+ return 0;
}
-static inline void fscrypt_enqueue_decrypt_bio(struct fscrypt_ctx *ctx,
- struct bio *bio)
+/* bio.c */
+static inline void fscrypt_decrypt_bio(struct bio *bio)
{
}
@@ -526,6 +464,13 @@ static inline int __fscrypt_prepare_lookup(struct inode *dir,
return -EOPNOTSUPP;
}
+static inline int fscrypt_prepare_setflags(struct inode *inode,
+ unsigned int oldflags,
+ unsigned int flags)
+{
+ return 0;
+}
+
static inline int __fscrypt_prepare_symlink(struct inode *dir,
unsigned int len,
unsigned int max_len,
diff --git a/include/linux/fsl/enetc_mdio.h b/include/linux/fsl/enetc_mdio.h
new file mode 100644
index 000000000000..4875dd38af7e
--- /dev/null
+++ b/include/linux/fsl/enetc_mdio.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: (GPL-2.0+ OR BSD-3-Clause) */
+/* Copyright 2019 NXP */
+
+#ifndef _FSL_ENETC_MDIO_H_
+#define _FSL_ENETC_MDIO_H_
+
+#include <linux/phy.h>
+
+/* PCS registers */
+#define ENETC_PCS_LINK_TIMER1 0x12
+#define ENETC_PCS_LINK_TIMER1_VAL 0x06a0
+#define ENETC_PCS_LINK_TIMER2 0x13
+#define ENETC_PCS_LINK_TIMER2_VAL 0x0003
+#define ENETC_PCS_IF_MODE 0x14
+#define ENETC_PCS_IF_MODE_SGMII_EN BIT(0)
+#define ENETC_PCS_IF_MODE_USE_SGMII_AN BIT(1)
+#define ENETC_PCS_IF_MODE_SGMII_SPEED(x) (((x) << 2) & GENMASK(3, 2))
+
+/* Not a mistake, the SerDes PLL needs to be set at 3.125 GHz by Reset
+ * Configuration Word (RCW, outside Linux control) for 2.5G SGMII mode. The PCS
+ * still thinks it's at gigabit.
+ */
+enum enetc_pcs_speed {
+ ENETC_PCS_SPEED_10 = 0,
+ ENETC_PCS_SPEED_100 = 1,
+ ENETC_PCS_SPEED_1000 = 2,
+ ENETC_PCS_SPEED_2500 = 2,
+};
+
+struct enetc_hw;
+
+struct enetc_mdio_priv {
+ struct enetc_hw *hw;
+ int mdio_base;
+};
+
+#if IS_REACHABLE(CONFIG_FSL_ENETC_MDIO)
+
+int enetc_mdio_read(struct mii_bus *bus, int phy_id, int regnum);
+int enetc_mdio_write(struct mii_bus *bus, int phy_id, int regnum, u16 value);
+struct enetc_hw *enetc_hw_alloc(struct device *dev, void __iomem *port_regs);
+
+#else
+
+static inline int enetc_mdio_read(struct mii_bus *bus, int phy_id, int regnum)
+{ return -EINVAL; }
+static inline int enetc_mdio_write(struct mii_bus *bus, int phy_id, int regnum,
+ u16 value)
+{ return -EINVAL; }
+static inline struct enetc_hw *enetc_hw_alloc(struct device *dev,
+					      void __iomem *port_regs)
+{ return ERR_PTR(-EINVAL); }
+
+#endif
+
+#endif
diff --git a/include/linux/fsl/mc.h b/include/linux/fsl/mc.h
index 975553a9f75d..54d9436600c7 100644
--- a/include/linux/fsl/mc.h
+++ b/include/linux/fsl/mc.h
@@ -403,6 +403,8 @@ int __must_check fsl_mc_allocate_irqs(struct fsl_mc_device *mc_dev);
void fsl_mc_free_irqs(struct fsl_mc_device *mc_dev);
+struct fsl_mc_device *fsl_mc_get_endpoint(struct fsl_mc_device *mc_dev);
+
extern struct bus_type fsl_mc_bus_type;
extern struct device_type fsl_mc_bus_dprc_type;
diff --git a/include/linux/fsl/ptp_qoriq.h b/include/linux/fsl/ptp_qoriq.h
index 992bf9fa1729..b0b743563f43 100644
--- a/include/linux/fsl/ptp_qoriq.h
+++ b/include/linux/fsl/ptp_qoriq.h
@@ -192,6 +192,7 @@ int ptp_qoriq_settime(struct ptp_clock_info *ptp,
const struct timespec64 *ts);
int ptp_qoriq_enable(struct ptp_clock_info *ptp,
struct ptp_clock_request *rq, int on);
+int extts_clean_up(struct ptp_qoriq *ptp_qoriq, int index, bool update_event);
#ifdef CONFIG_DEBUG_FS
void ptp_qoriq_create_debugfs(struct ptp_qoriq *ptp_qoriq);
void ptp_qoriq_remove_debugfs(struct ptp_qoriq *ptp_qoriq);
diff --git a/include/linux/fsverity.h b/include/linux/fsverity.h
index 3b6b8ccebe7d..ecc604e61d61 100644
--- a/include/linux/fsverity.h
+++ b/include/linux/fsverity.h
@@ -77,6 +77,10 @@ struct fsverity_operations {
*
* @inode: the inode
* @index: 0-based index of the page within the Merkle tree
+ * @num_ra_pages: The number of Merkle tree pages that should be
+ * prefetched starting at @index if the page at @index
+ * isn't already cached. Implementations may ignore this
+ * argument; it's only a performance optimization.
*
* This can be called at any time on an open verity file, as well as
* between ->begin_enable_verity() and ->end_enable_verity(). It may be
@@ -87,7 +91,8 @@ struct fsverity_operations {
* Return: the page on success, ERR_PTR() on failure
*/
struct page *(*read_merkle_tree_page)(struct inode *inode,
- pgoff_t index);
+ pgoff_t index,
+ unsigned long num_ra_pages);
/**
* Write a Merkle tree block to the given inode.
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 8a8cb3c401b2..db95244a62d4 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -51,6 +51,7 @@ static inline void early_trace_init(void) { }
struct module;
struct ftrace_hash;
+struct ftrace_direct_func;
#if defined(CONFIG_FUNCTION_TRACER) && defined(CONFIG_MODULES) && \
defined(CONFIG_DYNAMIC_FTRACE)
@@ -142,24 +143,30 @@ ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops);
* PID - Is affected by set_ftrace_pid (allows filtering on those pids)
* RCU - Set when the ops can only be called when RCU is watching.
* TRACE_ARRAY - The ops->private points to a trace_array descriptor.
+ * PERMANENT - Set when the ops is permanent and should not be affected by
+ * ftrace_enabled.
+ * DIRECT - Used by the direct ftrace_ops helper for direct functions
+ * (internal ftrace only, should not be used by others)
*/
enum {
- FTRACE_OPS_FL_ENABLED = 1 << 0,
- FTRACE_OPS_FL_DYNAMIC = 1 << 1,
- FTRACE_OPS_FL_SAVE_REGS = 1 << 2,
- FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED = 1 << 3,
- FTRACE_OPS_FL_RECURSION_SAFE = 1 << 4,
- FTRACE_OPS_FL_STUB = 1 << 5,
- FTRACE_OPS_FL_INITIALIZED = 1 << 6,
- FTRACE_OPS_FL_DELETED = 1 << 7,
- FTRACE_OPS_FL_ADDING = 1 << 8,
- FTRACE_OPS_FL_REMOVING = 1 << 9,
- FTRACE_OPS_FL_MODIFYING = 1 << 10,
- FTRACE_OPS_FL_ALLOC_TRAMP = 1 << 11,
- FTRACE_OPS_FL_IPMODIFY = 1 << 12,
- FTRACE_OPS_FL_PID = 1 << 13,
- FTRACE_OPS_FL_RCU = 1 << 14,
- FTRACE_OPS_FL_TRACE_ARRAY = 1 << 15,
+ FTRACE_OPS_FL_ENABLED = BIT(0),
+ FTRACE_OPS_FL_DYNAMIC = BIT(1),
+ FTRACE_OPS_FL_SAVE_REGS = BIT(2),
+ FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED = BIT(3),
+ FTRACE_OPS_FL_RECURSION_SAFE = BIT(4),
+ FTRACE_OPS_FL_STUB = BIT(5),
+ FTRACE_OPS_FL_INITIALIZED = BIT(6),
+ FTRACE_OPS_FL_DELETED = BIT(7),
+ FTRACE_OPS_FL_ADDING = BIT(8),
+ FTRACE_OPS_FL_REMOVING = BIT(9),
+ FTRACE_OPS_FL_MODIFYING = BIT(10),
+ FTRACE_OPS_FL_ALLOC_TRAMP = BIT(11),
+ FTRACE_OPS_FL_IPMODIFY = BIT(12),
+ FTRACE_OPS_FL_PID = BIT(13),
+ FTRACE_OPS_FL_RCU = BIT(14),
+ FTRACE_OPS_FL_TRACE_ARRAY = BIT(15),
+ FTRACE_OPS_FL_PERMANENT = BIT(16),
+ FTRACE_OPS_FL_DIRECT = BIT(17),
};
#ifdef CONFIG_DYNAMIC_FTRACE
@@ -239,6 +246,75 @@ static inline void ftrace_free_init_mem(void) { }
static inline void ftrace_free_mem(struct module *mod, void *start, void *end) { }
#endif /* CONFIG_FUNCTION_TRACER */
+struct ftrace_func_entry {
+ struct hlist_node hlist;
+ unsigned long ip;
+ unsigned long direct; /* for direct lookup only */
+};
+
+struct dyn_ftrace;
+
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
+extern int ftrace_direct_func_count;
+int register_ftrace_direct(unsigned long ip, unsigned long addr);
+int unregister_ftrace_direct(unsigned long ip, unsigned long addr);
+int modify_ftrace_direct(unsigned long ip, unsigned long old_addr, unsigned long new_addr);
+struct ftrace_direct_func *ftrace_find_direct_func(unsigned long addr);
+int ftrace_modify_direct_caller(struct ftrace_func_entry *entry,
+ struct dyn_ftrace *rec,
+ unsigned long old_addr,
+ unsigned long new_addr);
+unsigned long ftrace_find_rec_direct(unsigned long ip);
+#else
+# define ftrace_direct_func_count 0
+static inline int register_ftrace_direct(unsigned long ip, unsigned long addr)
+{
+ return -ENOTSUPP;
+}
+static inline int unregister_ftrace_direct(unsigned long ip, unsigned long addr)
+{
+ return -ENOTSUPP;
+}
+static inline int modify_ftrace_direct(unsigned long ip,
+ unsigned long old_addr, unsigned long new_addr)
+{
+ return -ENOTSUPP;
+}
+static inline struct ftrace_direct_func *ftrace_find_direct_func(unsigned long addr)
+{
+ return NULL;
+}
+static inline int ftrace_modify_direct_caller(struct ftrace_func_entry *entry,
+ struct dyn_ftrace *rec,
+ unsigned long old_addr,
+ unsigned long new_addr)
+{
+ return -ENODEV;
+}
+static inline unsigned long ftrace_find_rec_direct(unsigned long ip)
+{
+ return 0;
+}
+#endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */
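
A sketch of attaching a direct trampoline with the new API, loosely following how a module might use it; my_traced_func and my_tramp are placeholders:

	static int attach_direct_probe(void)
	{
		/* route calls to the traced function through my_tramp directly */
		return register_ftrace_direct((unsigned long)my_traced_func,
					      (unsigned long)my_tramp);
	}

	/* and on teardown, the matching call:
	 * unregister_ftrace_direct((unsigned long)my_traced_func,
	 *			    (unsigned long)my_tramp);
	 */
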
+
+#ifndef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
+/*
+ * This must be implemented by the architecture.
+ * It is the way the ftrace direct_ops helper, when called
+ * via ftrace (because there are other callbacks besides the
+ * direct call), can inform the architecture's trampoline that this
+ * routine has a direct caller, and what the caller is.
+ *
+ * For example, in x86, it returns the direct caller
+ * callback function via the regs->orig_ax parameter.
+ * Then in the ftrace trampoline, if this is set, it makes
+ * the return from the trampoline jump to the direct caller
+ * instead of going back to the function it just traced.
+ */
+static inline void arch_ftrace_set_direct_caller(struct pt_regs *regs,
+ unsigned long addr) { }
+#endif /* CONFIG_HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */
+
#ifdef CONFIG_STACK_TRACER
extern int stack_tracer_enabled;
@@ -291,8 +367,6 @@ static inline void stack_tracer_enable(void) { }
int ftrace_arch_code_modify_prepare(void);
int ftrace_arch_code_modify_post_process(void);
-struct dyn_ftrace;
-
enum ftrace_bug_type {
FTRACE_BUG_UNKNOWN,
FTRACE_BUG_INIT,
@@ -330,6 +404,7 @@ bool is_ftrace_trampoline(unsigned long addr);
* REGS_EN - the function is set up to save regs.
* IPMODIFY - the record allows for the IP address to be changed.
* DISABLED - the record is not ready to be touched yet
+ * DIRECT - there is a direct function to call
*
* When a new ftrace_ops is registered and wants a function to save
* pt_regs, the rec->flag REGS is set. When the function has been
@@ -345,10 +420,12 @@ enum {
FTRACE_FL_TRAMP_EN = (1UL << 27),
FTRACE_FL_IPMODIFY = (1UL << 26),
FTRACE_FL_DISABLED = (1UL << 25),
+ FTRACE_FL_DIRECT = (1UL << 24),
+ FTRACE_FL_DIRECT_EN = (1UL << 23),
};
-#define FTRACE_REF_MAX_SHIFT 25
-#define FTRACE_FL_BITS 7
+#define FTRACE_REF_MAX_SHIFT 23
+#define FTRACE_FL_BITS 9
#define FTRACE_FL_MASKED_BITS ((1UL << FTRACE_FL_BITS) - 1)
#define FTRACE_FL_MASK (FTRACE_FL_MASKED_BITS << FTRACE_REF_MAX_SHIFT)
#define FTRACE_REF_MAX ((1UL << FTRACE_REF_MAX_SHIFT) - 1)
@@ -499,7 +576,7 @@ static inline int ftrace_disable_ftrace_graph_caller(void) { return 0; }
/**
* ftrace_make_nop - convert code into nop
* @mod: module structure if called by module load initialization
- * @rec: the mcount call site record
+ * @rec: the call site record (e.g. mcount/fentry)
* @addr: the address that the call site should be calling
*
* This is a very sensitive operation and great care needs
@@ -520,9 +597,38 @@ static inline int ftrace_disable_ftrace_graph_caller(void) { return 0; }
extern int ftrace_make_nop(struct module *mod,
struct dyn_ftrace *rec, unsigned long addr);
+
+/**
+ * ftrace_init_nop - initialize a nop call site
+ * @mod: module structure if called by module load initialization
+ * @rec: the call site record (e.g. mcount/fentry)
+ *
+ * This is a very sensitive operation and great care needs
+ * to be taken by the arch. The operation should carefully
+ * read the location, check to see if what is read is indeed
+ * what we expect it to be, and then on success of the compare,
+ * it should write to the location.
+ *
+ * The code segment at @rec->ip should contain the contents created by
+ * the compiler
+ *
+ * Return must be:
+ * 0 on success
+ * -EFAULT on error reading the location
+ * -EINVAL on a failed compare of the contents
+ * -EPERM on error writing to the location
+ * Any other value will be considered a failure.
+ */
+#ifndef ftrace_init_nop
+static inline int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
+{
+ return ftrace_make_nop(mod, rec, MCOUNT_ADDR);
+}
+#endif
+
/**
* ftrace_make_call - convert a nop call site into a call to addr
- * @rec: the mcount call site record
+ * @rec: the call site record (e.g. mcount/fentry)
* @addr: the address that the call site should call
*
* This is a very sensitive operation and great care needs
@@ -545,7 +651,7 @@ extern int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr);
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
/**
* ftrace_modify_call - convert from one addr to another (no nop)
- * @rec: the mcount call site record
+ * @rec: the call site record (e.g. mcount/fentry)
* @old_addr: the address expected to be currently called to
* @addr: the address to change to
*
@@ -709,6 +815,11 @@ static inline unsigned long get_lock_parent_ip(void)
#ifdef CONFIG_FTRACE_MCOUNT_RECORD
extern void ftrace_init(void);
+#ifdef CC_USING_PATCHABLE_FUNCTION_ENTRY
+#define FTRACE_CALLSITE_SECTION "__patchable_function_entries"
+#else
+#define FTRACE_CALLSITE_SECTION "__mcount_loc"
+#endif
#else
static inline void ftrace_init(void) { }
#endif
diff --git a/include/linux/futex.h b/include/linux/futex.h
index ccaef0097785..b70df27d7e85 100644
--- a/include/linux/futex.h
+++ b/include/linux/futex.h
@@ -2,7 +2,9 @@
#ifndef _LINUX_FUTEX_H
#define _LINUX_FUTEX_H
+#include <linux/sched.h>
#include <linux/ktime.h>
+
#include <uapi/linux/futex.h>
struct inode;
@@ -29,34 +31,57 @@ struct task_struct;
union futex_key {
struct {
+ u64 i_seq;
unsigned long pgoff;
- struct inode *inode;
- int offset;
+ unsigned int offset;
} shared;
struct {
+ union {
+ struct mm_struct *mm;
+ u64 __tmp;
+ };
unsigned long address;
- struct mm_struct *mm;
- int offset;
+ unsigned int offset;
} private;
struct {
+ u64 ptr;
unsigned long word;
- void *ptr;
- int offset;
+ unsigned int offset;
} both;
};
-#define FUTEX_KEY_INIT (union futex_key) { .both = { .ptr = NULL } }
+#define FUTEX_KEY_INIT (union futex_key) { .both = { .ptr = 0ULL } }
#ifdef CONFIG_FUTEX
-extern void exit_robust_list(struct task_struct *curr);
+enum {
+ FUTEX_STATE_OK,
+ FUTEX_STATE_EXITING,
+ FUTEX_STATE_DEAD,
+};
-long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
- u32 __user *uaddr2, u32 val2, u32 val3);
-#else
-static inline void exit_robust_list(struct task_struct *curr)
+static inline void futex_init_task(struct task_struct *tsk)
{
+ tsk->robust_list = NULL;
+#ifdef CONFIG_COMPAT
+ tsk->compat_robust_list = NULL;
+#endif
+ INIT_LIST_HEAD(&tsk->pi_state_list);
+ tsk->pi_state_cache = NULL;
+ tsk->futex_state = FUTEX_STATE_OK;
+ mutex_init(&tsk->futex_exit_mutex);
}
+void futex_exit_recursive(struct task_struct *tsk);
+void futex_exit_release(struct task_struct *tsk);
+void futex_exec_release(struct task_struct *tsk);
+
+long do_futex(u32 __user *uaddr, int op, u32 val, ktime_t *timeout,
+ u32 __user *uaddr2, u32 val2, u32 val3);
+#else
+static inline void futex_init_task(struct task_struct *tsk) { }
+static inline void futex_exit_recursive(struct task_struct *tsk) { }
+static inline void futex_exit_release(struct task_struct *tsk) { }
+static inline void futex_exec_release(struct task_struct *tsk) { }
static inline long do_futex(u32 __user *uaddr, int op, u32 val,
ktime_t *timeout, u32 __user *uaddr2,
u32 val2, u32 val3)
@@ -65,12 +90,4 @@ static inline long do_futex(u32 __user *uaddr, int op, u32 val,
}
#endif
-#ifdef CONFIG_FUTEX_PI
-extern void exit_pi_state_list(struct task_struct *curr);
-#else
-static inline void exit_pi_state_list(struct task_struct *curr)
-{
-}
-#endif
-
#endif
diff --git a/include/linux/fwnode.h b/include/linux/fwnode.h
index ababd6bc82f3..e0abafbb17f8 100644
--- a/include/linux/fwnode.h
+++ b/include/linux/fwnode.h
@@ -17,6 +17,7 @@ struct device;
struct fwnode_handle {
struct fwnode_handle *secondary;
const struct fwnode_operations *ops;
+ struct device *dev;
};
/**
@@ -49,13 +50,15 @@ struct fwnode_reference_args {
* struct fwnode_operations - Operations for fwnode interface
* @get: Get a reference to an fwnode.
* @put: Put a reference to an fwnode.
+ * @device_is_available: Return true if the device is available.
* @device_get_match_data: Return the device driver match data.
* @property_present: Return true if a property is present.
- * @property_read_integer_array: Read an array of integer properties. Return
- * zero on success, a negative error code
- * otherwise.
+ * @property_read_int_array: Read an array of integer properties. Return zero on
+ * success, a negative error code otherwise.
* @property_read_string_array: Read an array of string properties. Return zero
* on success, a negative error code otherwise.
+ * @get_name: Return the name of an fwnode.
+ * @get_name_prefix: Get a prefix for a node (for printing purposes).
* @get_parent: Return the parent of an fwnode.
* @get_next_child_node: Return the next child node in an iteration.
* @get_named_child_node: Return a child node with a given name.
@@ -65,6 +68,44 @@ struct fwnode_reference_args {
* endpoint node.
* @graph_get_port_parent: Return the parent node of a port node.
* @graph_parse_endpoint: Parse endpoint for port and endpoint id.
+ * @add_links: Called after the device corresponding to the fwnode is added
+ * using device_add(). The function is expected to create device
+ * links to all the suppliers of the device that are available at
+ * the time this function is called. The function must NOT stop
+ * at the first failed device link if other unlinked supplier
+ * devices are present in the system. This is necessary for the
+ * driver/bus sync_state() callbacks to work correctly.
+ *
+ * For example, say Device-C depends on suppliers Device-S1 and
+ * Device-S2 and the dependency is listed in that order in the
+ * firmware. Say, S1 gets populated from the firmware after
+ * late_initcall_sync(). Say S2 is populated and probed way
+ * before that in device_initcall(). When C is populated, if this
+ * add_links() function doesn't continue past a "failed linking to
+ * S1" and continue linking C to S2, then S2 will get a
+ * sync_state() callback before C is probed. This is because from
+ * the perspective of S2, C was never a consumer when its
+ * sync_state() evaluation is done. To avoid this, the add_links()
+ * function has to go through all available suppliers of the
+ * device (that corresponds to this fwnode) and link to them
+ * before returning.
+ *
+ * If some suppliers are not yet available (indicated by an error
+ * return value), this function will be called again when other
+ * devices are added to allow creating device links to any newly
+ * available suppliers.
+ *
+ * Return 0 if device links have been successfully created to all
+ * the known suppliers of this device or if the supplier
+ * information is not known.
+ *
+ * Return -ENODEV if the suppliers needed for probing this device
+ * have not been registered yet (because device links can only be
+ * created to devices registered with the driver core).
+ *
+ * Return -EAGAIN if some of the suppliers of this device have not
+ * been registered yet, but none of those suppliers are necessary
+ * for probing the device.
*/
struct fwnode_operations {
struct fwnode_handle *(*get)(struct fwnode_handle *fwnode);
@@ -82,6 +123,8 @@ struct fwnode_operations {
(*property_read_string_array)(const struct fwnode_handle *fwnode_handle,
const char *propname, const char **val,
size_t nval);
+ const char *(*get_name)(const struct fwnode_handle *fwnode);
+ const char *(*get_name_prefix)(const struct fwnode_handle *fwnode);
struct fwnode_handle *(*get_parent)(const struct fwnode_handle *fwnode);
struct fwnode_handle *
(*get_next_child_node)(const struct fwnode_handle *fwnode,
@@ -102,6 +145,8 @@ struct fwnode_operations {
(*graph_get_port_parent)(struct fwnode_handle *fwnode);
int (*graph_parse_endpoint)(const struct fwnode_handle *fwnode,
struct fwnode_endpoint *endpoint);
+ int (*add_links)(const struct fwnode_handle *fwnode,
+ struct device *dev);
};
#define fwnode_has_op(fwnode, op) \
@@ -123,5 +168,8 @@ struct fwnode_operations {
if (fwnode_has_op(fwnode, op)) \
(fwnode)->ops->op(fwnode, ## __VA_ARGS__); \
} while (false)
+#define get_dev_from_fwnode(fwnode) get_device((fwnode)->dev)
+
+extern u32 fw_devlink_get_flags(void);
#endif
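To make the add_links() contract above concrete, here is a minimal sketch of an implementation that keeps linking past individual failures and maps the outcome onto the documented 0 / -ENODEV / -EAGAIN return values. The helpers my_supplier_count(), my_parse_supplier() and my_supplier_is_mandatory() are invented for the example; only device_link_add() and fw_devlink_get_flags() are interfaces visible in this patch.

#include <linux/device.h>
#include <linux/fwnode.h>

/* Hypothetical add_links() implementation; helper names are made up. */
static int my_fwnode_add_links(const struct fwnode_handle *fwnode,
			       struct device *dev)
{
	bool needs_retry = false, missing_mandatory = false;
	unsigned int i;

	for (i = 0; i < my_supplier_count(fwnode); i++) {
		/* my_parse_supplier() returns a referenced device or NULL */
		struct device *sup = my_parse_supplier(fwnode, i);

		if (!sup) {
			/* keep going; do not stop at the first failure */
			if (my_supplier_is_mandatory(fwnode, i))
				missing_mandatory = true;
			else
				needs_retry = true;
			continue;
		}
		device_link_add(dev, sup, fw_devlink_get_flags());
		put_device(sup);
	}

	if (missing_mandatory)
		return -ENODEV;	/* a needed supplier is not registered yet */
	if (needs_retry)
		return -EAGAIN;	/* only optional suppliers are still missing */
	return 0;
}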
diff --git a/include/linux/genalloc.h b/include/linux/genalloc.h
index 4bd583bd6934..5b14a0f38124 100644
--- a/include/linux/genalloc.h
+++ b/include/linux/genalloc.h
@@ -206,7 +206,7 @@ extern struct gen_pool *devm_gen_pool_create(struct device *dev,
int min_alloc_order, int nid, const char *name);
extern struct gen_pool *gen_pool_get(struct device *dev, const char *name);
-bool addr_in_gen_pool(struct gen_pool *pool, unsigned long start,
+extern bool gen_pool_has_addr(struct gen_pool *pool, unsigned long start,
size_t size);
#ifdef CONFIG_OF
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
index 8b5330dd5ac0..9b3fffdf4011 100644
--- a/include/linux/genhd.h
+++ b/include/linux/genhd.h
@@ -27,39 +27,8 @@
#define part_to_dev(part) (&((part)->__dev))
extern struct device_type part_type;
-extern struct kobject *block_depr;
extern struct class block_class;
-enum {
-/* These three have identical behaviour; use the second one if DOS FDISK gets
- confused about extended/logical partitions starting past cylinder 1023. */
- DOS_EXTENDED_PARTITION = 5,
- LINUX_EXTENDED_PARTITION = 0x85,
- WIN98_EXTENDED_PARTITION = 0x0f,
-
- SUN_WHOLE_DISK = DOS_EXTENDED_PARTITION,
-
- LINUX_SWAP_PARTITION = 0x82,
- LINUX_DATA_PARTITION = 0x83,
- LINUX_LVM_PARTITION = 0x8e,
- LINUX_RAID_PARTITION = 0xfd, /* autodetect RAID partition */
-
- SOLARIS_X86_PARTITION = LINUX_SWAP_PARTITION,
- NEW_SOLARIS_X86_PARTITION = 0xbf,
-
- DM6_AUX1PARTITION = 0x51, /* no DDO: use xlated geom */
- DM6_AUX3PARTITION = 0x53, /* no DDO: use xlated geom */
- DM6_PARTITION = 0x54, /* has DDO: use xlated geom & offset */
- EZD_PARTITION = 0x55, /* EZ-DRIVE */
-
- FREEBSD_PARTITION = 0xa5, /* FreeBSD Partition ID */
- OPENBSD_PARTITION = 0xa6, /* OpenBSD Partition ID */
- NETBSD_PARTITION = 0xa9, /* NetBSD Partition ID */
- BSDI_PARTITION = 0xb7, /* BSDI Partition ID */
- MINIX_PARTITION = 0x81, /* Minix Partition ID */
- UNIXWARE_PARTITION = 0x63, /* Same as GNU_HURD and SCO Unix */
-};
-
#define DISK_MAX_PARTS 256
#define DISK_NAME_LEN 32
@@ -70,26 +39,12 @@ enum {
#include <linux/fs.h>
#include <linux/workqueue.h>
-struct partition {
- unsigned char boot_ind; /* 0x80 - active */
- unsigned char head; /* starting head */
- unsigned char sector; /* starting sector */
- unsigned char cyl; /* starting cylinder */
- unsigned char sys_ind; /* What partition type */
- unsigned char end_head; /* end head */
- unsigned char end_sector; /* end sector */
- unsigned char end_cyl; /* end cylinder */
- __le32 start_sect; /* starting sector counting from 0 */
- __le32 nr_sects; /* nr of sectors in partition */
-} __attribute__((packed));
-
struct disk_stats {
u64 nsecs[NR_STAT_GROUPS];
unsigned long sectors[NR_STAT_GROUPS];
unsigned long ios[NR_STAT_GROUPS];
unsigned long merges[NR_STAT_GROUPS];
unsigned long io_ticks;
- unsigned long time_in_queue;
local_t in_flight[2];
};
@@ -133,17 +88,64 @@ struct hd_struct {
struct rcu_work rcu_work;
};
-#define GENHD_FL_REMOVABLE 1
-/* 2 is unused */
-#define GENHD_FL_MEDIA_CHANGE_NOTIFY 4
-#define GENHD_FL_CD 8
-#define GENHD_FL_UP 16
-#define GENHD_FL_SUPPRESS_PARTITION_INFO 32
-#define GENHD_FL_EXT_DEVT 64 /* allow extended devt */
-#define GENHD_FL_NATIVE_CAPACITY 128
-#define GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE 256
-#define GENHD_FL_NO_PART_SCAN 512
-#define GENHD_FL_HIDDEN 1024
+/**
+ * DOC: genhd capability flags
+ *
+ * ``GENHD_FL_REMOVABLE`` (0x0001): indicates that the block device
+ * gives access to removable media.
+ * When set, the device remains present even when media is not
+ * inserted.
+ * Must not be set for devices which are removed entirely when the
+ * media is removed.
+ *
+ * ``GENHD_FL_CD`` (0x0008): the block device is a CD-ROM-style
+ * device.
+ * Affects responses to the ``CDROM_GET_CAPABILITY`` ioctl.
+ *
+ * ``GENHD_FL_UP`` (0x0010): indicates that the block device is "up",
+ * with a similar meaning to network interfaces.
+ *
+ * ``GENHD_FL_SUPPRESS_PARTITION_INFO`` (0x0020): don't include
+ * partition information in ``/proc/partitions`` or in the output of
+ * printk_all_partitions().
+ * Used for the null block device and some MMC devices.
+ *
+ * ``GENHD_FL_EXT_DEVT`` (0x0040): the driver supports extended
+ * dynamic ``dev_t``, i.e. it wants extended device numbers
+ * (``BLOCK_EXT_MAJOR``).
+ * This affects the maximum number of partitions.
+ *
+ * ``GENHD_FL_NATIVE_CAPACITY`` (0x0080): based on information in the
+ * partition table, the device's capacity has been extended to its
+ * native capacity; i.e. the device has hidden capacity used by one
+ * of the partitions (this is a flag used so that native capacity is
+ * only ever unlocked once).
+ *
+ * ``GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE`` (0x0100): event polling is
+ * blocked whenever a writer holds an exclusive lock.
+ *
+ * ``GENHD_FL_NO_PART_SCAN`` (0x0200): partition scanning is disabled.
+ * Used for loop devices in their default settings and some MMC
+ * devices.
+ *
+ * ``GENHD_FL_HIDDEN`` (0x0400): the block device is hidden; it
+ * doesn't produce events, doesn't appear in sysfs, and doesn't have
+ * an associated ``bdev``.
+ * Implies ``GENHD_FL_SUPPRESS_PARTITION_INFO`` and
+ * ``GENHD_FL_NO_PART_SCAN``.
+ * Used for multipath devices.
+ */
+#define GENHD_FL_REMOVABLE 0x0001
+/* 2 is unused (used to be GENHD_FL_DRIVERFS) */
+/* 4 is unused (used to be GENHD_FL_MEDIA_CHANGE_NOTIFY) */
+#define GENHD_FL_CD 0x0008
+#define GENHD_FL_UP 0x0010
+#define GENHD_FL_SUPPRESS_PARTITION_INFO 0x0020
+#define GENHD_FL_EXT_DEVT 0x0040
+#define GENHD_FL_NATIVE_CAPACITY 0x0080
+#define GENHD_FL_BLOCK_EVENTS_ON_EXCL_WRITE 0x0100
+#define GENHD_FL_NO_PART_SCAN 0x0200
+#define GENHD_FL_HIDDEN 0x0400
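As a quick illustration of how these values are consumed: they are plain bits OR'ed into gendisk->flags before the disk is registered. The probe helper below is a made-up sketch, not taken from any driver.

#include <linux/genhd.h>

static void example_setup_disk(struct gendisk *disk, bool removable_cd,
			       bool hidden)
{
	if (removable_cd)
		disk->flags |= GENHD_FL_REMOVABLE | GENHD_FL_CD;

	/* request extended dev_t numbers for a large partition count */
	disk->flags |= GENHD_FL_EXT_DEVT;

	/* hidden disks imply no partition scan and no /proc/partitions entry */
	if (hidden)
		disk->flags |= GENHD_FL_HIDDEN;
}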
enum {
DISK_EVENT_MEDIA_CHANGE = 1 << 0, /* media changed */
@@ -189,7 +191,6 @@ struct gendisk {
* disks that can't be partitioned. */
char disk_name[DISK_NAME_LEN]; /* name of major driver */
- char *(*devnode)(struct gendisk *gd, umode_t *mode);
unsigned short events; /* supported events */
unsigned short event_flags; /* flags related to event processing */
@@ -283,143 +284,7 @@ extern void disk_part_iter_init(struct disk_part_iter *piter,
struct gendisk *disk, unsigned int flags);
extern struct hd_struct *disk_part_iter_next(struct disk_part_iter *piter);
extern void disk_part_iter_exit(struct disk_part_iter *piter);
-
-extern struct hd_struct *disk_map_sector_rcu(struct gendisk *disk,
- sector_t sector);
-
-/*
- * Macros to operate on percpu disk statistics:
- *
- * {disk|part|all}_stat_{add|sub|inc|dec}() modify the stat counters
- * and should be called between disk_stat_lock() and
- * disk_stat_unlock().
- *
- * part_stat_read() can be called at any time.
- *
- * part_stat_{add|set_all}() and {init|free}_part_stats are for
- * internal use only.
- */
-#ifdef CONFIG_SMP
-#define part_stat_lock() ({ rcu_read_lock(); get_cpu(); })
-#define part_stat_unlock() do { put_cpu(); rcu_read_unlock(); } while (0)
-
-#define part_stat_get_cpu(part, field, cpu) \
- (per_cpu_ptr((part)->dkstats, (cpu))->field)
-
-#define part_stat_get(part, field) \
- part_stat_get_cpu(part, field, smp_processor_id())
-
-#define part_stat_read(part, field) \
-({ \
- typeof((part)->dkstats->field) res = 0; \
- unsigned int _cpu; \
- for_each_possible_cpu(_cpu) \
- res += per_cpu_ptr((part)->dkstats, _cpu)->field; \
- res; \
-})
-
-static inline void part_stat_set_all(struct hd_struct *part, int value)
-{
- int i;
-
- for_each_possible_cpu(i)
- memset(per_cpu_ptr(part->dkstats, i), value,
- sizeof(struct disk_stats));
-}
-
-static inline int init_part_stats(struct hd_struct *part)
-{
- part->dkstats = alloc_percpu(struct disk_stats);
- if (!part->dkstats)
- return 0;
- return 1;
-}
-
-static inline void free_part_stats(struct hd_struct *part)
-{
- free_percpu(part->dkstats);
-}
-
-#else /* !CONFIG_SMP */
-#define part_stat_lock() ({ rcu_read_lock(); 0; })
-#define part_stat_unlock() rcu_read_unlock()
-
-#define part_stat_get(part, field) ((part)->dkstats.field)
-#define part_stat_get_cpu(part, field, cpu) part_stat_get(part, field)
-#define part_stat_read(part, field) part_stat_get(part, field)
-
-static inline void part_stat_set_all(struct hd_struct *part, int value)
-{
- memset(&part->dkstats, value, sizeof(struct disk_stats));
-}
-
-static inline int init_part_stats(struct hd_struct *part)
-{
- return 1;
-}
-
-static inline void free_part_stats(struct hd_struct *part)
-{
-}
-
-#endif /* CONFIG_SMP */
-
-#define part_stat_read_msecs(part, which) \
- div_u64(part_stat_read(part, nsecs[which]), NSEC_PER_MSEC)
-
-#define part_stat_read_accum(part, field) \
- (part_stat_read(part, field[STAT_READ]) + \
- part_stat_read(part, field[STAT_WRITE]) + \
- part_stat_read(part, field[STAT_DISCARD]))
-
-#define __part_stat_add(part, field, addnd) \
- (part_stat_get(part, field) += (addnd))
-
-#define part_stat_add(part, field, addnd) do { \
- __part_stat_add((part), field, addnd); \
- if ((part)->partno) \
- __part_stat_add(&part_to_disk((part))->part0, \
- field, addnd); \
-} while (0)
-
-#define part_stat_dec(gendiskp, field) \
- part_stat_add(gendiskp, field, -1)
-#define part_stat_inc(gendiskp, field) \
- part_stat_add(gendiskp, field, 1)
-#define part_stat_sub(gendiskp, field, subnd) \
- part_stat_add(gendiskp, field, -subnd)
-
-#define part_stat_local_dec(gendiskp, field) \
- local_dec(&(part_stat_get(gendiskp, field)))
-#define part_stat_local_inc(gendiskp, field) \
- local_inc(&(part_stat_get(gendiskp, field)))
-#define part_stat_local_read(gendiskp, field) \
- local_read(&(part_stat_get(gendiskp, field)))
-#define part_stat_local_read_cpu(gendiskp, field, cpu) \
- local_read(&(part_stat_get_cpu(gendiskp, field, cpu)))
-
-unsigned int part_in_flight(struct request_queue *q, struct hd_struct *part);
-void part_in_flight_rw(struct request_queue *q, struct hd_struct *part,
- unsigned int inflight[2]);
-void part_dec_in_flight(struct request_queue *q, struct hd_struct *part,
- int rw);
-void part_inc_in_flight(struct request_queue *q, struct hd_struct *part,
- int rw);
-
-static inline struct partition_meta_info *alloc_part_info(struct gendisk *disk)
-{
- if (disk)
- return kzalloc_node(sizeof(struct partition_meta_info),
- GFP_KERNEL, disk->node_id);
- return kzalloc(sizeof(struct partition_meta_info), GFP_KERNEL);
-}
-
-static inline void free_part_info(struct hd_struct *part)
-{
- kfree(part->info);
-}
-
-void update_io_ticks(struct hd_struct *part, unsigned long now);
+extern bool disk_has_partitions(struct gendisk *disk);
/* block/genhd.c */
extern void device_add_disk(struct device *parent, struct gendisk *disk,
@@ -449,6 +314,8 @@ static inline int get_disk_ro(struct gendisk *disk)
extern void disk_block_events(struct gendisk *disk);
extern void disk_unblock_events(struct gendisk *disk);
extern void disk_flush_events(struct gendisk *disk, unsigned int mask);
+extern void set_capacity_revalidate_and_notify(struct gendisk *disk,
+ sector_t size, bool revalidate);
extern unsigned int disk_clear_events(struct gendisk *disk, unsigned int mask);
/* drivers/char/random.c */
@@ -468,169 +335,11 @@ static inline void set_capacity(struct gendisk *disk, sector_t size)
disk->part0.nr_sects = size;
}
-#ifdef CONFIG_SOLARIS_X86_PARTITION
-
-#define SOLARIS_X86_NUMSLICE 16
-#define SOLARIS_X86_VTOC_SANE (0x600DDEEEUL)
-
-struct solaris_x86_slice {
- __le16 s_tag; /* ID tag of partition */
- __le16 s_flag; /* permission flags */
- __le32 s_start; /* start sector no of partition */
- __le32 s_size; /* # of blocks in partition */
-};
-
-struct solaris_x86_vtoc {
- unsigned int v_bootinfo[3]; /* info needed by mboot (unsupported) */
- __le32 v_sanity; /* to verify vtoc sanity */
- __le32 v_version; /* layout version */
- char v_volume[8]; /* volume name */
- __le16 v_sectorsz; /* sector size in bytes */
- __le16 v_nparts; /* number of partitions */
- unsigned int v_reserved[10]; /* free space */
- struct solaris_x86_slice
- v_slice[SOLARIS_X86_NUMSLICE]; /* slice headers */
- unsigned int timestamp[SOLARIS_X86_NUMSLICE]; /* timestamp (unsupported) */
- char v_asciilabel[128]; /* for compatibility */
-};
-
-#endif /* CONFIG_SOLARIS_X86_PARTITION */
-
-#ifdef CONFIG_BSD_DISKLABEL
-/*
- * BSD disklabel support by Yossi Gottlieb <yogo@math.tau.ac.il>
- * updated by Marc Espie <Marc.Espie@openbsd.org>
- */
-
-/* check against BSD src/sys/sys/disklabel.h for consistency */
-
-#define BSD_DISKMAGIC (0x82564557UL) /* The disk magic number */
-#define BSD_MAXPARTITIONS 16
-#define OPENBSD_MAXPARTITIONS 16
-#define BSD_FS_UNUSED 0 /* disklabel unused partition entry ID */
-struct bsd_disklabel {
- __le32 d_magic; /* the magic number */
- __s16 d_type; /* drive type */
- __s16 d_subtype; /* controller/d_type specific */
- char d_typename[16]; /* type name, e.g. "eagle" */
- char d_packname[16]; /* pack identifier */
- __u32 d_secsize; /* # of bytes per sector */
- __u32 d_nsectors; /* # of data sectors per track */
- __u32 d_ntracks; /* # of tracks per cylinder */
- __u32 d_ncylinders; /* # of data cylinders per unit */
- __u32 d_secpercyl; /* # of data sectors per cylinder */
- __u32 d_secperunit; /* # of data sectors per unit */
- __u16 d_sparespertrack; /* # of spare sectors per track */
- __u16 d_sparespercyl; /* # of spare sectors per cylinder */
- __u32 d_acylinders; /* # of alt. cylinders per unit */
- __u16 d_rpm; /* rotational speed */
- __u16 d_interleave; /* hardware sector interleave */
- __u16 d_trackskew; /* sector 0 skew, per track */
- __u16 d_cylskew; /* sector 0 skew, per cylinder */
- __u32 d_headswitch; /* head switch time, usec */
- __u32 d_trkseek; /* track-to-track seek, usec */
- __u32 d_flags; /* generic flags */
-#define NDDATA 5
- __u32 d_drivedata[NDDATA]; /* drive-type specific information */
-#define NSPARE 5
- __u32 d_spare[NSPARE]; /* reserved for future use */
- __le32 d_magic2; /* the magic number (again) */
- __le16 d_checksum; /* xor of data incl. partitions */
-
- /* filesystem and partition information: */
- __le16 d_npartitions; /* number of partitions in following */
- __le32 d_bbsize; /* size of boot area at sn0, bytes */
- __le32 d_sbsize; /* max size of fs superblock, bytes */
- struct bsd_partition { /* the partition table */
- __le32 p_size; /* number of sectors in partition */
- __le32 p_offset; /* starting sector */
- __le32 p_fsize; /* filesystem basic fragment size */
- __u8 p_fstype; /* filesystem type, see below */
- __u8 p_frag; /* filesystem fragments per block */
- __le16 p_cpg; /* filesystem cylinders per group */
- } d_partitions[BSD_MAXPARTITIONS]; /* actually may be more */
-};
-
-#endif /* CONFIG_BSD_DISKLABEL */
-
-#ifdef CONFIG_UNIXWARE_DISKLABEL
-/*
- * Unixware slices support by Andrzej Krzysztofowicz <ankry@mif.pg.gda.pl>
- * and Krzysztof G. Baranowski <kgb@knm.org.pl>
- */
-
-#define UNIXWARE_DISKMAGIC (0xCA5E600DUL) /* The disk magic number */
-#define UNIXWARE_DISKMAGIC2 (0x600DDEEEUL) /* The slice table magic nr */
-#define UNIXWARE_NUMSLICE 16
-#define UNIXWARE_FS_UNUSED 0 /* Unused slice entry ID */
-
-struct unixware_slice {
- __le16 s_label; /* label */
- __le16 s_flags; /* permission flags */
- __le32 start_sect; /* starting sector */
- __le32 nr_sects; /* number of sectors in slice */
-};
-
-struct unixware_disklabel {
- __le32 d_type; /* drive type */
- __le32 d_magic; /* the magic number */
- __le32 d_version; /* version number */
- char d_serial[12]; /* serial number of the device */
- __le32 d_ncylinders; /* # of data cylinders per device */
- __le32 d_ntracks; /* # of tracks per cylinder */
- __le32 d_nsectors; /* # of data sectors per track */
- __le32 d_secsize; /* # of bytes per sector */
- __le32 d_part_start; /* # of first sector of this partition */
- __le32 d_unknown1[12]; /* ? */
- __le32 d_alt_tbl; /* byte offset of alternate table */
- __le32 d_alt_len; /* byte length of alternate table */
- __le32 d_phys_cyl; /* # of physical cylinders per device */
- __le32 d_phys_trk; /* # of physical tracks per cylinder */
- __le32 d_phys_sec; /* # of physical sectors per track */
- __le32 d_phys_bytes; /* # of physical bytes per sector */
- __le32 d_unknown2; /* ? */
- __le32 d_unknown3; /* ? */
- __le32 d_pad[8]; /* pad */
-
- struct unixware_vtoc {
- __le32 v_magic; /* the magic number */
- __le32 v_version; /* version number */
- char v_name[8]; /* volume name */
- __le16 v_nslices; /* # of slices */
- __le16 v_unknown1; /* ? */
- __le32 v_reserved[10]; /* reserved */
- struct unixware_slice
- v_slice[UNIXWARE_NUMSLICE]; /* slice headers */
- } vtoc;
-
-}; /* 408 */
-
-#endif /* CONFIG_UNIXWARE_DISKLABEL */
-
-#ifdef CONFIG_MINIX_SUBPARTITION
-# define MINIX_NR_SUBPARTITIONS 4
-#endif /* CONFIG_MINIX_SUBPARTITION */
-
-#define ADDPART_FLAG_NONE 0
-#define ADDPART_FLAG_RAID 1
-#define ADDPART_FLAG_WHOLEDISK 2
-
-extern int blk_alloc_devt(struct hd_struct *part, dev_t *devt);
-extern void blk_free_devt(dev_t devt);
-extern void blk_invalidate_devt(dev_t devt);
extern dev_t blk_lookup_devt(const char *name, int partno);
-extern char *disk_name (struct gendisk *hd, int partno, char *buf);
-
-extern int disk_expand_part_tbl(struct gendisk *disk, int target);
-extern int rescan_partitions(struct gendisk *disk, struct block_device *bdev);
-extern int invalidate_partitions(struct gendisk *disk, struct block_device *bdev);
-extern struct hd_struct * __must_check add_partition(struct gendisk *disk,
- int partno, sector_t start,
- sector_t len, int flags,
- struct partition_meta_info
- *info);
-extern void __delete_partition(struct percpu_ref *);
-extern void delete_partition(struct gendisk *, int);
+
+int bdev_disk_changed(struct block_device *bdev, bool invalidate);
+int blk_add_partitions(struct gendisk *disk, struct block_device *bdev);
+int blk_drop_partitions(struct gendisk *disk, struct block_device *bdev);
extern void printk_all_partitions(void);
extern struct gendisk *__alloc_disk_node(int minors, int node_id);
@@ -644,20 +353,6 @@ extern void blk_register_region(dev_t devt, unsigned long range,
void *data);
extern void blk_unregister_region(dev_t devt, unsigned long range);
-extern ssize_t part_size_show(struct device *dev,
- struct device_attribute *attr, char *buf);
-extern ssize_t part_stat_show(struct device *dev,
- struct device_attribute *attr, char *buf);
-extern ssize_t part_inflight_show(struct device *dev,
- struct device_attribute *attr, char *buf);
-#ifdef CONFIG_FAIL_MAKE_REQUEST
-extern ssize_t part_fail_show(struct device *dev,
- struct device_attribute *attr, char *buf);
-extern ssize_t part_fail_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count);
-#endif /* CONFIG_FAIL_MAKE_REQUEST */
-
#define alloc_disk_node(minors, node_id) \
({ \
static struct lock_class_key __key; \
@@ -676,100 +371,6 @@ extern ssize_t part_fail_store(struct device *dev,
#define alloc_disk(minors) alloc_disk_node(minors, NUMA_NO_NODE)
-static inline int hd_ref_init(struct hd_struct *part)
-{
- if (percpu_ref_init(&part->ref, __delete_partition, 0,
- GFP_KERNEL))
- return -ENOMEM;
- return 0;
-}
-
-static inline void hd_struct_get(struct hd_struct *part)
-{
- percpu_ref_get(&part->ref);
-}
-
-static inline int hd_struct_try_get(struct hd_struct *part)
-{
- return percpu_ref_tryget_live(&part->ref);
-}
-
-static inline void hd_struct_put(struct hd_struct *part)
-{
- percpu_ref_put(&part->ref);
-}
-
-static inline void hd_struct_kill(struct hd_struct *part)
-{
- percpu_ref_kill(&part->ref);
-}
-
-static inline void hd_free_part(struct hd_struct *part)
-{
- free_part_stats(part);
- free_part_info(part);
- percpu_ref_exit(&part->ref);
-}
-
-/*
- * Any access of part->nr_sects which is not protected by partition
- * bd_mutex or gendisk bdev bd_mutex, should be done using this
- * accessor function.
- *
- * Code written along the lines of i_size_read() and i_size_write().
- * CONFIG_PREEMPT case optimizes the case of UP kernel with preemption
- * on.
- */
-static inline sector_t part_nr_sects_read(struct hd_struct *part)
-{
-#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
- sector_t nr_sects;
- unsigned seq;
- do {
- seq = read_seqcount_begin(&part->nr_sects_seq);
- nr_sects = part->nr_sects;
- } while (read_seqcount_retry(&part->nr_sects_seq, seq));
- return nr_sects;
-#elif BITS_PER_LONG==32 && defined(CONFIG_PREEMPT)
- sector_t nr_sects;
-
- preempt_disable();
- nr_sects = part->nr_sects;
- preempt_enable();
- return nr_sects;
-#else
- return part->nr_sects;
-#endif
-}
-
-/*
- * Should be called with mutex lock held (typically bd_mutex) of partition
- * to provide mutual exlusion among writers otherwise seqcount might be
- * left in wrong state leaving the readers spinning infinitely.
- */
-static inline void part_nr_sects_write(struct hd_struct *part, sector_t size)
-{
-#if BITS_PER_LONG==32 && defined(CONFIG_SMP)
- write_seqcount_begin(&part->nr_sects_seq);
- part->nr_sects = size;
- write_seqcount_end(&part->nr_sects_seq);
-#elif BITS_PER_LONG==32 && defined(CONFIG_PREEMPT)
- preempt_disable();
- part->nr_sects = size;
- preempt_enable();
-#else
- part->nr_sects = size;
-#endif
-}
-
-#if defined(CONFIG_BLK_DEV_INTEGRITY)
-extern void blk_integrity_add(struct gendisk *);
-extern void blk_integrity_del(struct gendisk *);
-#else /* CONFIG_BLK_DEV_INTEGRITY */
-static inline void blk_integrity_add(struct gendisk *disk) { }
-static inline void blk_integrity_del(struct gendisk *disk) { }
-#endif /* CONFIG_BLK_DEV_INTEGRITY */
-
#else /* CONFIG_BLOCK */
static inline void printk_all_partitions(void) { }
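A short, hypothetical use of two helpers introduced above, set_capacity_revalidate_and_notify() and disk_has_partitions(); the callback name and the log-only policy are invented for the example.

#include <linux/genhd.h>
#include <linux/printk.h>

static void example_capacity_changed(struct gendisk *disk, sector_t new_sectors)
{
	/* update the size and, when requested, revalidate and emit a uevent */
	set_capacity_revalidate_and_notify(disk, new_sectors, true);

	if (disk_has_partitions(disk))
		pr_info("%s: capacity changed with partitions present\n",
			disk->disk_name);
}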
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 61f2f6ff9467..e5b817cb86e7 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -612,6 +612,8 @@ static inline bool pm_suspended_storage(void)
/* The below functions must be run on a range from a single zone. */
extern int alloc_contig_range(unsigned long start, unsigned long end,
unsigned migratetype, gfp_t gfp_mask);
+extern struct page *alloc_contig_pages(unsigned long nr_pages, gfp_t gfp_mask,
+ int nid, nodemask_t *nodemask);
#endif
void free_contig_range(unsigned long pfn, unsigned int nr_pages);
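alloc_contig_pages() pairs with the existing free_contig_range(). The sketch below shows the intended calling convention, assuming a kernel where the allocator is built in (it sits behind the same config as alloc_contig_range()); the actual use of the range is elided.

#include <linux/gfp.h>
#include <linux/mm.h>

static int example_use_contig_range(unsigned long nr_pages)
{
	struct page *page;

	page = alloc_contig_pages(nr_pages, GFP_KERNEL, NUMA_NO_NODE, NULL);
	if (!page)
		return -ENOMEM;

	/* ... use the physically contiguous range starting at page ... */

	free_contig_range(page_to_pfn(page), nr_pages);
	return 0;
}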
diff --git a/include/linux/gpio/consumer.h b/include/linux/gpio/consumer.h
index b70af921c614..bf2d017dd7b7 100644
--- a/include/linux/gpio/consumer.h
+++ b/include/linux/gpio/consumer.h
@@ -158,6 +158,7 @@ int gpiod_set_raw_array_value_cansleep(unsigned int array_size,
int gpiod_set_debounce(struct gpio_desc *desc, unsigned debounce);
int gpiod_set_transitory(struct gpio_desc *desc, bool transitory);
+void gpiod_toggle_active_low(struct gpio_desc *desc);
int gpiod_is_active_low(const struct gpio_desc *desc);
int gpiod_cansleep(const struct gpio_desc *desc);
@@ -176,11 +177,15 @@ struct gpio_desc *fwnode_get_named_gpiod(struct fwnode_handle *fwnode,
const char *propname, int index,
enum gpiod_flags dflags,
const char *label);
-struct gpio_desc *devm_fwnode_get_index_gpiod_from_child(struct device *dev,
- const char *con_id, int index,
- struct fwnode_handle *child,
- enum gpiod_flags flags,
- const char *label);
+struct gpio_desc *fwnode_gpiod_get_index(struct fwnode_handle *fwnode,
+ const char *con_id, int index,
+ enum gpiod_flags flags,
+ const char *label);
+struct gpio_desc *devm_fwnode_gpiod_get_index(struct device *dev,
+ struct fwnode_handle *child,
+ const char *con_id, int index,
+ enum gpiod_flags flags,
+ const char *label);
#else /* CONFIG_GPIOLIB */
@@ -479,6 +484,12 @@ static inline int gpiod_set_transitory(struct gpio_desc *desc, bool transitory)
return -ENOSYS;
}
+static inline void gpiod_toggle_active_low(struct gpio_desc *desc)
+{
+ /* GPIO can never have been requested */
+ WARN_ON(desc);
+}
+
static inline int gpiod_is_active_low(const struct gpio_desc *desc)
{
/* GPIO can never have been requested */
@@ -532,17 +543,48 @@ struct gpio_desc *fwnode_get_named_gpiod(struct fwnode_handle *fwnode,
}
static inline
+struct gpio_desc *fwnode_gpiod_get_index(struct fwnode_handle *fwnode,
+ const char *con_id, int index,
+ enum gpiod_flags flags,
+ const char *label)
+{
+ return ERR_PTR(-ENOSYS);
+}
+
+static inline
+struct gpio_desc *devm_fwnode_gpiod_get_index(struct device *dev,
+ struct fwnode_handle *fwnode,
+ const char *con_id, int index,
+ enum gpiod_flags flags,
+ const char *label)
+{
+ return ERR_PTR(-ENOSYS);
+}
+
+#endif /* CONFIG_GPIOLIB */
+
+static inline
+struct gpio_desc *devm_fwnode_gpiod_get(struct device *dev,
+ struct fwnode_handle *fwnode,
+ const char *con_id,
+ enum gpiod_flags flags,
+ const char *label)
+{
+ return devm_fwnode_gpiod_get_index(dev, fwnode, con_id, 0,
+ flags, label);
+}
+
+static inline
struct gpio_desc *devm_fwnode_get_index_gpiod_from_child(struct device *dev,
const char *con_id, int index,
struct fwnode_handle *child,
enum gpiod_flags flags,
const char *label)
{
- return ERR_PTR(-ENOSYS);
+ return devm_fwnode_gpiod_get_index(dev, child, con_id, index,
+ flags, label);
}
-#endif /* CONFIG_GPIOLIB */
-
static inline
struct gpio_desc *devm_fwnode_get_gpiod_from_child(struct device *dev,
const char *con_id,
@@ -550,8 +592,7 @@ struct gpio_desc *devm_fwnode_get_gpiod_from_child(struct device *dev,
enum gpiod_flags flags,
const char *label)
{
- return devm_fwnode_get_index_gpiod_from_child(dev, con_id, 0, child,
- flags, label);
+ return devm_fwnode_gpiod_get_index(dev, child, con_id, 0, flags, label);
}
#if IS_ENABLED(CONFIG_GPIOLIB) && IS_ENABLED(CONFIG_OF_GPIO)
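The sketch below shows a consumer moving to the new devm_fwnode_gpiod_get() spelling; the "reset" con_id, the label string and the decision to flip polarity are assumptions made up for the example.

#include <linux/err.h>
#include <linux/gpio/consumer.h>

static int example_get_child_gpio(struct device *dev,
				  struct fwnode_handle *child,
				  bool firmware_polarity_is_inverted)
{
	struct gpio_desc *reset;

	reset = devm_fwnode_gpiod_get(dev, child, "reset",
				      GPIOD_OUT_LOW, "example-reset");
	if (IS_ERR(reset))
		return PTR_ERR(reset);

	/* quirk: some firmware describes the line with inverted polarity */
	if (firmware_polarity_is_inverted)
		gpiod_toggle_active_low(reset);

	return 0;
}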
diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h
index 5dd9c982e2cb..6ef05bccc0a6 100644
--- a/include/linux/gpio/driver.h
+++ b/include/linux/gpio/driver.h
@@ -22,6 +22,9 @@ enum gpio_lookup_flags;
struct gpio_chip;
+#define GPIO_LINE_DIRECTION_IN 1
+#define GPIO_LINE_DIRECTION_OUT 0
+
/**
* struct gpio_irq_chip - GPIO interrupt controller
*/
@@ -91,16 +94,15 @@ struct gpio_irq_chip {
unsigned int *parent_type);
/**
- * @populate_parent_fwspec:
+	 * @populate_parent_alloc_arg:
*
- * This optional callback populates the &struct irq_fwspec for the
- * parent's IRQ domain. If this is not specified, then
+ * This optional callback allocates and populates the specific struct
+ * for the parent's IRQ domain. If this is not specified, then
* &gpiochip_populate_parent_fwspec_twocell will be used. A four-cell
* variant named &gpiochip_populate_parent_fwspec_fourcell is also
* available.
*/
- void (*populate_parent_fwspec)(struct gpio_chip *chip,
- struct irq_fwspec *fwspec,
+ void *(*populate_parent_alloc_arg)(struct gpio_chip *chip,
unsigned int parent_hwirq,
unsigned int parent_type);
@@ -286,6 +288,9 @@ struct gpio_irq_chip {
* state (such as pullup/pulldown configuration).
* @init_valid_mask: optional routine to initialize @valid_mask, to be used if
* not all GPIOs are valid.
+ * @add_pin_ranges: optional routine to initialize pin ranges, to be used when
+ *	the device requires special mapping of the pins that provide GPIO
+ *	functionality. It is called after adding the GPIO chip and before
+ *	adding the IRQ chip.
* @base: identifies the first GPIO number handled by this chip;
* or, if negative during registration, requests dynamic ID allocation.
* DEPRECATION: providing anything non-negative and nailing the base
@@ -376,6 +381,8 @@ struct gpio_chip {
unsigned long *valid_mask,
unsigned int ngpios);
+ int (*add_pin_ranges)(struct gpio_chip *chip);
+
int base;
u16 ngpio;
const char *const *names;
@@ -529,29 +536,27 @@ struct bgpio_pdata {
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
-void gpiochip_populate_parent_fwspec_twocell(struct gpio_chip *chip,
- struct irq_fwspec *fwspec,
+void *gpiochip_populate_parent_fwspec_twocell(struct gpio_chip *chip,
unsigned int parent_hwirq,
unsigned int parent_type);
-void gpiochip_populate_parent_fwspec_fourcell(struct gpio_chip *chip,
- struct irq_fwspec *fwspec,
+void *gpiochip_populate_parent_fwspec_fourcell(struct gpio_chip *chip,
unsigned int parent_hwirq,
unsigned int parent_type);
#else
-static inline void gpiochip_populate_parent_fwspec_twocell(struct gpio_chip *chip,
- struct irq_fwspec *fwspec,
+static inline void *gpiochip_populate_parent_fwspec_twocell(struct gpio_chip *chip,
unsigned int parent_hwirq,
unsigned int parent_type)
{
+ return NULL;
}
-static inline void gpiochip_populate_parent_fwspec_fourcell(struct gpio_chip *chip,
- struct irq_fwspec *fwspec,
+static inline void *gpiochip_populate_parent_fwspec_fourcell(struct gpio_chip *chip,
unsigned int parent_hwirq,
unsigned int parent_type)
{
+ return NULL;
}
#endif /* CONFIG_IRQ_DOMAIN_HIERARCHY */
@@ -577,11 +582,6 @@ int gpiochip_irq_domain_activate(struct irq_domain *domain,
void gpiochip_irq_domain_deactivate(struct irq_domain *domain,
struct irq_data *data);
-void gpiochip_set_chained_irqchip(struct gpio_chip *gpiochip,
- struct irq_chip *irqchip,
- unsigned int parent_irq,
- irq_flow_handler_t parent_handler);
-
void gpiochip_set_nested_irqchip(struct gpio_chip *gpiochip,
struct irq_chip *irqchip,
unsigned int parent_irq);
@@ -707,7 +707,8 @@ gpiochip_remove_pin_ranges(struct gpio_chip *chip)
#endif /* CONFIG_PINCTRL */
-struct gpio_desc *gpiochip_request_own_desc(struct gpio_chip *chip, u16 hwnum,
+struct gpio_desc *gpiochip_request_own_desc(struct gpio_chip *chip,
+ unsigned int hwnum,
const char *label,
enum gpio_lookup_flags lflags,
enum gpiod_flags dflags);
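To illustrate the new direction constants, a .get_direction() callback is expected to return them instead of raw 0/1 values. Everything about the register layout below (struct example_gpio, EXAMPLE_DIR_REG) is invented for the sketch.

#include <linux/bits.h>
#include <linux/gpio/driver.h>
#include <linux/io.h>

#define EXAMPLE_DIR_REG	0x04	/* hypothetical direction register */

struct example_gpio {
	void __iomem *base;
};

static int example_get_direction(struct gpio_chip *chip, unsigned int offset)
{
	struct example_gpio *priv = gpiochip_get_data(chip);

	if (readl(priv->base + EXAMPLE_DIR_REG) & BIT(offset))
		return GPIO_LINE_DIRECTION_OUT;

	return GPIO_LINE_DIRECTION_IN;
}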
diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h
index da0af631ded5..7c8b82f69288 100644
--- a/include/linux/hardirq.h
+++ b/include/linux/hardirq.h
@@ -37,7 +37,7 @@ extern void rcu_nmi_exit(void);
do { \
account_irq_enter_time(current); \
preempt_count_add(HARDIRQ_OFFSET); \
- trace_hardirq_enter(); \
+ lockdep_hardirq_enter(); \
} while (0)
/*
@@ -50,7 +50,7 @@ extern void irq_enter(void);
*/
#define __irq_exit() \
do { \
- trace_hardirq_exit(); \
+ lockdep_hardirq_exit(); \
account_irq_exit_time(current); \
preempt_count_sub(HARDIRQ_OFFSET); \
} while (0)
@@ -74,12 +74,12 @@ extern void irq_exit(void);
BUG_ON(in_nmi()); \
preempt_count_add(NMI_OFFSET + HARDIRQ_OFFSET); \
rcu_nmi_enter(); \
- trace_hardirq_enter(); \
+ lockdep_hardirq_enter(); \
} while (0)
#define nmi_exit() \
do { \
- trace_hardirq_exit(); \
+ lockdep_hardirq_exit(); \
rcu_nmi_exit(); \
BUG_ON(!in_nmi()); \
preempt_count_sub(NMI_OFFSET + HARDIRQ_OFFSET); \
diff --git a/include/linux/hid.h b/include/linux/hid.h
index cd41f209043f..875f71132b14 100644
--- a/include/linux/hid.h
+++ b/include/linux/hid.h
@@ -492,7 +492,7 @@ struct hid_report_enum {
};
#define HID_MIN_BUFFER_SIZE 64 /* make sure there is at least a packet size of space */
-#define HID_MAX_BUFFER_SIZE 4096 /* 4kb */
+#define HID_MAX_BUFFER_SIZE 8192 /* 8kb */
#define HID_CONTROL_FIFO_SIZE 256 /* to init devices with >100 reports */
#define HID_OUTPUT_FIFO_SIZE 64
diff --git a/include/linux/hmm.h b/include/linux/hmm.h
index 3fec513b9c00..ddf9f7144c43 100644
--- a/include/linux/hmm.h
+++ b/include/linux/hmm.h
@@ -62,37 +62,12 @@
#include <linux/kconfig.h>
#include <asm/pgtable.h>
-#ifdef CONFIG_HMM_MIRROR
-
#include <linux/device.h>
#include <linux/migrate.h>
#include <linux/memremap.h>
#include <linux/completion.h>
#include <linux/mmu_notifier.h>
-
-/*
- * struct hmm - HMM per mm struct
- *
- * @mm: mm struct this HMM struct is bound to
- * @lock: lock protecting ranges list
- * @ranges: list of range being snapshotted
- * @mirrors: list of mirrors for this mm
- * @mmu_notifier: mmu notifier to track updates to CPU page table
- * @mirrors_sem: read/write semaphore protecting the mirrors list
- * @wq: wait queue for user waiting on a range invalidation
- * @notifiers: count of active mmu notifiers
- */
-struct hmm {
- struct mmu_notifier mmu_notifier;
- spinlock_t ranges_lock;
- struct list_head ranges;
- struct list_head mirrors;
- struct rw_semaphore mirrors_sem;
- wait_queue_head_t wq;
- long notifiers;
-};
-
/*
* hmm_pfn_flag_e - HMM flag enums
*
@@ -145,6 +120,8 @@ enum hmm_pfn_value_e {
/*
* struct hmm_range - track invalidation lock on virtual address range
*
+ * @notifier: a mmu_interval_notifier that includes the start/end
+ * @notifier_seq: result of mmu_interval_read_begin()
* @hmm: the core HMM structure this range is active against
* @vma: the vm area struct for the range
* @list: all range lock are on a list
@@ -159,8 +136,8 @@ enum hmm_pfn_value_e {
* @valid: pfns array did not change since it has been fill by an HMM function
*/
struct hmm_range {
- struct hmm *hmm;
- struct list_head list;
+ struct mmu_interval_notifier *notifier;
+ unsigned long notifier_seq;
unsigned long start;
unsigned long end;
uint64_t *pfns;
@@ -169,33 +146,9 @@ struct hmm_range {
uint64_t default_flags;
uint64_t pfn_flags_mask;
uint8_t pfn_shift;
- bool valid;
};
/*
- * hmm_range_wait_until_valid() - wait for range to be valid
- * @range: range affected by invalidation to wait on
- * @timeout: time out for wait in ms (ie abort wait after that period of time)
- * Return: true if the range is valid, false otherwise.
- */
-static inline bool hmm_range_wait_until_valid(struct hmm_range *range,
- unsigned long timeout)
-{
- return wait_event_timeout(range->hmm->wq, range->valid,
- msecs_to_jiffies(timeout)) != 0;
-}
-
-/*
- * hmm_range_valid() - test if a range is valid or not
- * @range: range
- * Return: true if the range is valid, false otherwise.
- */
-static inline bool hmm_range_valid(struct hmm_range *range)
-{
- return range->valid;
-}
-
-/*
* hmm_device_entry_to_page() - return struct page pointed to by a device entry
* @range: range use to decode device entry value
* @entry: device entry value to get corresponding struct page from
@@ -265,120 +218,6 @@ static inline uint64_t hmm_device_entry_from_pfn(const struct hmm_range *range,
}
/*
- * Mirroring: how to synchronize device page table with CPU page table.
- *
- * A device driver that is participating in HMM mirroring must always
- * synchronize with CPU page table updates. For this, device drivers can either
- * directly use mmu_notifier APIs or they can use the hmm_mirror API. Device
- * drivers can decide to register one mirror per device per process, or just
- * one mirror per process for a group of devices. The pattern is:
- *
- * int device_bind_address_space(..., struct mm_struct *mm, ...)
- * {
- * struct device_address_space *das;
- *
- * // Device driver specific initialization, and allocation of das
- * // which contains an hmm_mirror struct as one of its fields.
- * ...
- *
- * ret = hmm_mirror_register(&das->mirror, mm, &device_mirror_ops);
- * if (ret) {
- * // Cleanup on error
- * return ret;
- * }
- *
- * // Other device driver specific initialization
- * ...
- * }
- *
- * Once an hmm_mirror is registered for an address space, the device driver
- * will get callbacks through sync_cpu_device_pagetables() operation (see
- * hmm_mirror_ops struct).
- *
- * Device driver must not free the struct containing the hmm_mirror struct
- * before calling hmm_mirror_unregister(). The expected usage is to do that when
- * the device driver is unbinding from an address space.
- *
- *
- * void device_unbind_address_space(struct device_address_space *das)
- * {
- * // Device driver specific cleanup
- * ...
- *
- * hmm_mirror_unregister(&das->mirror);
- *
- * // Other device driver specific cleanup, and now das can be freed
- * ...
- * }
- */
-
-struct hmm_mirror;
-
-/*
- * struct hmm_mirror_ops - HMM mirror device operations callback
- *
- * @update: callback to update range on a device
- */
-struct hmm_mirror_ops {
- /* release() - release hmm_mirror
- *
- * @mirror: pointer to struct hmm_mirror
- *
- * This is called when the mm_struct is being released. The callback
- * must ensure that all access to any pages obtained from this mirror
- * is halted before the callback returns. All future access should
- * fault.
- */
- void (*release)(struct hmm_mirror *mirror);
-
- /* sync_cpu_device_pagetables() - synchronize page tables
- *
- * @mirror: pointer to struct hmm_mirror
- * @update: update information (see struct mmu_notifier_range)
- * Return: -EAGAIN if mmu_notifier_range_blockable(update) is false
- * and callback needs to block, 0 otherwise.
- *
- * This callback ultimately originates from mmu_notifiers when the CPU
- * page table is updated. The device driver must update its page table
- * in response to this callback. The update argument tells what action
- * to perform.
- *
- * The device driver must not return from this callback until the device
- * page tables are completely updated (TLBs flushed, etc); this is a
- * synchronous call.
- */
- int (*sync_cpu_device_pagetables)(
- struct hmm_mirror *mirror,
- const struct mmu_notifier_range *update);
-};
-
-/*
- * struct hmm_mirror - mirror struct for a device driver
- *
- * @hmm: pointer to struct hmm (which is unique per mm_struct)
- * @ops: device driver callback for HMM mirror operations
- * @list: for list of mirrors of a given mm
- *
- * Each address space (mm_struct) being mirrored by a device must register one
- * instance of an hmm_mirror struct with HMM. HMM will track the list of all
- * mirrors for each mm_struct.
- */
-struct hmm_mirror {
- struct hmm *hmm;
- const struct hmm_mirror_ops *ops;
- struct list_head list;
-};
-
-int hmm_mirror_register(struct hmm_mirror *mirror, struct mm_struct *mm);
-void hmm_mirror_unregister(struct hmm_mirror *mirror);
-
-/*
- * Please see Documentation/vm/hmm.rst for how to use the range API.
- */
-int hmm_range_register(struct hmm_range *range, struct hmm_mirror *mirror);
-void hmm_range_unregister(struct hmm_range *range);
-
-/*
* Retry fault if non-blocking, drop mmap_sem and return -EAGAIN in that case.
*/
#define HMM_FAULT_ALLOW_RETRY (1 << 0)
@@ -386,16 +225,17 @@ void hmm_range_unregister(struct hmm_range *range);
/* Don't fault in missing PTEs, just snapshot the current state. */
#define HMM_FAULT_SNAPSHOT (1 << 1)
+#ifdef CONFIG_HMM_MIRROR
+/*
+ * Please see Documentation/vm/hmm.rst for how to use the range API.
+ */
long hmm_range_fault(struct hmm_range *range, unsigned int flags);
-
-long hmm_range_dma_map(struct hmm_range *range,
- struct device *device,
- dma_addr_t *daddrs,
- unsigned int flags);
-long hmm_range_dma_unmap(struct hmm_range *range,
- struct device *device,
- dma_addr_t *daddrs,
- bool dirty);
+#else
+static inline long hmm_range_fault(struct hmm_range *range, unsigned int flags)
+{
+ return -EOPNOTSUPP;
+}
+#endif
/*
* HMM_RANGE_DEFAULT_TIMEOUT - default timeout (ms) when waiting for a range
@@ -406,6 +246,4 @@ long hmm_range_dma_unmap(struct hmm_range *range,
*/
#define HMM_RANGE_DEFAULT_TIMEOUT 1000
-#endif /* IS_ENABLED(CONFIG_HMM_MIRROR) */
-
#endif /* LINUX_HMM_H */
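The reworked hmm_range ties faulting to an mmu_interval_notifier instead of the removed mirror machinery. Below is a rough sketch of the begin/fault/retry loop a driver is expected to run; locking of the driver's own page tables and -EBUSY handling are omitted, and the exact shape varies per driver.

#include <linux/hmm.h>
#include <linux/mm.h>
#include <linux/mmu_notifier.h>

static long example_fault_range(struct hmm_range *range)
{
	long ret;

	do {
		range->notifier_seq = mmu_interval_read_begin(range->notifier);

		down_read(&range->notifier->mm->mmap_sem);
		ret = hmm_range_fault(range, 0);
		up_read(&range->notifier->mm->mmap_sem);
		if (ret < 0)
			return ret;

		/* retry if the CPU page tables changed underneath us */
	} while (mmu_interval_read_retry(range->notifier,
					 range->notifier_seq));

	return ret;
}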
diff --git a/include/linux/host1x.h b/include/linux/host1x.h
index e6eea45e1154..62d216ff1097 100644
--- a/include/linux/host1x.h
+++ b/include/linux/host1x.h
@@ -18,22 +18,28 @@ enum host1x_class {
};
struct host1x_client;
+struct iommu_group;
/**
* struct host1x_client_ops - host1x client operations
* @init: host1x client initialization code
* @exit: host1x client tear down code
+ * @suspend: host1x client suspend code
+ * @resume: host1x client resume code
*/
struct host1x_client_ops {
int (*init)(struct host1x_client *client);
int (*exit)(struct host1x_client *client);
+ int (*suspend)(struct host1x_client *client);
+ int (*resume)(struct host1x_client *client);
};
/**
* struct host1x_client - host1x client structure
* @list: list node for the host1x client
- * @parent: pointer to struct device representing the host1x controller
+ * @host: pointer to struct device representing the host1x controller
* @dev: pointer to struct device backing this host1x client
+ * @group: IOMMU group that this client is a member of
* @ops: host1x client operations
* @class: host1x class represented by this client
* @channel: host1x channel associated with this client
@@ -42,8 +48,9 @@ struct host1x_client_ops {
*/
struct host1x_client {
struct list_head list;
- struct device *parent;
+ struct device *host;
struct device *dev;
+ struct iommu_group *group;
const struct host1x_client_ops *ops;
@@ -52,6 +59,10 @@ struct host1x_client {
struct host1x_syncpt **syncpts;
unsigned int num_syncpts;
+
+ struct host1x_client *parent;
+ unsigned int usecount;
+ struct mutex lock;
};
/*
@@ -64,12 +75,11 @@ struct sg_table;
struct host1x_bo_ops {
struct host1x_bo *(*get)(struct host1x_bo *bo);
void (*put)(struct host1x_bo *bo);
- dma_addr_t (*pin)(struct host1x_bo *bo, struct sg_table **sgt);
- void (*unpin)(struct host1x_bo *bo, struct sg_table *sgt);
+ struct sg_table *(*pin)(struct device *dev, struct host1x_bo *bo,
+ dma_addr_t *phys);
+ void (*unpin)(struct device *dev, struct sg_table *sgt);
void *(*mmap)(struct host1x_bo *bo);
void (*munmap)(struct host1x_bo *bo, void *addr);
- void *(*kmap)(struct host1x_bo *bo, unsigned int pagenum);
- void (*kunmap)(struct host1x_bo *bo, unsigned int pagenum, void *addr);
};
struct host1x_bo {
@@ -92,15 +102,17 @@ static inline void host1x_bo_put(struct host1x_bo *bo)
bo->ops->put(bo);
}
-static inline dma_addr_t host1x_bo_pin(struct host1x_bo *bo,
- struct sg_table **sgt)
+static inline struct sg_table *host1x_bo_pin(struct device *dev,
+ struct host1x_bo *bo,
+ dma_addr_t *phys)
{
- return bo->ops->pin(bo, sgt);
+ return bo->ops->pin(dev, bo, phys);
}
-static inline void host1x_bo_unpin(struct host1x_bo *bo, struct sg_table *sgt)
+static inline void host1x_bo_unpin(struct device *dev, struct host1x_bo *bo,
+ struct sg_table *sgt)
{
- bo->ops->unpin(bo, sgt);
+ bo->ops->unpin(dev, sgt);
}
static inline void *host1x_bo_mmap(struct host1x_bo *bo)
@@ -113,17 +125,6 @@ static inline void host1x_bo_munmap(struct host1x_bo *bo, void *addr)
bo->ops->munmap(bo, addr);
}
-static inline void *host1x_bo_kmap(struct host1x_bo *bo, unsigned int pagenum)
-{
- return bo->ops->kmap(bo, pagenum);
-}
-
-static inline void host1x_bo_kunmap(struct host1x_bo *bo,
- unsigned int pagenum, void *addr)
-{
- bo->ops->kunmap(bo, pagenum, addr);
-}
-
/*
* host1x syncpoints
*/
@@ -158,7 +159,7 @@ u32 host1x_syncpt_base_id(struct host1x_syncpt_base *base);
struct host1x_channel;
struct host1x_job;
-struct host1x_channel *host1x_channel_request(struct device *dev);
+struct host1x_channel *host1x_channel_request(struct host1x_client *client);
struct host1x_channel *host1x_channel_get(struct host1x_channel *channel);
void host1x_channel_put(struct host1x_channel *channel);
int host1x_job_submit(struct host1x_job *job);
@@ -167,6 +168,9 @@ int host1x_job_submit(struct host1x_job *job);
* host1x job
*/
+#define HOST1X_RELOC_READ (1 << 0)
+#define HOST1X_RELOC_WRITE (1 << 1)
+
struct host1x_reloc {
struct {
struct host1x_bo *bo;
@@ -177,6 +181,7 @@ struct host1x_reloc {
unsigned long offset;
} target;
unsigned long shift;
+ unsigned long flags;
};
struct host1x_job {
@@ -312,6 +317,9 @@ int host1x_device_exit(struct host1x_device *device);
int host1x_client_register(struct host1x_client *client);
int host1x_client_unregister(struct host1x_client *client);
+int host1x_client_suspend(struct host1x_client *client);
+int host1x_client_resume(struct host1x_client *client);
+
struct tegra_mipi_device;
struct tegra_mipi_device *tegra_mipi_request(struct device *device);
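A sketch of how a client might use the reworked pin/unpin helpers above, which now hand back a scatterlist table mapped for a specific device. The ERR_PTR error convention is assumed and the command-stream programming is elided.

#include <linux/err.h>
#include <linux/host1x.h>

static int example_pin_bo(struct device *dev, struct host1x_bo *bo)
{
	struct sg_table *sgt;
	dma_addr_t phys;

	sgt = host1x_bo_pin(dev, bo, &phys);
	if (IS_ERR(sgt))
		return PTR_ERR(sgt);

	/* ... program "phys" into the command stream ... */

	host1x_bo_unpin(dev, bo, sgt);
	return 0;
}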
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index 1b9a51a1bccb..15c8ac313678 100644
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -456,12 +456,18 @@ extern u64 hrtimer_next_event_without(const struct hrtimer *exclude);
extern bool hrtimer_active(const struct hrtimer *timer);
-/*
- * Helper function to check, whether the timer is on one of the queues
+/**
+ * hrtimer_is_queued - check whether the timer is on one of the queues
+ * @timer: Timer to check
+ *
+ * Returns: True if the timer is queued, false otherwise
+ *
+ * The function can be used locklessly, but it only gives a snapshot of the
+ * current state.
*/
-static inline int hrtimer_is_queued(struct hrtimer *timer)
+static inline bool hrtimer_is_queued(struct hrtimer *timer)
{
- return timer->state & HRTIMER_STATE_ENQUEUED;
+ /* The READ_ONCE pairs with the update functions of timer->state */
+ return !!(READ_ONCE(timer->state) & HRTIMER_STATE_ENQUEUED);
}
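A tiny example of the lockless check: because the result is only a snapshot, it suits opportunistic or statistical use rather than serialization. The restart policy below is made up for illustration.

#include <linux/hrtimer.h>

static void example_poke_timer(struct hrtimer *timer, ktime_t delta)
{
	/* snapshot only; a concurrent expiry/start can still race with this */
	if (!hrtimer_is_queued(timer))
		hrtimer_start(timer, delta, HRTIMER_MODE_REL);
}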
/*
@@ -502,8 +508,7 @@ static inline u64 hrtimer_forward_now(struct hrtimer *timer,
/* Precise sleep: */
extern int nanosleep_copyout(struct restart_block *, struct timespec64 *);
-extern long hrtimer_nanosleep(const struct timespec64 *rqtp,
- const enum hrtimer_mode mode,
+extern long hrtimer_nanosleep(ktime_t rqtp, const enum hrtimer_mode mode,
const clockid_t clockid);
extern int schedule_hrtimeout_range(ktime_t *expires, u64 delta,
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index 93d5cf0bc716..5aca3d1bdb32 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -160,6 +160,7 @@ extern unsigned long thp_get_unmapped_area(struct file *filp,
extern void prep_transhuge_page(struct page *page);
extern void free_transhuge_page(struct page *page);
+bool is_transparent_hugepage(struct page *page);
bool can_split_huge_page(struct page *page, int *pextra_pins);
int split_huge_page_to_list(struct page *page, struct list_head *list);
@@ -216,7 +217,6 @@ static inline int is_swap_pmd(pmd_t pmd)
static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
struct vm_area_struct *vma)
{
- VM_BUG_ON_VMA(!rwsem_is_locked(&vma->vm_mm->mmap_sem), vma);
if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd))
return __pmd_trans_huge_lock(pmd, vma);
else
@@ -225,7 +225,6 @@ static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
static inline spinlock_t *pud_trans_huge_lock(pud_t *pud,
struct vm_area_struct *vma)
{
- VM_BUG_ON_VMA(!rwsem_is_locked(&vma->vm_mm->mmap_sem), vma);
if (pud_trans_huge(*pud) || pud_devmap(*pud))
return __pud_trans_huge_lock(pud, vma);
else
@@ -310,6 +309,11 @@ static inline bool transhuge_vma_suitable(struct vm_area_struct *vma,
static inline void prep_transhuge_page(struct page *page) {}
+static inline bool is_transparent_hugepage(struct page *page)
+{
+ return false;
+}
+
#define transparent_hugepage_flags 0UL
#define thp_get_unmapped_area NULL
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 53fc34f930d0..1e897e4168ac 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -105,8 +105,7 @@ void move_hugetlb_state(struct page *oldpage, struct page *newpage, int reason);
void free_huge_page(struct page *page);
void hugetlb_fix_reserve_counts(struct inode *inode);
extern struct mutex *hugetlb_fault_mutex_table;
-u32 hugetlb_fault_mutex_hash(struct hstate *h, struct address_space *mapping,
- pgoff_t idx, unsigned long address);
+u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx);
pte_t *huge_pmd_share(struct mm_struct *mm, unsigned long addr, pud_t *pud);
@@ -164,38 +163,130 @@ static inline void adjust_range_if_pmd_sharing_possible(
{
}
-#define follow_hugetlb_page(m,v,p,vs,a,b,i,w,n) ({ BUG(); 0; })
-#define follow_huge_addr(mm, addr, write) ERR_PTR(-EINVAL)
-#define copy_hugetlb_page_range(src, dst, vma) ({ BUG(); 0; })
+static inline long follow_hugetlb_page(struct mm_struct *mm,
+ struct vm_area_struct *vma, struct page **pages,
+ struct vm_area_struct **vmas, unsigned long *position,
+ unsigned long *nr_pages, long i, unsigned int flags,
+ int *nonblocking)
+{
+ BUG();
+ return 0;
+}
+
+static inline struct page *follow_huge_addr(struct mm_struct *mm,
+ unsigned long address, int write)
+{
+ return ERR_PTR(-EINVAL);
+}
+
+static inline int copy_hugetlb_page_range(struct mm_struct *dst,
+ struct mm_struct *src, struct vm_area_struct *vma)
+{
+ BUG();
+ return 0;
+}
+
static inline void hugetlb_report_meminfo(struct seq_file *m)
{
}
-#define hugetlb_report_node_meminfo(n, buf) 0
+
+static inline int hugetlb_report_node_meminfo(int nid, char *buf)
+{
+ return 0;
+}
+
static inline void hugetlb_show_meminfo(void)
{
}
-#define follow_huge_pd(vma, addr, hpd, flags, pdshift) NULL
-#define follow_huge_pmd(mm, addr, pmd, flags) NULL
-#define follow_huge_pud(mm, addr, pud, flags) NULL
-#define follow_huge_pgd(mm, addr, pgd, flags) NULL
-#define prepare_hugepage_range(file, addr, len) (-EINVAL)
-#define pmd_huge(x) 0
-#define pud_huge(x) 0
-#define is_hugepage_only_range(mm, addr, len) 0
-#define hugetlb_free_pgd_range(tlb, addr, end, floor, ceiling) ({BUG(); 0; })
-#define hugetlb_mcopy_atomic_pte(dst_mm, dst_pte, dst_vma, dst_addr, \
- src_addr, pagep) ({ BUG(); 0; })
-#define huge_pte_offset(mm, address, sz) 0
+
+static inline struct page *follow_huge_pd(struct vm_area_struct *vma,
+ unsigned long address, hugepd_t hpd, int flags,
+ int pdshift)
+{
+ return NULL;
+}
+
+static inline struct page *follow_huge_pmd(struct mm_struct *mm,
+ unsigned long address, pmd_t *pmd, int flags)
+{
+ return NULL;
+}
+
+static inline struct page *follow_huge_pud(struct mm_struct *mm,
+ unsigned long address, pud_t *pud, int flags)
+{
+ return NULL;
+}
+
+static inline struct page *follow_huge_pgd(struct mm_struct *mm,
+ unsigned long address, pgd_t *pgd, int flags)
+{
+ return NULL;
+}
+
+static inline int prepare_hugepage_range(struct file *file,
+ unsigned long addr, unsigned long len)
+{
+ return -EINVAL;
+}
+
+static inline int pmd_huge(pmd_t pmd)
+{
+ return 0;
+}
+
+static inline int pud_huge(pud_t pud)
+{
+ return 0;
+}
+
+static inline int is_hugepage_only_range(struct mm_struct *mm,
+ unsigned long addr, unsigned long len)
+{
+ return 0;
+}
+
+static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb,
+ unsigned long addr, unsigned long end,
+ unsigned long floor, unsigned long ceiling)
+{
+ BUG();
+}
+
+static inline int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
+ pte_t *dst_pte,
+ struct vm_area_struct *dst_vma,
+ unsigned long dst_addr,
+ unsigned long src_addr,
+ struct page **pagep)
+{
+ BUG();
+ return 0;
+}
+
+static inline pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr,
+ unsigned long sz)
+{
+ return NULL;
+}
static inline bool isolate_huge_page(struct page *page, struct list_head *list)
{
return false;
}
-#define putback_active_hugepage(p) do {} while (0)
-#define move_hugetlb_state(old, new, reason) do {} while (0)
-static inline unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
- unsigned long address, unsigned long end, pgprot_t newprot)
+static inline void putback_active_hugepage(struct page *page)
+{
+}
+
+static inline void move_hugetlb_state(struct page *oldpage,
+ struct page *newpage, int reason)
+{
+}
+
+static inline unsigned long hugetlb_change_protection(
+ struct vm_area_struct *vma, unsigned long address,
+ unsigned long end, pgprot_t newprot)
{
return 0;
}
@@ -213,9 +304,10 @@ static inline void __unmap_hugepage_range(struct mmu_gather *tlb,
{
BUG();
}
+
static inline vm_fault_t hugetlb_fault(struct mm_struct *mm,
- struct vm_area_struct *vma, unsigned long address,
- unsigned int flags)
+ struct vm_area_struct *vma, unsigned long address,
+ unsigned int flags)
{
BUG();
return 0;
@@ -340,7 +432,8 @@ struct hstate {
unsigned int surplus_huge_pages_node[MAX_NUMNODES];
#ifdef CONFIG_CGROUP_HUGETLB
/* cgroup control files */
- struct cftype cgroup_files[5];
+ struct cftype cgroup_files_dfl[5];
+ struct cftype cgroup_files_legacy[5];
#endif
char name[HSTATE_NAME_LEN];
};
diff --git a/include/linux/hwmon.h b/include/linux/hwmon.h
index 72579168189d..5e609f25878c 100644
--- a/include/linux/hwmon.h
+++ b/include/linux/hwmon.h
@@ -27,6 +27,7 @@ enum hwmon_sensor_types {
hwmon_humidity,
hwmon_fan,
hwmon_pwm,
+ hwmon_intrusion,
hwmon_max,
};
@@ -59,7 +60,8 @@ enum hwmon_chip_attributes {
#define HWMON_C_TEMP_SAMPLES BIT(hwmon_chip_temp_samples)
enum hwmon_temp_attributes {
- hwmon_temp_input = 0,
+ hwmon_temp_enable,
+ hwmon_temp_input,
hwmon_temp_type,
hwmon_temp_lcrit,
hwmon_temp_lcrit_hyst,
@@ -85,6 +87,7 @@ enum hwmon_temp_attributes {
hwmon_temp_reset_history,
};
+#define HWMON_T_ENABLE BIT(hwmon_temp_enable)
#define HWMON_T_INPUT BIT(hwmon_temp_input)
#define HWMON_T_TYPE BIT(hwmon_temp_type)
#define HWMON_T_LCRIT BIT(hwmon_temp_lcrit)
@@ -111,6 +114,7 @@ enum hwmon_temp_attributes {
#define HWMON_T_RESET_HISTORY BIT(hwmon_temp_reset_history)
enum hwmon_in_attributes {
+ hwmon_in_enable,
hwmon_in_input,
hwmon_in_min,
hwmon_in_max,
@@ -126,9 +130,9 @@ enum hwmon_in_attributes {
hwmon_in_max_alarm,
hwmon_in_lcrit_alarm,
hwmon_in_crit_alarm,
- hwmon_in_enable,
};
+#define HWMON_I_ENABLE BIT(hwmon_in_enable)
#define HWMON_I_INPUT BIT(hwmon_in_input)
#define HWMON_I_MIN BIT(hwmon_in_min)
#define HWMON_I_MAX BIT(hwmon_in_max)
@@ -144,9 +148,9 @@ enum hwmon_in_attributes {
#define HWMON_I_MAX_ALARM BIT(hwmon_in_max_alarm)
#define HWMON_I_LCRIT_ALARM BIT(hwmon_in_lcrit_alarm)
#define HWMON_I_CRIT_ALARM BIT(hwmon_in_crit_alarm)
-#define HWMON_I_ENABLE BIT(hwmon_in_enable)
enum hwmon_curr_attributes {
+ hwmon_curr_enable,
hwmon_curr_input,
hwmon_curr_min,
hwmon_curr_max,
@@ -164,6 +168,7 @@ enum hwmon_curr_attributes {
hwmon_curr_crit_alarm,
};
+#define HWMON_C_ENABLE BIT(hwmon_curr_enable)
#define HWMON_C_INPUT BIT(hwmon_curr_input)
#define HWMON_C_MIN BIT(hwmon_curr_min)
#define HWMON_C_MAX BIT(hwmon_curr_max)
@@ -181,6 +186,7 @@ enum hwmon_curr_attributes {
#define HWMON_C_CRIT_ALARM BIT(hwmon_curr_crit_alarm)
enum hwmon_power_attributes {
+ hwmon_power_enable,
hwmon_power_average,
hwmon_power_average_interval,
hwmon_power_average_interval_max,
@@ -211,6 +217,7 @@ enum hwmon_power_attributes {
hwmon_power_crit_alarm,
};
+#define HWMON_P_ENABLE BIT(hwmon_power_enable)
#define HWMON_P_AVERAGE BIT(hwmon_power_average)
#define HWMON_P_AVERAGE_INTERVAL BIT(hwmon_power_average_interval)
#define HWMON_P_AVERAGE_INTERVAL_MAX BIT(hwmon_power_average_interval_max)
@@ -241,14 +248,17 @@ enum hwmon_power_attributes {
#define HWMON_P_CRIT_ALARM BIT(hwmon_power_crit_alarm)
enum hwmon_energy_attributes {
+ hwmon_energy_enable,
hwmon_energy_input,
hwmon_energy_label,
};
+#define HWMON_E_ENABLE BIT(hwmon_energy_enable)
#define HWMON_E_INPUT BIT(hwmon_energy_input)
#define HWMON_E_LABEL BIT(hwmon_energy_label)
enum hwmon_humidity_attributes {
+ hwmon_humidity_enable,
hwmon_humidity_input,
hwmon_humidity_label,
hwmon_humidity_min,
@@ -259,6 +269,7 @@ enum hwmon_humidity_attributes {
hwmon_humidity_fault,
};
+#define HWMON_H_ENABLE BIT(hwmon_humidity_enable)
#define HWMON_H_INPUT BIT(hwmon_humidity_input)
#define HWMON_H_LABEL BIT(hwmon_humidity_label)
#define HWMON_H_MIN BIT(hwmon_humidity_min)
@@ -269,6 +280,7 @@ enum hwmon_humidity_attributes {
#define HWMON_H_FAULT BIT(hwmon_humidity_fault)
enum hwmon_fan_attributes {
+ hwmon_fan_enable,
hwmon_fan_input,
hwmon_fan_label,
hwmon_fan_min,
@@ -282,6 +294,7 @@ enum hwmon_fan_attributes {
hwmon_fan_fault,
};
+#define HWMON_F_ENABLE BIT(hwmon_fan_enable)
#define HWMON_F_INPUT BIT(hwmon_fan_input)
#define HWMON_F_LABEL BIT(hwmon_fan_label)
#define HWMON_F_MIN BIT(hwmon_fan_min)
@@ -306,6 +319,13 @@ enum hwmon_pwm_attributes {
#define HWMON_PWM_MODE BIT(hwmon_pwm_mode)
#define HWMON_PWM_FREQ BIT(hwmon_pwm_freq)
+enum hwmon_intrusion_attributes {
+ hwmon_intrusion_alarm,
+ hwmon_intrusion_beep,
+};
+#define HWMON_INTRUSION_ALARM BIT(hwmon_intrusion_alarm)
+#define HWMON_INTRUSION_BEEP BIT(hwmon_intrusion_beep)
+
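For context, the new per-sensor "enable" bits and the intrusion type slot into the same hwmon_channel_info tables as the existing attributes. The table below is a hypothetical example, not taken from a real driver.

#include <linux/hwmon.h>

static const struct hwmon_channel_info *example_info[] = {
	HWMON_CHANNEL_INFO(temp,
			   HWMON_T_ENABLE | HWMON_T_INPUT | HWMON_T_MAX),
	HWMON_CHANNEL_INFO(intrusion,
			   HWMON_INTRUSION_ALARM | HWMON_INTRUSION_BEEP),
	NULL
};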
/**
* struct hwmon_ops - hwmon device operations
* @is_visible: Callback to return attribute visibility. Mandatory.
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index b4a017093b69..692c89ccf5df 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -182,19 +182,21 @@ static inline u32 hv_get_avail_to_write_percent(
* 2 . 4 (Windows 8)
* 3 . 0 (Windows 8 R2)
* 4 . 0 (Windows 10)
+ * 4 . 1 (Windows 10 RS3)
* 5 . 0 (Newer Windows 10)
+ * 5 . 1 (Windows 10 RS4)
+ * 5 . 2 (Windows Server 2019, RS5)
*/
#define VERSION_WS2008 ((0 << 16) | (13))
#define VERSION_WIN7 ((1 << 16) | (1))
#define VERSION_WIN8 ((2 << 16) | (4))
#define VERSION_WIN8_1 ((3 << 16) | (0))
-#define VERSION_WIN10 ((4 << 16) | (0))
+#define VERSION_WIN10 ((4 << 16) | (0))
+#define VERSION_WIN10_V4_1 ((4 << 16) | (1))
#define VERSION_WIN10_V5 ((5 << 16) | (0))
-
-#define VERSION_INVAL -1
-
-#define VERSION_CURRENT VERSION_WIN10_V5
+#define VERSION_WIN10_V5_1 ((5 << 16) | (1))
+#define VERSION_WIN10_V5_2 ((5 << 16) | (2))
/* Make maximum size of pipe payload of 16K */
#define MAX_PIPE_DATA_PAYLOAD (sizeof(u8) * 16384)
@@ -423,6 +425,8 @@ enum vmbus_channel_message_type {
CHANNELMSG_19 = 19,
CHANNELMSG_20 = 20,
CHANNELMSG_TL_CONNECT_REQUEST = 21,
+ CHANNELMSG_22 = 22,
+ CHANNELMSG_TL_CONNECT_RESULT = 23,
CHANNELMSG_COUNT
};
@@ -932,6 +936,21 @@ struct vmbus_channel {
* full outbound ring buffer.
*/
u64 out_full_first;
+
+ /* enabling/disabling fuzz testing on the channel (default is false) */
+ bool fuzz_testing_state;
+
+ /*
+ * The interrupt delay stalls the guest from emptying the ring buffer
+ * for a specific amount of time. The delay is in microseconds, between
+ * 1 and a maximum of 1000; the default is 0 (no delay).
+ * The message delay stalls guest reads on a per-message basis, also
+ * in microseconds between 1 and 1000, with the default being 0
+ * (no delay).
+ */
+ u32 fuzz_testing_interrupt_delay;
+ u32 fuzz_testing_message_delay;
+
};
static inline bool is_hvsock_channel(const struct vmbus_channel *c)
@@ -1180,6 +1199,10 @@ struct hv_device {
struct vmbus_channel *channel;
struct kset *channels_kset;
+
+ /* place holder to keep track of the dir for hv device in debugfs */
+ struct dentry *debug_dir;
+
};
@@ -1412,6 +1435,8 @@ struct hv_util_service {
void (*util_cb)(void *);
int (*util_init)(struct hv_util_service *);
void (*util_deinit)(void);
+ int (*util_pre_suspend)(void);
+ int (*util_pre_resume)(void);
};
struct vmbuspipe_hdr {
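As a rough illustration of the two new suspend/resume hooks added to struct hv_util_service above, a utility driver would populate them alongside its existing callbacks. The function and variable names below are hypothetical, not part of this diff.

	#include <linux/hyperv.h>

	static int demo_util_pre_suspend(void)
	{
		/* quiesce the utility service before the VM is suspended */
		return 0;
	}

	static int demo_util_pre_resume(void)
	{
		/* re-arm any state after the VM resumes */
		return 0;
	}

	static struct hv_util_service util_demo = {
		.util_pre_suspend = demo_util_pre_suspend,
		.util_pre_resume = demo_util_pre_resume,
	};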
diff --git a/include/linux/i2c-pxa.h b/include/linux/i2c-pxa.h
deleted file mode 100644
index a897e2b507b6..000000000000
--- a/include/linux/i2c-pxa.h
+++ /dev/null
@@ -1,18 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LINUX_I2C_ALGO_PXA_H
-#define _LINUX_I2C_ALGO_PXA_H
-
-typedef enum i2c_slave_event_e {
- I2C_SLAVE_EVENT_START_READ,
- I2C_SLAVE_EVENT_START_WRITE,
- I2C_SLAVE_EVENT_STOP
-} i2c_slave_event_t;
-
-struct i2c_slave_client {
- void *data;
- void (*event)(void *ptr, i2c_slave_event_t event);
- int (*read) (void *ptr);
- void (*write)(void *ptr, unsigned int val);
-};
-
-#endif /* _LINUX_I2C_ALGO_PXA_H */
diff --git a/include/linux/i2c.h b/include/linux/i2c.h
index 1361637c369d..f6b942150631 100644
--- a/include/linux/i2c.h
+++ b/include/linux/i2c.h
@@ -50,8 +50,8 @@ struct property_entry;
* transmit an arbitrary number of messages without interruption.
* @count must be less than 64k since msg.len is u16.
*/
-extern int i2c_transfer_buffer_flags(const struct i2c_client *client,
- char *buf, int count, u16 flags);
+int i2c_transfer_buffer_flags(const struct i2c_client *client,
+ char *buf, int count, u16 flags);
/**
* i2c_master_recv - issue a single I2C message in master receive mode
@@ -115,11 +115,9 @@ static inline int i2c_master_send_dmasafe(const struct i2c_client *client,
/* Transfer num messages.
*/
-extern int i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
- int num);
+int i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num);
/* Unlocked flavor */
-extern int __i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msgs,
- int num);
+int __i2c_transfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num);
/* This is the very generalized SMBus access routine. You probably do not
want to use this, though; one of the functions below may be much easier,
@@ -138,16 +136,14 @@ s32 __i2c_smbus_xfer(struct i2c_adapter *adapter, u16 addr,
/* Now follow the 'nice' access routines. These also document the calling
conventions of i2c_smbus_xfer. */
-extern s32 i2c_smbus_read_byte(const struct i2c_client *client);
-extern s32 i2c_smbus_write_byte(const struct i2c_client *client, u8 value);
-extern s32 i2c_smbus_read_byte_data(const struct i2c_client *client,
- u8 command);
-extern s32 i2c_smbus_write_byte_data(const struct i2c_client *client,
- u8 command, u8 value);
-extern s32 i2c_smbus_read_word_data(const struct i2c_client *client,
- u8 command);
-extern s32 i2c_smbus_write_word_data(const struct i2c_client *client,
- u8 command, u16 value);
+s32 i2c_smbus_read_byte(const struct i2c_client *client);
+s32 i2c_smbus_write_byte(const struct i2c_client *client, u8 value);
+s32 i2c_smbus_read_byte_data(const struct i2c_client *client, u8 command);
+s32 i2c_smbus_write_byte_data(const struct i2c_client *client,
+ u8 command, u8 value);
+s32 i2c_smbus_read_word_data(const struct i2c_client *client, u8 command);
+s32 i2c_smbus_write_word_data(const struct i2c_client *client,
+ u8 command, u16 value);
static inline s32
i2c_smbus_read_word_swapped(const struct i2c_client *client, u8 command)
@@ -165,19 +161,18 @@ i2c_smbus_write_word_swapped(const struct i2c_client *client,
}
/* Returns the number of read bytes */
-extern s32 i2c_smbus_read_block_data(const struct i2c_client *client,
- u8 command, u8 *values);
-extern s32 i2c_smbus_write_block_data(const struct i2c_client *client,
- u8 command, u8 length, const u8 *values);
+s32 i2c_smbus_read_block_data(const struct i2c_client *client,
+ u8 command, u8 *values);
+s32 i2c_smbus_write_block_data(const struct i2c_client *client,
+ u8 command, u8 length, const u8 *values);
/* Returns the number of read bytes */
-extern s32 i2c_smbus_read_i2c_block_data(const struct i2c_client *client,
- u8 command, u8 length, u8 *values);
-extern s32 i2c_smbus_write_i2c_block_data(const struct i2c_client *client,
- u8 command, u8 length,
- const u8 *values);
-extern s32
-i2c_smbus_read_i2c_block_data_or_emulated(const struct i2c_client *client,
- u8 command, u8 length, u8 *values);
+s32 i2c_smbus_read_i2c_block_data(const struct i2c_client *client,
+ u8 command, u8 length, u8 *values);
+s32 i2c_smbus_write_i2c_block_data(const struct i2c_client *client,
+ u8 command, u8 length, const u8 *values);
+s32 i2c_smbus_read_i2c_block_data_or_emulated(const struct i2c_client *client,
+ u8 command, u8 length,
+ u8 *values);
int i2c_get_device_id(const struct i2c_client *client,
struct i2c_device_identity *id);
#endif /* I2C */
@@ -300,6 +295,7 @@ struct i2c_driver {
* generic enough to hide second-sourcing and compatible revisions.
* @adapter: manages the bus segment hosting this I2C device
* @dev: Driver model device node for the slave.
+ * @init_irq: IRQ that was set at initialization
* @irq: indicates the IRQ generated by this device (if any)
* @detected: member of an i2c_driver.clients list or i2c-core's
* userspace_devices list
@@ -336,10 +332,10 @@ struct i2c_client {
};
#define to_i2c_client(d) container_of(d, struct i2c_client, dev)
-extern struct i2c_client *i2c_verify_client(struct device *dev);
-extern struct i2c_adapter *i2c_verify_adapter(struct device *dev);
-extern const struct i2c_device_id *i2c_match_id(const struct i2c_device_id *id,
- const struct i2c_client *client);
+struct i2c_client *i2c_verify_client(struct device *dev);
+struct i2c_adapter *i2c_verify_adapter(struct device *dev);
+const struct i2c_device_id *i2c_match_id(const struct i2c_device_id *id,
+ const struct i2c_client *client);
static inline struct i2c_client *kobj_to_i2c_client(struct kobject *kobj)
{
@@ -368,9 +364,9 @@ enum i2c_slave_event {
I2C_SLAVE_STOP,
};
-extern int i2c_slave_register(struct i2c_client *client, i2c_slave_cb_t slave_cb);
-extern int i2c_slave_unregister(struct i2c_client *client);
-extern bool i2c_detect_slave_mode(struct device *dev);
+int i2c_slave_register(struct i2c_client *client, i2c_slave_cb_t slave_cb);
+int i2c_slave_unregister(struct i2c_client *client);
+bool i2c_detect_slave_mode(struct device *dev);
static inline int i2c_slave_event(struct i2c_client *client,
enum i2c_slave_event event, u8 *val)
@@ -439,10 +435,10 @@ struct i2c_board_info {
* with integrated I2C, a config eeprom, sensors, and a codec that's
* used in conjunction with the primary hardware.
*/
-extern struct i2c_client *
+struct i2c_client *
i2c_new_device(struct i2c_adapter *adap, struct i2c_board_info const *info);
-extern struct i2c_client *
+struct i2c_client *
i2c_new_client_device(struct i2c_adapter *adap, struct i2c_board_info const *info);
/* If you don't know the exact address of an I2C device, use this variant
@@ -451,33 +447,33 @@ i2c_new_client_device(struct i2c_adapter *adap, struct i2c_board_info const *inf
* it must return 1 on successful probe, 0 otherwise. If it is not provided,
* a default probing method is used.
*/
-extern struct i2c_client *
+struct i2c_client *
+i2c_new_scanned_device(struct i2c_adapter *adap,
+ struct i2c_board_info *info,
+ unsigned short const *addr_list,
+ int (*probe)(struct i2c_adapter *adap, unsigned short addr));
+
+struct i2c_client *
i2c_new_probed_device(struct i2c_adapter *adap,
- struct i2c_board_info *info,
- unsigned short const *addr_list,
- int (*probe)(struct i2c_adapter *adap, unsigned short addr));
+ struct i2c_board_info *info,
+ unsigned short const *addr_list,
+ int (*probe)(struct i2c_adapter *adap, unsigned short addr));
/* Common custom probe functions */
-extern int i2c_probe_func_quick_read(struct i2c_adapter *adap, unsigned short addr);
-
-/* For devices that use several addresses, use i2c_new_dummy() to make
- * client handles for the extra addresses.
- */
-extern struct i2c_client *
-i2c_new_dummy(struct i2c_adapter *adap, u16 address);
+int i2c_probe_func_quick_read(struct i2c_adapter *adap, unsigned short addr);
-extern struct i2c_client *
+struct i2c_client *
i2c_new_dummy_device(struct i2c_adapter *adapter, u16 address);
-extern struct i2c_client *
+struct i2c_client *
devm_i2c_new_dummy_device(struct device *dev, struct i2c_adapter *adap, u16 address);
-extern struct i2c_client *
+struct i2c_client *
i2c_new_ancillary_device(struct i2c_client *client,
- const char *name,
- u16 default_addr);
+ const char *name,
+ u16 default_addr);
-extern void i2c_unregister_device(struct i2c_client *client);
+void i2c_unregister_device(struct i2c_client *client);
#endif /* I2C */
/* Mainboard arch_initcall() code should register all its I2C devices.
@@ -485,7 +481,7 @@ extern void i2c_unregister_device(struct i2c_client *client);
* Modules for add-on boards must use other calls.
*/
#ifdef CONFIG_I2C_BOARDINFO
-extern int
+int
i2c_register_board_info(int busnum, struct i2c_board_info const *info,
unsigned n);
#else
@@ -510,7 +506,7 @@ i2c_register_board_info(int busnum, struct i2c_board_info const *info,
* @smbus_xfer_atomic: same as @smbus_xfer. Yet, only using atomic context
* so e.g. PMICs can be accessed very late before shutdown. Optional.
* @functionality: Return the flags that this algorithm/adapter pair supports
- * from the I2C_FUNC_* flags.
+ * from the ``I2C_FUNC_*`` flags.
* @reg_slave: Register given client to I2C slave mode of this adapter
* @unreg_slave: Unregister given client from I2C slave mode of this adapter
*
@@ -519,7 +515,7 @@ i2c_register_board_info(int busnum, struct i2c_board_info const *info,
* be addressed using the same bus algorithms - i.e. bit-banging or the PCF8584
* to name two of the most common.
*
- * The return codes from the @master_xfer{_atomic} fields should indicate the
+ * The return codes from the ``master_xfer{_atomic}`` fields should indicate the
* type of error code that occurred during the transfer, as documented in the
* Kernel Documentation file Documentation/i2c/fault-codes.rst.
*/
@@ -575,6 +571,10 @@ struct i2c_lock_operations {
* @scl_int_delay_ns: time IP core additionally needs to setup SCL in ns
* @sda_fall_ns: time SDA signal takes to fall in ns; t(f) in the I2C specification
* @sda_hold_ns: time IP core additionally needs to hold SDA in ns
+ * @digital_filter_width_ns: width in ns of spikes on i2c lines that the IP core
+ * digital filter can filter out
+ * @analog_filter_cutoff_freq_hz: threshold frequency for the low pass IP core
+ * analog filter
*/
struct i2c_timings {
u32 bus_freq_hz;
@@ -583,6 +583,8 @@ struct i2c_timings {
u32 scl_int_delay_ns;
u32 sda_fall_ns;
u32 sda_hold_ns;
+ u32 digital_filter_width_ns;
+ u32 analog_filter_cutoff_freq_hz;
};
/**
@@ -833,28 +835,30 @@ static inline void i2c_mark_adapter_resumed(struct i2c_adapter *adap)
/* administration...
*/
#if IS_ENABLED(CONFIG_I2C)
-extern int i2c_add_adapter(struct i2c_adapter *adap);
-extern void i2c_del_adapter(struct i2c_adapter *adap);
-extern int i2c_add_numbered_adapter(struct i2c_adapter *adap);
+int i2c_add_adapter(struct i2c_adapter *adap);
+void i2c_del_adapter(struct i2c_adapter *adap);
+int i2c_add_numbered_adapter(struct i2c_adapter *adap);
-extern int i2c_register_driver(struct module *owner, struct i2c_driver *driver);
-extern void i2c_del_driver(struct i2c_driver *driver);
+int i2c_register_driver(struct module *owner, struct i2c_driver *driver);
+void i2c_del_driver(struct i2c_driver *driver);
/* use a define to avoid include chaining to get THIS_MODULE */
#define i2c_add_driver(driver) \
i2c_register_driver(THIS_MODULE, driver)
-extern struct i2c_client *i2c_use_client(struct i2c_client *client);
-extern void i2c_release_client(struct i2c_client *client);
+static inline bool i2c_client_has_driver(struct i2c_client *client)
+{
+ return !IS_ERR_OR_NULL(client) && client->dev.driver;
+}
/* call the i2c_client->command() of all attached clients with
* the given arguments */
-extern void i2c_clients_command(struct i2c_adapter *adap,
- unsigned int cmd, void *arg);
+void i2c_clients_command(struct i2c_adapter *adap,
+ unsigned int cmd, void *arg);
-extern struct i2c_adapter *i2c_get_adapter(int nr);
-extern void i2c_put_adapter(struct i2c_adapter *adap);
-extern unsigned int i2c_adapter_depth(struct i2c_adapter *adapter);
+struct i2c_adapter *i2c_get_adapter(int nr);
+void i2c_put_adapter(struct i2c_adapter *adap);
+unsigned int i2c_adapter_depth(struct i2c_adapter *adapter);
void i2c_parse_fw_timings(struct device *dev, struct i2c_timings *t, bool use_defaults);
@@ -926,15 +930,15 @@ int i2c_handle_smbus_host_notify(struct i2c_adapter *adap, unsigned short addr);
#if IS_ENABLED(CONFIG_OF)
/* must call put_device() when done with returned i2c_client device */
-extern struct i2c_client *of_find_i2c_device_by_node(struct device_node *node);
+struct i2c_client *of_find_i2c_device_by_node(struct device_node *node);
/* must call put_device() when done with returned i2c_adapter device */
-extern struct i2c_adapter *of_find_i2c_adapter_by_node(struct device_node *node);
+struct i2c_adapter *of_find_i2c_adapter_by_node(struct device_node *node);
/* must call i2c_put_adapter() when done with returned i2c_adapter device */
struct i2c_adapter *of_get_i2c_adapter_by_node(struct device_node *node);
-extern const struct of_device_id
+const struct of_device_id
*i2c_of_match_device(const struct of_device_id *matches,
struct i2c_client *client);
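A hedged sketch of how instantiation code might use the new i2c_new_scanned_device() call together with the i2c_client_has_driver() helper introduced above. The device name and address list are made up for illustration; unlike the older probed-device call, the new function is expected to return an ERR_PTR on failure rather than NULL.

	#include <linux/i2c.h>

	static const unsigned short demo_addrs[] = { 0x48, 0x49, I2C_CLIENT_END };

	static int demo_attach(struct i2c_adapter *adap)
	{
		struct i2c_board_info info = { I2C_BOARD_INFO("demo-sensor", 0) };
		struct i2c_client *client;

		/* NULL probe callback: fall back to the default probing method */
		client = i2c_new_scanned_device(adap, &info, demo_addrs, NULL);
		if (IS_ERR(client))
			return PTR_ERR(client);

		if (!i2c_client_has_driver(client))
			dev_info(&client->dev, "no driver bound yet\n");

		return 0;
	}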
diff --git a/include/linux/icmp.h b/include/linux/icmp.h
index 2d8aaf7d4b9e..81ca84ce3119 100644
--- a/include/linux/icmp.h
+++ b/include/linux/icmp.h
@@ -20,4 +20,19 @@ static inline struct icmphdr *icmp_hdr(const struct sk_buff *skb)
{
return (struct icmphdr *)skb_transport_header(skb);
}
+
+static inline bool icmp_is_err(int type)
+{
+ switch (type) {
+ case ICMP_DEST_UNREACH:
+ case ICMP_SOURCE_QUENCH:
+ case ICMP_REDIRECT:
+ case ICMP_TIME_EXCEEDED:
+ case ICMP_PARAMETERPROB:
+ return true;
+ }
+
+ return false;
+}
+
#endif /* _LINUX_ICMP_H */
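A small usage sketch for the new helper; the surrounding receive-path function is hypothetical.

	#include <linux/icmp.h>

	static bool demo_is_icmp_error(const struct sk_buff *skb)
	{
		const struct icmphdr *icmph = icmp_hdr(skb);

		/* true for DEST_UNREACH, SOURCE_QUENCH, REDIRECT,
		 * TIME_EXCEEDED and PARAMETERPROB
		 */
		return icmp_is_err(icmph->type);
	}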
diff --git a/include/linux/icmpv6.h b/include/linux/icmpv6.h
index a8f888976137..33d379602314 100644
--- a/include/linux/icmpv6.h
+++ b/include/linux/icmpv6.h
@@ -22,12 +22,22 @@ extern int inet6_unregister_icmp_sender(ip6_icmp_send_t *fn);
int ip6_err_gen_icmpv6_unreach(struct sk_buff *skb, int nhs, int type,
unsigned int data_len);
+#if IS_ENABLED(CONFIG_NF_NAT)
+void icmpv6_ndo_send(struct sk_buff *skb_in, u8 type, u8 code, __u32 info);
+#else
+#define icmpv6_ndo_send icmpv6_send
+#endif
+
#else
static inline void icmpv6_send(struct sk_buff *skb,
u8 type, u8 code, __u32 info)
{
+}
+static inline void icmpv6_ndo_send(struct sk_buff *skb,
+ u8 type, u8 code, __u32 info)
+{
}
#endif
@@ -46,4 +56,18 @@ extern void icmpv6_flow_init(struct sock *sk,
const struct in6_addr *saddr,
const struct in6_addr *daddr,
int oif);
+
+static inline bool icmpv6_is_err(int type)
+{
+ switch (type) {
+ case ICMPV6_DEST_UNREACH:
+ case ICMPV6_PKT_TOOBIG:
+ case ICMPV6_TIME_EXCEED:
+ case ICMPV6_PARAMPROB:
+ return true;
+ }
+
+ return false;
+}
+
#endif
diff --git a/include/linux/ide.h b/include/linux/ide.h
index 46b771d6999e..a254841bd315 100644
--- a/include/linux/ide.h
+++ b/include/linux/ide.h
@@ -413,6 +413,8 @@ struct ide_disk_ops {
sector_t);
int (*ioctl)(struct ide_drive_s *, struct block_device *,
fmode_t, unsigned int, unsigned long);
+ int (*compat_ioctl)(struct ide_drive_s *, struct block_device *,
+ fmode_t, unsigned int, unsigned long);
};
/* ATAPI device flags */
@@ -943,6 +945,10 @@ ide_devset_get(_name, _field); \
ide_devset_set(_name, _field); \
IDE_DEVSET(_name, DS_SYNC, get_##_name, set_##_name)
+#define ide_devset_ro_field(_name, _field) \
+ide_devset_get(_name, _field); \
+IDE_DEVSET(_name, 0, get_##_name, NULL)
+
#define ide_devset_rw_flag(_name, _field) \
ide_devset_get_flag(_name, _field); \
ide_devset_set_flag(_name, _field); \
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
index 7d3f2ced92d1..73c66a3a33ae 100644
--- a/include/linux/ieee80211.h
+++ b/include/linux/ieee80211.h
@@ -2102,14 +2102,14 @@ ieee80211_he_spr_size(const u8 *he_spr_ie)
{
struct ieee80211_he_spr *he_spr = (void *)he_spr_ie;
u8 spr_len = sizeof(struct ieee80211_he_spr);
- u32 he_spr_params;
+ u8 he_spr_params;
/* Make sure the input is not NULL */
if (!he_spr_ie)
return 0;
/* Calc required length */
- he_spr_params = le32_to_cpu(he_spr->he_sr_control);
+ he_spr_params = he_spr->he_sr_control;
if (he_spr_params & IEEE80211_HE_SPR_NON_SRG_OFFSET_PRESENT)
spr_len++;
if (he_spr_params & IEEE80211_HE_SPR_SRG_INFORMATION_PRESENT)
diff --git a/include/linux/if_ether.h b/include/linux/if_ether.h
index 76cf11e905e1..8a9792a6427a 100644
--- a/include/linux/if_ether.h
+++ b/include/linux/if_ether.h
@@ -24,6 +24,14 @@ static inline struct ethhdr *eth_hdr(const struct sk_buff *skb)
return (struct ethhdr *)skb_mac_header(skb);
}
+/* Prefer this version in TX path, instead of
+ * skb_reset_mac_header() + eth_hdr()
+ */
+static inline struct ethhdr *skb_eth_hdr(const struct sk_buff *skb)
+{
+ return (struct ethhdr *)skb->data;
+}
+
static inline struct ethhdr *inner_eth_hdr(const struct sk_buff *skb)
{
return (struct ethhdr *)skb_inner_mac_header(skb);
diff --git a/include/linux/iio/accel/kxcjk_1013.h b/include/linux/iio/accel/kxcjk_1013.h
index 8c3c78bc9f91..ea0ecb774371 100644
--- a/include/linux/iio/accel/kxcjk_1013.h
+++ b/include/linux/iio/accel/kxcjk_1013.h
@@ -7,8 +7,11 @@
#ifndef __IIO_KXCJK_1013_H__
#define __IIO_KXCJK_1013_H__
+#include <linux/iio/iio.h>
+
struct kxcjk_1013_platform_data {
bool active_high_intr;
+ struct iio_mount_matrix orientation;
};
#endif
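For illustration, board code could describe a rotated sensor through the new field; the 3x3 rotation strings follow the usual struct iio_mount_matrix layout. The values here are an arbitrary 90-degree example, not taken from this diff.

	#include <linux/iio/accel/kxcjk_1013.h>

	static struct kxcjk_1013_platform_data demo_kxcjk_pdata = {
		.active_high_intr = true,
		.orientation = {
			.rotation = {
				"0", "-1", "0",
				"1",  "0", "0",
				"0",  "0", "1",
			},
		},
	};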
diff --git a/include/linux/iio/adc/ad_sigma_delta.h b/include/linux/iio/adc/ad_sigma_delta.h
index 7716fa0c9fce..5a127c0ed200 100644
--- a/include/linux/iio/adc/ad_sigma_delta.h
+++ b/include/linux/iio/adc/ad_sigma_delta.h
@@ -40,6 +40,7 @@ struct iio_dev;
* @read_mask: Mask for the communications register having the read bit set.
* @data_reg: Address of the data register, if 0 the default address of 0x3 will
* be used.
+ * @irq_flags: flags for the interrupt used by the triggered buffer
*/
struct ad_sigma_delta_info {
int (*set_channel)(struct ad_sigma_delta *, unsigned int channel);
@@ -49,6 +50,7 @@ struct ad_sigma_delta_info {
unsigned int addr_shift;
unsigned int read_mask;
unsigned int data_reg;
+ unsigned long irq_flags;
};
/**
@@ -119,6 +121,8 @@ int ad_sd_reset(struct ad_sigma_delta *sigma_delta,
int ad_sigma_delta_single_conversion(struct iio_dev *indio_dev,
const struct iio_chan_spec *chan, int *val);
+int ad_sd_calibrate(struct ad_sigma_delta *sigma_delta,
+ unsigned int mode, unsigned int channel);
int ad_sd_calibrate_all(struct ad_sigma_delta *sigma_delta,
const struct ad_sd_calib_data *cd, unsigned int n);
int ad_sd_init(struct ad_sigma_delta *sigma_delta, struct iio_dev *indio_dev,
diff --git a/include/linux/iio/buffer_impl.h b/include/linux/iio/buffer_impl.h
index d1171db23742..a4d2d8061ef6 100644
--- a/include/linux/iio/buffer_impl.h
+++ b/include/linux/iio/buffer_impl.h
@@ -18,7 +18,7 @@ struct iio_buffer;
/**
* struct iio_buffer_access_funcs - access functions for buffers.
* @store_to: actually store stuff to the buffer
- * @read_first_n: try to get a specified number of bytes (must exist)
+ * @read: try to get a specified number of bytes (must exist)
* @data_available: indicates how much data is available for reading from
* the buffer.
* @request_update: if a parameter change has been marked, update underlying
@@ -45,9 +45,7 @@ struct iio_buffer;
**/
struct iio_buffer_access_funcs {
int (*store_to)(struct iio_buffer *buffer, const void *data);
- int (*read_first_n)(struct iio_buffer *buffer,
- size_t n,
- char __user *buf);
+ int (*read)(struct iio_buffer *buffer, size_t n, char __user *buf);
size_t (*data_available)(struct iio_buffer *buffer);
int (*request_update)(struct iio_buffer *buffer);
diff --git a/include/linux/iio/common/st_sensors.h b/include/linux/iio/common/st_sensors.h
index 686be532f4cb..33e939977444 100644
--- a/include/linux/iio/common/st_sensors.h
+++ b/include/linux/iio/common/st_sensors.h
@@ -315,16 +315,6 @@ ssize_t st_sensors_sysfs_sampling_frequency_avail(struct device *dev,
ssize_t st_sensors_sysfs_scale_avail(struct device *dev,
struct device_attribute *attr, char *buf);
-#ifdef CONFIG_OF
-void st_sensors_of_name_probe(struct device *dev,
- const struct of_device_id *match,
- char *name, int len);
-#else
-static inline void st_sensors_of_name_probe(struct device *dev,
- const struct of_device_id *match,
- char *name, int len)
-{
-}
-#endif
+void st_sensors_dev_name_probe(struct device *dev, char *name, int len);
#endif /* ST_SENSORS_H */
diff --git a/include/linux/iio/common/st_sensors_i2c.h b/include/linux/iio/common/st_sensors_i2c.h
index 01e424e2af4f..5f15cf01036c 100644
--- a/include/linux/iio/common/st_sensors_i2c.h
+++ b/include/linux/iio/common/st_sensors_i2c.h
@@ -12,18 +12,8 @@
#include <linux/i2c.h>
#include <linux/iio/common/st_sensors.h>
-#include <linux/of.h>
int st_sensors_i2c_configure(struct iio_dev *indio_dev,
struct i2c_client *client);
-#ifdef CONFIG_ACPI
-int st_sensors_match_acpi_device(struct device *dev);
-#else
-static inline int st_sensors_match_acpi_device(struct device *dev)
-{
- return -ENODEV;
-}
-#endif
-
#endif /* ST_SENSORS_I2C_H */
diff --git a/include/linux/iio/frequency/adf4350.h b/include/linux/iio/frequency/adf4350.h
index ce9490bfeb89..de45cf2ee1e4 100644
--- a/include/linux/iio/frequency/adf4350.h
+++ b/include/linux/iio/frequency/adf4350.h
@@ -103,9 +103,6 @@
* @r2_user_settings: User defined settings for ADF4350/1 REGISTER_2.
* @r3_user_settings: User defined settings for ADF4350/1 REGISTER_3.
* @r4_user_settings: User defined settings for ADF4350/1 REGISTER_4.
- * @gpio_lock_detect: Optional, if set with a valid GPIO number,
- * pll lock state is tested upon read.
- * If not used - set to -1.
*/
struct adf4350_platform_data {
@@ -121,7 +118,6 @@ struct adf4350_platform_data {
unsigned r2_user_settings;
unsigned r3_user_settings;
unsigned r4_user_settings;
- int gpio_lock_detect;
};
#endif /* IIO_PLL_ADF4350_H_ */
diff --git a/include/linux/iio/iio.h b/include/linux/iio/iio.h
index 8e132cf819e4..eed58ed2f368 100644
--- a/include/linux/iio/iio.h
+++ b/include/linux/iio/iio.h
@@ -510,6 +510,7 @@ struct iio_buffer_setup_ops {
* attributes
* @chan_attr_group: [INTERN] group for all attrs in base directory
* @name: [DRIVER] name of the device.
+ * @label: [DRIVER] unique name to identify which device this is
* @info: [DRIVER] callbacks and constant info from driver
* @clock_id: [INTERN] timestamping clock posix identifier
* @info_exist_lock: [INTERN] lock to prevent use during removal
@@ -553,6 +554,7 @@ struct iio_dev {
struct list_head channel_attr_list;
struct attribute_group chan_attr_group;
const char *name;
+ const char *label;
const struct iio_info *info;
clockid_t clock_id;
struct mutex info_exist_lock;
@@ -566,6 +568,8 @@ struct iio_dev {
#if defined(CONFIG_DEBUG_FS)
struct dentry *debugfs_dentry;
unsigned cached_reg_addr;
+ char read_buf[20];
+ unsigned int read_buf_len;
#endif
};
diff --git a/include/linux/iio/imu/adis.h b/include/linux/iio/imu/adis.h
index 4c53815bb729..dd8219138c2e 100644
--- a/include/linux/iio/imu/adis.h
+++ b/include/linux/iio/imu/adis.h
@@ -23,6 +23,17 @@ struct adis;
struct adis_burst;
/**
+ * struct adis_timeout - ADIS chip variant timeouts
+ * @reset_ms: Wait time after the RST pin goes inactive
+ * @sw_reset_ms: Wait time after the software reset command
+ * @self_test_ms: Wait time after the self-test command
+ */
+struct adis_timeout {
+ u16 reset_ms;
+ u16 sw_reset_ms;
+ u16 self_test_ms;
+};
+/**
* struct adis_data - ADIS chip variant specific data
* @read_delay: SPI delay for read operations in us
* @write_delay: SPI delay for write operations in us
@@ -30,8 +41,16 @@ struct adis_burst;
* @glob_cmd_reg: Register address of the GLOB_CMD register
* @msc_ctrl_reg: Register address of the MSC_CTRL register
* @diag_stat_reg: Register address of the DIAG_STAT register
+ * @prod_id_reg: Register address of the PROD_ID register
+ * @prod_id: Product ID code that should be expected when reading @prod_id_reg
+ * @self_test_mask: Bitmask of supported self-test operations
+ * @self_test_reg: Register address to request self test command
+ * @self_test_no_autoclear: True if device's self-test needs clear of ctrl reg
* @status_error_msgs: Array of error messages
- * @status_error_mask:
+ * @status_error_mask: Bitmask of errors supported by the device
+ * @timeouts: Chip specific delays
+ * @enable_irq: Hook for ADIS devices that have a special IRQ enable/disable
+ * @has_paging: True if ADIS device has paged registers
*/
struct adis_data {
unsigned int read_delay;
@@ -41,10 +60,14 @@ struct adis_data {
unsigned int glob_cmd_reg;
unsigned int msc_ctrl_reg;
unsigned int diag_stat_reg;
+ unsigned int prod_id_reg;
+
+ unsigned int prod_id;
unsigned int self_test_mask;
+ unsigned int self_test_reg;
bool self_test_no_autoclear;
- unsigned int startup_delay;
+ const struct adis_timeout *timeouts;
const char * const *status_error_msgs;
unsigned int status_error_mask;
@@ -54,6 +77,20 @@ struct adis_data {
bool has_paging;
};
+/**
+ * struct adis - ADIS device instance data
+ * @spi: Reference to SPI device which owns this ADIS IIO device
+ * @trig: IIO trigger object data
+ * @data: ADIS chip variant specific data
+ * @burst: ADIS burst transfer information
+ * @state_lock: Lock used by the device to protect state
+ * @msg: SPI message object
+ * @xfer: SPI transfer objects to be used for a @msg
+ * @current_page: Some ADIS devices have paged registers; this selects the current page
+ * @buffer: Data buffer for information read from the device
+ * @tx: DMA safe TX buffer for SPI transfers
+ * @rx: DMA safe RX buffer for SPI transfers
+ */
struct adis {
struct spi_device *spi;
struct iio_trigger *trig;
@@ -61,7 +98,18 @@ struct adis {
const struct adis_data *data;
struct adis_burst *burst;
- struct mutex txrx_lock;
+ /**
+ * The state_lock is meant to be used during operations that require
+ * a sequence of SPI R/W in order to protect the SPI transfer
+ * information (fields 'xfer', 'msg' & 'current_page') between
+ * potential concurrent accesses.
+ * This lock is used by all "adis_{functions}" that have to read/write
+ * registers. These functions also have unlocked variants
+ * (see "__adis_{functions}"), which don't hold this lock.
+ * This allows users of the ADIS library to group SPI R/W operations
+ * in their drivers, but they must then manage this lock themselves.
+ */
+ struct mutex state_lock;
struct spi_message msg;
struct spi_transfer *xfer;
unsigned int current_page;
@@ -73,14 +121,143 @@ struct adis {
int adis_init(struct adis *adis, struct iio_dev *indio_dev,
struct spi_device *spi, const struct adis_data *data);
-int adis_reset(struct adis *adis);
+int __adis_reset(struct adis *adis);
+
+/**
+ * adis_reset() - Reset the device
+ * @adis: The adis device
+ *
+ * Returns 0 on success, a negative error code otherwise
+ */
+static inline int adis_reset(struct adis *adis)
+{
+ int ret;
+
+ mutex_lock(&adis->state_lock);
+ ret = __adis_reset(adis);
+ mutex_unlock(&adis->state_lock);
-int adis_write_reg(struct adis *adis, unsigned int reg,
+ return ret;
+}
+
+int __adis_write_reg(struct adis *adis, unsigned int reg,
unsigned int val, unsigned int size);
-int adis_read_reg(struct adis *adis, unsigned int reg,
+int __adis_read_reg(struct adis *adis, unsigned int reg,
unsigned int *val, unsigned int size);
/**
+ * __adis_write_reg_8() - Write single byte to a register (unlocked)
+ * @adis: The adis device
+ * @reg: The address of the register to be written
+ * @value: The value to write
+ */
+static inline int __adis_write_reg_8(struct adis *adis, unsigned int reg,
+ uint8_t val)
+{
+ return __adis_write_reg(adis, reg, val, 1);
+}
+
+/**
+ * __adis_write_reg_16() - Write 2 bytes to a pair of registers (unlocked)
+ * @adis: The adis device
+ * @reg: The address of the lower of the two registers
+ * @value: Value to be written
+ */
+static inline int __adis_write_reg_16(struct adis *adis, unsigned int reg,
+ uint16_t val)
+{
+ return __adis_write_reg(adis, reg, val, 2);
+}
+
+/**
+ * __adis_write_reg_32() - write 4 bytes to four registers (unlocked)
+ * @adis: The adis device
+ * @reg: The address of the lower of the four register
+ * @value: Value to be written
+ */
+static inline int __adis_write_reg_32(struct adis *adis, unsigned int reg,
+ uint32_t val)
+{
+ return __adis_write_reg(adis, reg, val, 4);
+}
+
+/**
+ * __adis_read_reg_16() - read 2 bytes from a 16-bit register (unlocked)
+ * @adis: The adis device
+ * @reg: The address of the lower of the two registers
+ * @val: The value read back from the device
+ */
+static inline int __adis_read_reg_16(struct adis *adis, unsigned int reg,
+ uint16_t *val)
+{
+ unsigned int tmp;
+ int ret;
+
+ ret = __adis_read_reg(adis, reg, &tmp, 2);
+ if (ret == 0)
+ *val = tmp;
+
+ return ret;
+}
+
+/**
+ * __adis_read_reg_32() - read 4 bytes from a 32-bit register (unlocked)
+ * @adis: The adis device
+ * @reg: The address of the lower of the two registers
+ * @val: The value read back from the device
+ */
+static inline int __adis_read_reg_32(struct adis *adis, unsigned int reg,
+ uint32_t *val)
+{
+ unsigned int tmp;
+ int ret;
+
+ ret = __adis_read_reg(adis, reg, &tmp, 4);
+ if (ret == 0)
+ *val = tmp;
+
+ return ret;
+}
+
+/**
+ * adis_write_reg() - write N bytes to register
+ * @adis: The adis device
+ * @reg: The address of the lower of the two registers
+ * @value: The value to write to device (up to 4 bytes)
+ * @size: The size of the @value (in bytes)
+ */
+static inline int adis_write_reg(struct adis *adis, unsigned int reg,
+ unsigned int val, unsigned int size)
+{
+ int ret;
+
+ mutex_lock(&adis->state_lock);
+ ret = __adis_write_reg(adis, reg, val, size);
+ mutex_unlock(&adis->state_lock);
+
+ return ret;
+}
+
+/**
+ * adis_read_reg() - read N bytes from register
+ * @adis: The adis device
+ * @reg: The address of the lower of the two registers
+ * @val: The value read back from the device
+ * @size: The size of the @val buffer
+ */
+static inline int adis_read_reg(struct adis *adis, unsigned int reg,
+ unsigned int *val, unsigned int size)
+{
+ int ret;
+
+ mutex_lock(&adis->state_lock);
+ ret = __adis_read_reg(adis, reg, val, size);
+ mutex_unlock(&adis->state_lock);
+
+ return ret;
+}
+
+/**
* adis_write_reg_8() - Write single byte to a register
* @adis: The adis device
* @reg: The address of the register to be written
@@ -129,7 +306,8 @@ static inline int adis_read_reg_16(struct adis *adis, unsigned int reg,
int ret;
ret = adis_read_reg(adis, reg, &tmp, 2);
- *val = tmp;
+ if (ret == 0)
+ *val = tmp;
return ret;
}
@@ -147,15 +325,38 @@ static inline int adis_read_reg_32(struct adis *adis, unsigned int reg,
int ret;
ret = adis_read_reg(adis, reg, &tmp, 4);
- *val = tmp;
+ if (ret == 0)
+ *val = tmp;
return ret;
}
int adis_enable_irq(struct adis *adis, bool enable);
-int adis_check_status(struct adis *adis);
+int __adis_check_status(struct adis *adis);
+int __adis_initial_startup(struct adis *adis);
-int adis_initial_startup(struct adis *adis);
+static inline int adis_check_status(struct adis *adis)
+{
+ int ret;
+
+ mutex_lock(&adis->state_lock);
+ ret = __adis_check_status(adis);
+ mutex_unlock(&adis->state_lock);
+
+ return ret;
+}
+
+/* locked version of __adis_initial_startup() */
+static inline int adis_initial_startup(struct adis *adis)
+{
+ int ret;
+
+ mutex_lock(&adis->state_lock);
+ ret = __adis_initial_startup(adis);
+ mutex_unlock(&adis->state_lock);
+
+ return ret;
+}
int adis_single_conversion(struct iio_dev *indio_dev,
const struct iio_chan_spec *chan, unsigned int error_mask,
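A sketch of the usage pattern the state_lock comment describes: a driver groups several unlocked accesses so they execute as one unit with respect to the locked adis_* helpers. The register macro below is hypothetical.

	#include <linux/iio/imu/adis.h>

	#define DEMO_FILT_CTRL_REG	0x5c	/* hypothetical register address */

	static int demo_set_filter(struct adis *adis, u16 val)
	{
		int ret;

		mutex_lock(&adis->state_lock);
		ret = __adis_write_reg_16(adis, DEMO_FILT_CTRL_REG, val);
		if (ret)
			goto out_unlock;
		ret = __adis_check_status(adis);
	out_unlock:
		mutex_unlock(&adis->state_lock);
		return ret;
	}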
diff --git a/include/linux/iio/magnetometer/ak8975.h b/include/linux/iio/magnetometer/ak8975.h
deleted file mode 100644
index ac9366f807cb..000000000000
--- a/include/linux/iio/magnetometer/ak8975.h
+++ /dev/null
@@ -1,17 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __IIO_MAGNETOMETER_AK8975_H__
-#define __IIO_MAGNETOMETER_AK8975_H__
-
-#include <linux/iio/iio.h>
-
-/**
- * struct ak8975_platform_data - AK8975 magnetometer driver platform data
- * @eoc_gpio: data ready event gpio
- * @orientation: mounting matrix relative to main hardware
- */
-struct ak8975_platform_data {
- int eoc_gpio;
- struct iio_mount_matrix orientation;
-};
-
-#endif
diff --git a/include/linux/iio/types.h b/include/linux/iio/types.h
index fa824e160f35..e6fd3645963c 100644
--- a/include/linux/iio/types.h
+++ b/include/linux/iio/types.h
@@ -25,6 +25,7 @@ enum iio_event_info {
#define IIO_VAL_INT_MULTIPLE 5
#define IIO_VAL_FRACTIONAL 10
#define IIO_VAL_FRACTIONAL_LOG2 11
+#define IIO_VAL_CHAR 12
enum iio_available_type {
IIO_AVAIL_LIST,
@@ -57,6 +58,7 @@ enum iio_chan_info_enum {
IIO_CHAN_INFO_DEBOUNCE_TIME,
IIO_CHAN_INFO_CALIBEMISSIVITY,
IIO_CHAN_INFO_OVERSAMPLING_RATIO,
+ IIO_CHAN_INFO_THERMOCOUPLE_TYPE,
};
#endif /* _IIO_TYPES_H_ */
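A brief sketch of how a driver's read_raw() callback could report the new thermocouple-type channel info item as a character, assuming IIO_VAL_CHAR tells the core to print the value as a single char; the 'K' here is an arbitrary example.

	#include <linux/iio/iio.h>
	#include <linux/iio/types.h>

	static int demo_read_raw(struct iio_dev *indio_dev,
				 struct iio_chan_spec const *chan,
				 int *val, int *val2, long mask)
	{
		switch (mask) {
		case IIO_CHAN_INFO_THERMOCOUPLE_TYPE:
			*val = 'K';		/* report a type-K thermocouple */
			return IIO_VAL_CHAR;
		default:
			return -EINVAL;
		}
	}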
diff --git a/include/linux/ima.h b/include/linux/ima.h
index 1c37f17f7203..1659217e9b60 100644
--- a/include/linux/ima.h
+++ b/include/linux/ima.h
@@ -23,13 +23,15 @@ extern int ima_read_file(struct file *file, enum kernel_read_file_id id);
extern int ima_post_read_file(struct file *file, void *buf, loff_t size,
enum kernel_read_file_id id);
extern void ima_post_path_mknod(struct dentry *dentry);
+extern int ima_file_hash(struct file *file, char *buf, size_t buf_size);
extern void ima_kexec_cmdline(const void *buf, int size);
#ifdef CONFIG_IMA_KEXEC
extern void ima_add_kexec_buffer(struct kimage *image);
#endif
-#if (defined(CONFIG_X86) && defined(CONFIG_EFI)) || defined(CONFIG_S390)
+#if (defined(CONFIG_X86) && defined(CONFIG_EFI)) || defined(CONFIG_S390) \
+ || defined(CONFIG_PPC_SECURE_BOOT)
extern bool arch_ima_get_secureboot(void);
extern const char * const *arch_get_ima_policy(void);
#else
@@ -90,6 +92,11 @@ static inline void ima_post_path_mknod(struct dentry *dentry)
return;
}
+static inline int ima_file_hash(struct file *file, char *buf, size_t buf_size)
+{
+ return -EOPNOTSUPP;
+}
+
static inline void ima_kexec_cmdline(const void *buf, int size) {}
#endif /* CONFIG_IMA */
@@ -100,6 +107,20 @@ static inline void ima_add_kexec_buffer(struct kimage *image)
{}
#endif
+#ifdef CONFIG_IMA_MEASURE_ASYMMETRIC_KEYS
+extern void ima_post_key_create_or_update(struct key *keyring,
+ struct key *key,
+ const void *payload, size_t plen,
+ unsigned long flags, bool create);
+#else
+static inline void ima_post_key_create_or_update(struct key *keyring,
+ struct key *key,
+ const void *payload,
+ size_t plen,
+ unsigned long flags,
+ bool create) {}
+#endif /* CONFIG_IMA_MEASURE_ASYMMETRIC_KEYS */
+
#ifdef CONFIG_IMA_APPRAISE
extern bool is_ima_appraise_enabled(void);
extern void ima_inode_post_setattr(struct dentry *dentry);
diff --git a/include/linux/inet_diag.h b/include/linux/inet_diag.h
index 39faaaf843e1..c91cf2dee12a 100644
--- a/include/linux/inet_diag.h
+++ b/include/linux/inet_diag.h
@@ -2,15 +2,10 @@
#ifndef _INET_DIAG_H_
#define _INET_DIAG_H_ 1
+#include <net/netlink.h>
#include <uapi/linux/inet_diag.h>
-struct net;
-struct sock;
struct inet_hashinfo;
-struct nlattr;
-struct nlmsghdr;
-struct sk_buff;
-struct netlink_callback;
struct inet_diag_handler {
void (*dump)(struct sk_buff *skb,
@@ -62,6 +57,17 @@ int inet_diag_bc_sk(const struct nlattr *_bc, struct sock *sk);
void inet_diag_msg_common_fill(struct inet_diag_msg *r, struct sock *sk);
+static inline size_t inet_diag_msg_attrs_size(void)
+{
+ return nla_total_size(1) /* INET_DIAG_SHUTDOWN */
+ + nla_total_size(1) /* INET_DIAG_TOS */
+#if IS_ENABLED(CONFIG_IPV6)
+ + nla_total_size(1) /* INET_DIAG_TCLASS */
+ + nla_total_size(1) /* INET_DIAG_SKV6ONLY */
+#endif
+ + nla_total_size(4) /* INET_DIAG_MARK */
+ + nla_total_size(4); /* INET_DIAG_CLASS_ID */
+}
int inet_diag_msg_attrs_fill(struct sock *sk, struct sk_buff *skb,
struct inet_diag_msg *r, int ext,
struct user_namespace *user_ns, bool net_admin);
diff --git a/include/linux/initrd.h b/include/linux/initrd.h
index d77fe34fb00a..aa5914355728 100644
--- a/include/linux/initrd.h
+++ b/include/linux/initrd.h
@@ -28,3 +28,5 @@ extern unsigned int real_root_dev;
extern char __initramfs_start[];
extern unsigned long __initramfs_size;
+
+void console_on_rootfs(void);
diff --git a/include/linux/input.h b/include/linux/input.h
index 94f277cd806a..56f2fd32e609 100644
--- a/include/linux/input.h
+++ b/include/linux/input.h
@@ -383,6 +383,7 @@ int input_setup_polling(struct input_dev *dev,
void input_set_poll_interval(struct input_dev *dev, unsigned int interval);
void input_set_min_poll_interval(struct input_dev *dev, unsigned int interval);
void input_set_max_poll_interval(struct input_dev *dev, unsigned int interval);
+int input_get_poll_interval(struct input_dev *dev);
int __must_check input_register_handler(struct input_handler *);
void input_unregister_handler(struct input_handler *);
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
index 6d8bf4bdf240..980234ae0312 100644
--- a/include/linux/intel-iommu.h
+++ b/include/linux/intel-iommu.h
@@ -34,10 +34,13 @@
#define VTD_STRIDE_SHIFT (9)
#define VTD_STRIDE_MASK (((u64)-1) << VTD_STRIDE_SHIFT)
-#define DMA_PTE_READ (1)
-#define DMA_PTE_WRITE (2)
-#define DMA_PTE_LARGE_PAGE (1 << 7)
-#define DMA_PTE_SNP (1 << 11)
+#define DMA_PTE_READ BIT_ULL(0)
+#define DMA_PTE_WRITE BIT_ULL(1)
+#define DMA_PTE_LARGE_PAGE BIT_ULL(7)
+#define DMA_PTE_SNP BIT_ULL(11)
+
+#define DMA_FL_PTE_PRESENT BIT_ULL(0)
+#define DMA_FL_PTE_XD BIT_ULL(63)
#define CONTEXT_TT_MULTI_LEVEL 0
#define CONTEXT_TT_DEV_IOTLB 1
@@ -120,6 +123,8 @@
#define dmar_readq(a) readq(a)
#define dmar_writeq(a,v) writeq(v,a)
+#define dmar_readl(a) readl(a)
+#define dmar_writel(a, v) writel(v, a)
#define DMAR_VER_MAJOR(v) (((v) & 0xf0) >> 4)
#define DMAR_VER_MINOR(v) ((v) & 0x0f)
@@ -435,8 +440,10 @@ enum {
#define VTD_FLAG_TRANS_PRE_ENABLED (1 << 0)
#define VTD_FLAG_IRQ_REMAP_PRE_ENABLED (1 << 1)
+#define VTD_FLAG_SVM_CAPABLE (1 << 2)
extern int intel_iommu_sm;
+extern spinlock_t device_domain_lock;
#define sm_supported(iommu) (intel_iommu_sm && ecap_smts((iommu)->ecap))
#define pasid_supported(iommu) (sm_supported(iommu) && \
@@ -609,10 +616,11 @@ static inline void dma_clear_pte(struct dma_pte *pte)
static inline u64 dma_pte_addr(struct dma_pte *pte)
{
#ifdef CONFIG_64BIT
- return pte->val & VTD_PAGE_MASK;
+ return pte->val & VTD_PAGE_MASK & (~DMA_FL_PTE_XD);
#else
/* Must have a full atomic 64-bit read */
- return __cmpxchg64(&pte->val, 0ULL, 0ULL) & VTD_PAGE_MASK;
+ return __cmpxchg64(&pte->val, 0ULL, 0ULL) &
+ VTD_PAGE_MASK & (~DMA_FL_PTE_XD);
#endif
}
@@ -645,6 +653,8 @@ extern void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
unsigned int size_order, u64 type);
extern void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid,
u16 qdep, u64 addr, unsigned mask);
+void qi_flush_piotlb(struct intel_iommu *iommu, u16 did, u32 pasid, u64 addr,
+ unsigned long npages, bool ih);
extern int qi_submit_sync(struct qi_desc *desc, struct intel_iommu *iommu);
extern int dmar_ir_support(void);
@@ -656,9 +666,10 @@ int for_each_device_domain(int (*fn)(struct device_domain_info *info,
void *data), void *data);
void iommu_flush_write_buffer(struct intel_iommu *iommu);
int intel_iommu_enable_pasid(struct intel_iommu *iommu, struct device *dev);
+struct dmar_domain *find_domain(struct device *dev);
#ifdef CONFIG_INTEL_IOMMU_SVM
-int intel_svm_init(struct intel_iommu *iommu);
+extern void intel_svm_check(struct intel_iommu *iommu);
extern int intel_svm_enable_prq(struct intel_iommu *iommu);
extern int intel_svm_finish_prq(struct intel_iommu *iommu);
@@ -686,6 +697,8 @@ struct intel_svm {
};
extern struct intel_iommu *intel_svm_device_to_iommu(struct device *dev);
+#else
+static inline void intel_svm_check(struct intel_iommu *iommu) {}
#endif
#ifdef CONFIG_INTEL_IOMMU_DEBUGFS
diff --git a/include/linux/intel-svm.h b/include/linux/intel-svm.h
index 94f047a8a845..d7c403d0dd27 100644
--- a/include/linux/intel-svm.h
+++ b/include/linux/intel-svm.h
@@ -122,7 +122,7 @@ static inline int intel_svm_unbind_mm(struct device *dev, int pasid)
BUG();
}
-static int intel_svm_is_pasid_valid(struct device *dev, int pasid)
+static inline int intel_svm_is_pasid_valid(struct device *dev, int pasid)
{
return -EINVAL;
}
diff --git a/include/linux/interconnect-provider.h b/include/linux/interconnect-provider.h
index b16f9effa555..0c494534b4d3 100644
--- a/include/linux/interconnect-provider.h
+++ b/include/linux/interconnect-provider.h
@@ -92,17 +92,26 @@ struct icc_node {
#if IS_ENABLED(CONFIG_INTERCONNECT)
+int icc_std_aggregate(struct icc_node *node, u32 tag, u32 avg_bw,
+ u32 peak_bw, u32 *agg_avg, u32 *agg_peak);
struct icc_node *icc_node_create(int id);
void icc_node_destroy(int id);
int icc_link_create(struct icc_node *node, const int dst_id);
int icc_link_destroy(struct icc_node *src, struct icc_node *dst);
void icc_node_add(struct icc_node *node, struct icc_provider *provider);
void icc_node_del(struct icc_node *node);
+int icc_nodes_remove(struct icc_provider *provider);
int icc_provider_add(struct icc_provider *provider);
int icc_provider_del(struct icc_provider *provider);
#else
+static inline int icc_std_aggregate(struct icc_node *node, u32 tag, u32 avg_bw,
+ u32 peak_bw, u32 *agg_avg, u32 *agg_peak)
+{
+ return -ENOTSUPP;
+}
+
static inline struct icc_node *icc_node_create(int id)
{
return ERR_PTR(-ENOTSUPP);
@@ -130,6 +139,11 @@ void icc_node_del(struct icc_node *node)
{
}
+static inline int icc_nodes_remove(struct icc_provider *provider)
+{
+ return -ENOTSUPP;
+}
+
static inline int icc_provider_add(struct icc_provider *provider)
{
return -ENOTSUPP;
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 89fc59dab57d..80f637c3a6f3 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -140,6 +140,19 @@ request_threaded_irq(unsigned int irq, irq_handler_t handler,
irq_handler_t thread_fn,
unsigned long flags, const char *name, void *dev);
+/**
+ * request_irq - Add a handler for an interrupt line
+ * @irq: The interrupt line to allocate
+ * @handler: Function to be called when the IRQ occurs.
+ * Primary handler for threaded interrupts;
+ * if NULL, the default primary handler is installed.
+ * @flags: Handling flags
+ * @name: Name of the device generating this interrupt
+ * @dev: A cookie passed to the handler function
+ *
+ * This call allocates an interrupt and establishes a handler; see
+ * the documentation for request_threaded_irq() for details.
+ */
static inline int __must_check
request_irq(unsigned int irq, irq_handler_t handler, unsigned long flags,
const char *name, void *dev)
@@ -235,6 +248,8 @@ extern void enable_percpu_nmi(unsigned int irq, unsigned int type);
extern int prepare_percpu_nmi(unsigned int irq);
extern void teardown_percpu_nmi(unsigned int irq);
+extern int irq_inject_interrupt(unsigned int irq);
+
/* The following three functions are for the core kernel use only. */
extern void suspend_device_irqs(void);
extern void resume_device_irqs(void);
@@ -520,8 +535,7 @@ enum
IRQ_POLL_SOFTIRQ,
TASKLET_SOFTIRQ,
SCHED_SOFTIRQ,
- HRTIMER_SOFTIRQ, /* Unused, but kept as tools rely on the
- numbering. Sigh! */
+ HRTIMER_SOFTIRQ,
RCU_SOFTIRQ, /* Preferable RCU should always be the last softirq */
NR_SOFTIRQS
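The kernel-doc added above summarizes the request_irq() wrapper; for completeness, the canonical call pattern looks like this. The device name and cookie are placeholders.

	#include <linux/interrupt.h>

	static irqreturn_t demo_isr(int irq, void *dev_id)
	{
		/* acknowledge the hardware, then tell the core we handled it */
		return IRQ_HANDLED;
	}

	static int demo_setup_irq(unsigned int irq, void *priv)
	{
		int ret;

		ret = request_irq(irq, demo_isr, IRQF_SHARED, "demo-device", priv);
		if (ret)
			return ret;

		/* ... later, in the teardown path: free_irq(irq, priv); */
		return 0;
	}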
diff --git a/include/linux/io-mapping.h b/include/linux/io-mapping.h
index 6e125e9b4187..b336622612f3 100644
--- a/include/linux/io-mapping.h
+++ b/include/linux/io-mapping.h
@@ -16,7 +16,7 @@
* The io_mapping mechanism provides an abstraction for mapping
* individual pages from an io device to the CPU in an efficient fashion.
*
- * See Documentation/io-mapping.txt
+ * See Documentation/driver-api/io-mapping.rst
*/
struct io_mapping {
@@ -28,6 +28,7 @@ struct io_mapping {
#ifdef CONFIG_HAVE_ATOMIC_IOMAP
+#include <linux/pfn.h>
#include <asm/iomap.h>
/*
* For small address space machines, mapping large objects
@@ -64,12 +65,10 @@ io_mapping_map_atomic_wc(struct io_mapping *mapping,
unsigned long offset)
{
resource_size_t phys_addr;
- unsigned long pfn;
BUG_ON(offset >= mapping->size);
phys_addr = mapping->base + offset;
- pfn = (unsigned long) (phys_addr >> PAGE_SHIFT);
- return iomap_atomic_prot_pfn(pfn, mapping->prot);
+ return iomap_atomic_prot_pfn(PHYS_PFN(phys_addr), mapping->prot);
}
static inline void
diff --git a/include/linux/io-pgtable.h b/include/linux/io-pgtable.h
index ec7a13405f10..53d53c6c2be9 100644
--- a/include/linux/io-pgtable.h
+++ b/include/linux/io-pgtable.h
@@ -83,12 +83,16 @@ struct io_pgtable_cfg {
* IO_PGTABLE_QUIRK_NON_STRICT: Skip issuing synchronous leaf TLBIs
* on unmap, for DMA domains using the flush queue mechanism for
* delayed invalidation.
+ *
+ * IO_PGTABLE_QUIRK_ARM_TTBR1: (ARM LPAE format) Configure the table
+ * for use in the upper half of a split address space.
*/
#define IO_PGTABLE_QUIRK_ARM_NS BIT(0)
#define IO_PGTABLE_QUIRK_NO_PERMS BIT(1)
#define IO_PGTABLE_QUIRK_TLBI_ON_MAP BIT(2)
#define IO_PGTABLE_QUIRK_ARM_MTK_EXT BIT(3)
#define IO_PGTABLE_QUIRK_NON_STRICT BIT(4)
+ #define IO_PGTABLE_QUIRK_ARM_TTBR1 BIT(5)
unsigned long quirks;
unsigned long pgsize_bitmap;
unsigned int ias;
@@ -100,18 +104,33 @@ struct io_pgtable_cfg {
/* Low-level data specific to the table format */
union {
struct {
- u64 ttbr[2];
- u64 tcr;
- u64 mair[2];
+ u64 ttbr;
+ struct {
+ u32 ips:3;
+ u32 tg:2;
+ u32 sh:2;
+ u32 orgn:2;
+ u32 irgn:2;
+ u32 tsz:6;
+ } tcr;
+ u64 mair;
} arm_lpae_s1_cfg;
struct {
u64 vttbr;
- u64 vtcr;
+ struct {
+ u32 ps:3;
+ u32 tg:2;
+ u32 sh:2;
+ u32 orgn:2;
+ u32 irgn:2;
+ u32 sl:2;
+ u32 tsz:6;
+ } vtcr;
} arm_lpae_s2_cfg;
struct {
- u32 ttbr[2];
+ u32 ttbr;
u32 tcr;
u32 nmrr;
u32 prrr;
diff --git a/include/linux/io.h b/include/linux/io.h
index accac822336a..b1c44bb4b2d7 100644
--- a/include/linux/io.h
+++ b/include/linux/io.h
@@ -64,7 +64,7 @@ static inline void devm_ioport_unmap(struct device *dev, void __iomem *addr)
void __iomem *devm_ioremap(struct device *dev, resource_size_t offset,
resource_size_t size);
-void __iomem *devm_ioremap_nocache(struct device *dev, resource_size_t offset,
+void __iomem *devm_ioremap_uc(struct device *dev, resource_size_t offset,
resource_size_t size);
void __iomem *devm_ioremap_wc(struct device *dev, resource_size_t offset,
resource_size_t size);
@@ -85,7 +85,7 @@ void *__devm_memremap_pages(struct device *dev, struct resource *res);
* Posting") mandate non-posted configuration transactions. There is
* no ioremap API in the kernel that can guarantee non-posted write
* semantics across arches so provide a default implementation for
- * mapping PCI config space that defaults to ioremap_nocache(); arches
+ * mapping PCI config space that defaults to ioremap(); arches
* should override it if they have memory mapping implementations that
* guarantee non-posted writes semantics to make the memory mapping
* compliant with the PCI specification.
@@ -95,7 +95,7 @@ void *__devm_memremap_pages(struct device *dev, struct resource *res);
static inline void __iomem *pci_remap_cfgspace(phys_addr_t offset,
size_t size)
{
- return ioremap_nocache(offset, size);
+ return ioremap(offset, size);
}
#endif
#endif
diff --git a/include/linux/ioasid.h b/include/linux/ioasid.h
new file mode 100644
index 000000000000..6f000d7a0ddc
--- /dev/null
+++ b/include/linux/ioasid.h
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_IOASID_H
+#define __LINUX_IOASID_H
+
+#include <linux/types.h>
+#include <linux/errno.h>
+
+#define INVALID_IOASID ((ioasid_t)-1)
+typedef unsigned int ioasid_t;
+typedef ioasid_t (*ioasid_alloc_fn_t)(ioasid_t min, ioasid_t max, void *data);
+typedef void (*ioasid_free_fn_t)(ioasid_t ioasid, void *data);
+
+struct ioasid_set {
+ int dummy;
+};
+
+/**
+ * struct ioasid_allocator_ops - IOASID allocator helper functions and data
+ *
+ * @alloc: helper function to allocate IOASID
+ * @free: helper function to free IOASID
+ * @list: for tracking ops that share helper functions but not data
+ * @pdata: data belonging to the allocator, provided when calling alloc()
+ */
+struct ioasid_allocator_ops {
+ ioasid_alloc_fn_t alloc;
+ ioasid_free_fn_t free;
+ struct list_head list;
+ void *pdata;
+};
+
+#define DECLARE_IOASID_SET(name) struct ioasid_set name = { 0 }
+
+#if IS_ENABLED(CONFIG_IOASID)
+ioasid_t ioasid_alloc(struct ioasid_set *set, ioasid_t min, ioasid_t max,
+ void *private);
+void ioasid_free(ioasid_t ioasid);
+void *ioasid_find(struct ioasid_set *set, ioasid_t ioasid,
+ bool (*getter)(void *));
+int ioasid_register_allocator(struct ioasid_allocator_ops *allocator);
+void ioasid_unregister_allocator(struct ioasid_allocator_ops *allocator);
+int ioasid_set_data(ioasid_t ioasid, void *data);
+
+#else /* !CONFIG_IOASID */
+static inline ioasid_t ioasid_alloc(struct ioasid_set *set, ioasid_t min,
+ ioasid_t max, void *private)
+{
+ return INVALID_IOASID;
+}
+
+static inline void ioasid_free(ioasid_t ioasid)
+{
+}
+
+static inline void *ioasid_find(struct ioasid_set *set, ioasid_t ioasid,
+ bool (*getter)(void *))
+{
+ return NULL;
+}
+
+static inline int ioasid_register_allocator(struct ioasid_allocator_ops *allocator)
+{
+ return -ENOTSUPP;
+}
+
+static inline void ioasid_unregister_allocator(struct ioasid_allocator_ops *allocator)
+{
+}
+
+static inline int ioasid_set_data(ioasid_t ioasid, void *data)
+{
+ return -ENOTSUPP;
+}
+
+#endif /* CONFIG_IOASID */
+#endif /* __LINUX_IOASID_H */
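A toy custom allocator, to show how the ops structure above plugs in; the ID pool below is purely illustrative and not a real allocation policy.

	#include <linux/ioasid.h>

	static ioasid_t demo_ioasid_alloc(ioasid_t min, ioasid_t max, void *data)
	{
		static ioasid_t next = 32;	/* toy pool, no locking or reuse */

		if (next < min || next > max)
			return INVALID_IOASID;
		return next++;
	}

	static void demo_ioasid_free(ioasid_t ioasid, void *data)
	{
		/* nothing to reclaim in this toy example */
	}

	static struct ioasid_allocator_ops demo_ioasid_ops = {
		.alloc	= demo_ioasid_alloc,
		.free	= demo_ioasid_free,
	};

	/* registration, e.g. from an init function:
	 *	ioasid_register_allocator(&demo_ioasid_ops);
	 */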
diff --git a/include/linux/ioc3.h b/include/linux/ioc3.h
deleted file mode 100644
index 38b286e9a46c..000000000000
--- a/include/linux/ioc3.h
+++ /dev/null
@@ -1,93 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Copyright (c) 2005 Stanislaw Skowronek <skylark@linux-mips.org>
- */
-
-#ifndef _LINUX_IOC3_H
-#define _LINUX_IOC3_H
-
-#include <asm/sn/ioc3.h>
-
-#define IOC3_MAX_SUBMODULES 32
-
-#define IOC3_CLASS_NONE 0
-#define IOC3_CLASS_BASE_IP27 1
-#define IOC3_CLASS_BASE_IP30 2
-#define IOC3_CLASS_MENET_123 3
-#define IOC3_CLASS_MENET_4 4
-#define IOC3_CLASS_CADDUO 5
-#define IOC3_CLASS_SERIAL 6
-
-/* One of these per IOC3 */
-struct ioc3_driver_data {
- struct list_head list;
- int id; /* IOC3 sequence number */
- /* PCI mapping */
- unsigned long pma; /* physical address */
- struct ioc3 __iomem *vma; /* pointer to registers */
- struct pci_dev *pdev; /* PCI device */
- /* IRQ stuff */
- int dual_irq; /* set if separate IRQs are used */
- int irq_io, irq_eth; /* IRQ numbers */
- /* GPIO magic */
- spinlock_t gpio_lock;
- unsigned int gpdr_shadow;
- /* NIC identifiers */
- char nic_part[32];
- char nic_serial[16];
- char nic_mac[6];
- /* submodule set */
- int class;
- void *data[IOC3_MAX_SUBMODULES]; /* for submodule use */
- int active[IOC3_MAX_SUBMODULES]; /* set if probe succeeds */
- /* is_ir_lock must be held while
- * modifying sio_ie values, so
- * we can be sure that sio_ie is
- * not changing when we read it
- * along with sio_ir.
- */
- spinlock_t ir_lock; /* SIO_IE[SC] mod lock */
-};
-
-/* One per submodule */
-struct ioc3_submodule {
- char *name; /* descriptive submodule name */
- struct module *owner; /* owning kernel module */
- int ethernet; /* set for ethernet drivers */
- int (*probe) (struct ioc3_submodule *, struct ioc3_driver_data *);
- int (*remove) (struct ioc3_submodule *, struct ioc3_driver_data *);
- int id; /* assigned by IOC3, index for the "data" array */
- /* IRQ stuff */
- unsigned int irq_mask; /* IOC3 IRQ mask, leave clear for Ethernet */
- int reset_mask; /* non-zero if you want the ioc3.c module to reset interrupts */
- int (*intr) (struct ioc3_submodule *, struct ioc3_driver_data *, unsigned int);
- /* private submodule data */
- void *data; /* assigned by submodule */
-};
-
-/**********************************
- * Functions needed by submodules *
- **********************************/
-
-#define IOC3_W_IES 0
-#define IOC3_W_IEC 1
-
-/* registers a submodule for all existing and future IOC3 chips */
-extern int ioc3_register_submodule(struct ioc3_submodule *);
-/* unregisters a submodule */
-extern void ioc3_unregister_submodule(struct ioc3_submodule *);
-/* enables IRQs indicated by irq_mask for a specified IOC3 chip */
-extern void ioc3_enable(struct ioc3_submodule *, struct ioc3_driver_data *, unsigned int);
-/* ackowledges specified IRQs */
-extern void ioc3_ack(struct ioc3_submodule *, struct ioc3_driver_data *, unsigned int);
-/* disables IRQs indicated by irq_mask for a specified IOC3 chip */
-extern void ioc3_disable(struct ioc3_submodule *, struct ioc3_driver_data *, unsigned int);
-/* atomically sets GPCR bits */
-extern void ioc3_gpcr_set(struct ioc3_driver_data *, unsigned int);
-/* general ireg writer */
-extern void ioc3_write_ireg(struct ioc3_driver_data *idd, uint32_t value, int reg);
-
-#endif
diff --git a/include/linux/iocontext.h b/include/linux/iocontext.h
index dba15ca8e60b..1dcd9198beb7 100644
--- a/include/linux/iocontext.h
+++ b/include/linux/iocontext.h
@@ -8,6 +8,7 @@
enum {
ICQ_EXITED = 1 << 2,
+ ICQ_DESTROYED = 1 << 3,
};
/*
diff --git a/include/linux/iomap.h b/include/linux/iomap.h
index 7aa5d6117936..8b09463dae0d 100644
--- a/include/linux/iomap.h
+++ b/include/linux/iomap.h
@@ -4,6 +4,7 @@
#include <linux/atomic.h>
#include <linux/bitmap.h>
+#include <linux/blk_types.h>
#include <linux/mm.h>
#include <linux/types.h>
#include <linux/mm_types.h>
@@ -12,6 +13,7 @@
struct address_space;
struct fiemap_extent_info;
struct inode;
+struct iomap_writepage_ctx;
struct iov_iter;
struct kiocb;
struct page;
@@ -21,28 +23,45 @@ struct vm_fault;
/*
* Types of block ranges for iomap mappings:
*/
-#define IOMAP_HOLE 0x01 /* no blocks allocated, need allocation */
-#define IOMAP_DELALLOC 0x02 /* delayed allocation blocks */
-#define IOMAP_MAPPED 0x03 /* blocks allocated at @addr */
-#define IOMAP_UNWRITTEN 0x04 /* blocks allocated at @addr in unwritten state */
-#define IOMAP_INLINE 0x05 /* data inline in the inode */
+#define IOMAP_HOLE 0 /* no blocks allocated, need allocation */
+#define IOMAP_DELALLOC 1 /* delayed allocation blocks */
+#define IOMAP_MAPPED 2 /* blocks allocated at @addr */
+#define IOMAP_UNWRITTEN 3 /* blocks allocated at @addr in unwritten state */
+#define IOMAP_INLINE 4 /* data inline in the inode */
/*
- * Flags for all iomap mappings:
+ * Flags reported by the file system from iomap_begin:
+ *
+ * IOMAP_F_NEW indicates that the blocks have been newly allocated and need
+ * zeroing for areas to which no data is copied.
*
* IOMAP_F_DIRTY indicates the inode has uncommitted metadata needed to access
* written data and requires fdatasync to commit them to persistent storage.
+ * This needs to take into account metadata changes that *may* be made at IO
+ * completion, such as file size updates from direct IO.
+ *
+ * IOMAP_F_SHARED indicates that the blocks are shared, and will need to be
+ * unshared as part of a write.
+ *
+ * IOMAP_F_MERGED indicates that the iomap contains the merge of multiple block
+ * mappings.
+ *
+ * IOMAP_F_BUFFER_HEAD indicates that the file system requires the use of
+ * buffer heads for this mapping.
*/
-#define IOMAP_F_NEW 0x01 /* blocks have been newly allocated */
-#define IOMAP_F_DIRTY 0x02 /* uncommitted metadata */
-#define IOMAP_F_BUFFER_HEAD 0x04 /* file system requires buffer heads */
-#define IOMAP_F_SIZE_CHANGED 0x08 /* file size has changed */
+#define IOMAP_F_NEW 0x01
+#define IOMAP_F_DIRTY 0x02
+#define IOMAP_F_SHARED 0x04
+#define IOMAP_F_MERGED 0x08
+#define IOMAP_F_BUFFER_HEAD 0x10
/*
- * Flags that only need to be reported for IOMAP_REPORT requests:
+ * Flags set by the core iomap code during operations:
+ *
+ * IOMAP_F_SIZE_CHANGED indicates to the iomap_end method that the file size
+ * has changed as the result of this write operation.
*/
-#define IOMAP_F_MERGED 0x10 /* contains multiple blocks/extents */
-#define IOMAP_F_SHARED 0x20 /* block shared with another file */
+#define IOMAP_F_SIZE_CHANGED 0x100
/*
* Flags from 0x1000 up are for file system specific usage:
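The regrouped flags above are easiest to read from the filesystem side. The sketch below is illustrative only and not part of this patch (all myfs_* names are hypothetical): an ->iomap_begin implementation reports the filesystem-owned flags for the extent it found, while IOMAP_F_SIZE_CHANGED is left to the core iomap code.

static int myfs_iomap_begin(struct inode *inode, loff_t pos, loff_t length,
                            unsigned flags, struct iomap *iomap,
                            struct iomap *srcmap)
{
        /* extent lookup elided; assume a freshly allocated, reflinked
         * extent was found covering @pos */
        iomap->type = IOMAP_MAPPED;
        iomap->offset = pos;
        iomap->length = length;

        /* flags reported by the filesystem (first group above) */
        iomap->flags |= IOMAP_F_NEW | IOMAP_F_SHARED;

        /* IOMAP_F_SIZE_CHANGED is set later by the core iomap code, not here */
        return 0;
}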
@@ -110,7 +129,8 @@ struct iomap_ops {
* The actual length is returned in iomap->length.
*/
int (*iomap_begin)(struct inode *inode, loff_t pos, loff_t length,
- unsigned flags, struct iomap *iomap);
+ unsigned flags, struct iomap *iomap,
+ struct iomap *srcmap);
/*
 * Commit and/or unreserve space previously allocated using iomap_begin.
@@ -126,29 +146,12 @@ struct iomap_ops {
* Main iomap iterator function.
*/
typedef loff_t (*iomap_actor_t)(struct inode *inode, loff_t pos, loff_t len,
- void *data, struct iomap *iomap);
+ void *data, struct iomap *iomap, struct iomap *srcmap);
loff_t iomap_apply(struct inode *inode, loff_t pos, loff_t length,
unsigned flags, const struct iomap_ops *ops, void *data,
iomap_actor_t actor);
-/*
- * Structure allocate for each page when block size < PAGE_SIZE to track
- * sub-page uptodate status and I/O completions.
- */
-struct iomap_page {
- atomic_t read_count;
- atomic_t write_count;
- DECLARE_BITMAP(uptodate, PAGE_SIZE / 512);
-};
-
-static inline struct iomap_page *to_iomap_page(struct page *page)
-{
- if (page_has_private(page))
- return (struct iomap_page *)page_private(page);
- return NULL;
-}
-
ssize_t iomap_file_buffered_write(struct kiocb *iocb, struct iov_iter *from,
const struct iomap_ops *ops);
int iomap_readpage(struct page *page, const struct iomap_ops *ops);
@@ -166,7 +169,7 @@ int iomap_migrate_page(struct address_space *mapping, struct page *newpage,
#else
#define iomap_migrate_page NULL
#endif
-int iomap_file_dirty(struct inode *inode, loff_t pos, loff_t len,
+int iomap_file_unshare(struct inode *inode, loff_t pos, loff_t len,
const struct iomap_ops *ops);
int iomap_zero_range(struct inode *inode, loff_t pos, loff_t len,
bool *did_zero, const struct iomap_ops *ops);
@@ -184,6 +187,63 @@ sector_t iomap_bmap(struct address_space *mapping, sector_t bno,
const struct iomap_ops *ops);
/*
+ * Structure for writeback I/O completions.
+ */
+struct iomap_ioend {
+ struct list_head io_list; /* next ioend in chain */
+ u16 io_type;
+ u16 io_flags; /* IOMAP_F_* */
+ struct inode *io_inode; /* file being written to */
+ size_t io_size; /* size of the extent */
+ loff_t io_offset; /* offset in the file */
+ void *io_private; /* file system private data */
+ struct bio *io_bio; /* bio being built */
+ struct bio io_inline_bio; /* MUST BE LAST! */
+};
+
+struct iomap_writeback_ops {
+ /*
+ * Required, maps the blocks so that writeback can be performed on
+ * the range starting at offset.
+ */
+ int (*map_blocks)(struct iomap_writepage_ctx *wpc, struct inode *inode,
+ loff_t offset);
+
+ /*
+ * Optional, allows the file systems to perform actions just before
+ * submitting the bio and/or override the bio end_io handler for complex
+ * operations like copy on write extent manipulation or unwritten extent
+ * conversions.
+ */
+ int (*prepare_ioend)(struct iomap_ioend *ioend, int status);
+
+ /*
+ * Optional, allows the file system to discard state on a page where
+ * we failed to submit any I/O.
+ */
+ void (*discard_page)(struct page *page);
+};
+
+struct iomap_writepage_ctx {
+ struct iomap iomap;
+ struct iomap_ioend *ioend;
+ const struct iomap_writeback_ops *ops;
+};
+
+void iomap_finish_ioends(struct iomap_ioend *ioend, int error);
+void iomap_ioend_try_merge(struct iomap_ioend *ioend,
+ struct list_head *more_ioends,
+ void (*merge_private)(struct iomap_ioend *ioend,
+ struct iomap_ioend *next));
+void iomap_sort_ioends(struct list_head *ioend_list);
+int iomap_writepage(struct page *page, struct writeback_control *wbc,
+ struct iomap_writepage_ctx *wpc,
+ const struct iomap_writeback_ops *ops);
+int iomap_writepages(struct address_space *mapping,
+ struct writeback_control *wbc, struct iomap_writepage_ctx *wpc,
+ const struct iomap_writeback_ops *ops);
+
+/*
* Flags for direct I/O ->end_io:
*/
#define IOMAP_DIO_UNWRITTEN (1 << 0) /* covers unwritten extent(s) */
@@ -195,7 +255,8 @@ struct iomap_dio_ops {
};
ssize_t iomap_dio_rw(struct kiocb *iocb, struct iov_iter *iter,
- const struct iomap_ops *ops, const struct iomap_dio_ops *dops);
+ const struct iomap_ops *ops, const struct iomap_dio_ops *dops,
+ bool wait_for_completion);
int iomap_dio_iopoll(struct kiocb *kiocb, bool spin);
#ifdef CONFIG_SWAP
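A rough sketch of how the new writeback hooks fit together, for orientation only; it is not part of this patch and the myfs_* names are hypothetical. The filesystem supplies map_blocks (and optionally prepare_ioend/discard_page) and hands an on-stack iomap_writepage_ctx to iomap_writepages() from its ->writepages method.

static int myfs_map_blocks(struct iomap_writepage_ctx *wpc,
                           struct inode *inode, loff_t offset)
{
        /* fill wpc->iomap with the extent backing @offset; return 0 or a
         * negative errno (the lookup itself is elided in this sketch) */
        return 0;
}

static const struct iomap_writeback_ops myfs_writeback_ops = {
        .map_blocks = myfs_map_blocks,
        /* .prepare_ioend and .discard_page are optional */
};

static int myfs_writepages(struct address_space *mapping,
                           struct writeback_control *wbc)
{
        struct iomap_writepage_ctx wpc = { };

        return iomap_writepages(mapping, wbc, &wpc, &myfs_writeback_ops);
}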
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index 29bac5345563..d1b5f4d98569 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -13,6 +13,7 @@
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/of.h>
+#include <linux/ioasid.h>
#include <uapi/linux/iommu.h>
#define IOMMU_READ (1 << 0)
@@ -31,11 +32,11 @@
*/
#define IOMMU_PRIV (1 << 5)
/*
- * Non-coherent masters on few Qualcomm SoCs can use this page protection flag
- * to set correct cacheability attributes to use an outer level of cache -
- * last level cache, aka system cache.
+ * Non-coherent masters can use this page protection flag to set cacheable
+ * memory attributes for only a transparent outer level of cache, also known as
+ * the last-level or system cache.
*/
-#define IOMMU_QCOM_SYS_CACHE (1 << 6)
+#define IOMMU_SYS_CACHE_ONLY (1 << 6)
struct iommu_ops;
struct iommu_group;
@@ -244,7 +245,11 @@ struct iommu_iotlb_gather {
* @sva_unbind: Unbind process address space from device
* @sva_get_pasid: Get PASID associated to a SVA handle
* @page_response: handle page request response
+ * @cache_invalidate: invalidate translation caches
+ * @sva_bind_gpasid: bind guest pasid and mm
+ * @sva_unbind_gpasid: unbind guest pasid and mm
* @pgsize_bitmap: bitmap of all possible supported page sizes
+ * @owner: Driver module providing these ops
*/
struct iommu_ops {
bool (*capable)(enum iommu_cap);
@@ -256,7 +261,7 @@ struct iommu_ops {
int (*attach_dev)(struct iommu_domain *domain, struct device *dev);
void (*detach_dev)(struct iommu_domain *domain, struct device *dev);
int (*map)(struct iommu_domain *domain, unsigned long iova,
- phys_addr_t paddr, size_t size, int prot);
+ phys_addr_t paddr, size_t size, int prot, gfp_t gfp);
size_t (*unmap)(struct iommu_domain *domain, unsigned long iova,
size_t size, struct iommu_iotlb_gather *iotlb_gather);
void (*flush_iotlb_all)(struct iommu_domain *domain);
@@ -306,8 +311,15 @@ struct iommu_ops {
int (*page_response)(struct device *dev,
struct iommu_fault_event *evt,
struct iommu_page_response *msg);
+ int (*cache_invalidate)(struct iommu_domain *domain, struct device *dev,
+ struct iommu_cache_invalidate_info *inv_info);
+ int (*sva_bind_gpasid)(struct iommu_domain *domain,
+ struct device *dev, struct iommu_gpasid_bind_data *data);
+
+ int (*sva_unbind_gpasid)(struct device *dev, int pasid);
unsigned long pgsize_bitmap;
+ struct module *owner;
};
/**
@@ -376,12 +388,19 @@ void iommu_device_sysfs_remove(struct iommu_device *iommu);
int iommu_device_link(struct iommu_device *iommu, struct device *link);
void iommu_device_unlink(struct iommu_device *iommu, struct device *link);
-static inline void iommu_device_set_ops(struct iommu_device *iommu,
- const struct iommu_ops *ops)
+static inline void __iommu_device_set_ops(struct iommu_device *iommu,
+ const struct iommu_ops *ops)
{
iommu->ops = ops;
}
+#define iommu_device_set_ops(iommu, ops) \
+do { \
+ struct iommu_ops *__ops = (struct iommu_ops *)(ops); \
+ __ops->owner = THIS_MODULE; \
+ __iommu_device_set_ops(iommu, __ops); \
+} while (0)
+
static inline void iommu_device_set_fwnode(struct iommu_device *iommu,
struct fwnode_handle *fwnode)
{
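To illustrate the effect of the iommu_device_set_ops() wrapper added above: the sketch below is not part of this patch, and my_smmu/my_iommu_ops are hypothetical driver symbols. The macro simply stamps the calling module into ops->owner before handing the table to __iommu_device_set_ops().

struct my_smmu {                                /* hypothetical driver state */
        struct iommu_device iommu;
};

static struct iommu_ops my_iommu_ops;           /* hypothetical ops table */

static int my_iommu_register(struct my_smmu *smmu)
{
        /* expands to: my_iommu_ops.owner = THIS_MODULE;
         *             __iommu_device_set_ops(&smmu->iommu, &my_iommu_ops); */
        iommu_device_set_ops(&smmu->iommu, &my_iommu_ops);
        return iommu_device_register(&smmu->iommu);
}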
@@ -417,10 +436,19 @@ extern int iommu_attach_device(struct iommu_domain *domain,
struct device *dev);
extern void iommu_detach_device(struct iommu_domain *domain,
struct device *dev);
+extern int iommu_cache_invalidate(struct iommu_domain *domain,
+ struct device *dev,
+ struct iommu_cache_invalidate_info *inv_info);
+extern int iommu_sva_bind_gpasid(struct iommu_domain *domain,
+ struct device *dev, struct iommu_gpasid_bind_data *data);
+extern int iommu_sva_unbind_gpasid(struct iommu_domain *domain,
+ struct device *dev, ioasid_t pasid);
extern struct iommu_domain *iommu_get_domain_for_dev(struct device *dev);
extern struct iommu_domain *iommu_get_dma_domain(struct device *dev);
extern int iommu_map(struct iommu_domain *domain, unsigned long iova,
phys_addr_t paddr, size_t size, int prot);
+extern int iommu_map_atomic(struct iommu_domain *domain, unsigned long iova,
+ phys_addr_t paddr, size_t size, int prot);
extern size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova,
size_t size);
extern size_t iommu_unmap_fast(struct iommu_domain *domain,
@@ -428,12 +456,17 @@ extern size_t iommu_unmap_fast(struct iommu_domain *domain,
struct iommu_iotlb_gather *iotlb_gather);
extern size_t iommu_map_sg(struct iommu_domain *domain, unsigned long iova,
struct scatterlist *sg,unsigned int nents, int prot);
+extern size_t iommu_map_sg_atomic(struct iommu_domain *domain,
+ unsigned long iova, struct scatterlist *sg,
+ unsigned int nents, int prot);
extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova);
extern void iommu_set_fault_handler(struct iommu_domain *domain,
iommu_fault_handler_t handler, void *token);
extern void iommu_get_resv_regions(struct device *dev, struct list_head *list);
extern void iommu_put_resv_regions(struct device *dev, struct list_head *list);
+extern void generic_iommu_put_resv_regions(struct device *dev,
+ struct list_head *list);
extern int iommu_request_dm_for_dev(struct device *dev);
extern int iommu_request_dma_domain_for_dev(struct device *dev);
extern void iommu_set_default_passthrough(bool cmd_line);
@@ -548,6 +581,7 @@ struct iommu_group *fsl_mc_device_group(struct device *dev);
* @ops: ops for this device's IOMMU
* @iommu_fwnode: firmware handle for this device's IOMMU
* @iommu_priv: IOMMU driver private data for this device
+ * @num_pasid_bits: number of PASID bits supported by this device
* @num_ids: number of associated device IDs
* @ids: IDs which this device may present to the IOMMU
*/
@@ -556,6 +590,7 @@ struct iommu_fwspec {
struct fwnode_handle *iommu_fwnode;
void *iommu_priv;
u32 flags;
+ u32 num_pasid_bits;
unsigned int num_ids;
u32 ids[1];
};
@@ -662,6 +697,13 @@ static inline int iommu_map(struct iommu_domain *domain, unsigned long iova,
return -ENODEV;
}
+static inline int iommu_map_atomic(struct iommu_domain *domain,
+ unsigned long iova, phys_addr_t paddr,
+ size_t size, int prot)
+{
+ return -ENODEV;
+}
+
static inline size_t iommu_unmap(struct iommu_domain *domain,
unsigned long iova, size_t size)
{
@@ -682,6 +724,13 @@ static inline size_t iommu_map_sg(struct iommu_domain *domain,
return 0;
}
+static inline size_t iommu_map_sg_atomic(struct iommu_domain *domain,
+ unsigned long iova, struct scatterlist *sg,
+ unsigned int nents, int prot)
+{
+ return 0;
+}
+
static inline void iommu_flush_tlb_all(struct iommu_domain *domain)
{
}
@@ -1005,6 +1054,25 @@ static inline int iommu_sva_get_pasid(struct iommu_sva *handle)
return IOMMU_PASID_INVALID;
}
+static inline int
+iommu_cache_invalidate(struct iommu_domain *domain,
+ struct device *dev,
+ struct iommu_cache_invalidate_info *inv_info)
+{
+ return -ENODEV;
+}
+static inline int iommu_sva_bind_gpasid(struct iommu_domain *domain,
+ struct device *dev, struct iommu_gpasid_bind_data *data)
+{
+ return -ENODEV;
+}
+
+static inline int iommu_sva_unbind_gpasid(struct iommu_domain *domain,
+ struct device *dev, int pasid)
+{
+ return -ENODEV;
+}
+
#endif /* CONFIG_IOMMU_API */
#ifdef CONFIG_IOMMU_DEBUGFS
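For context on the _atomic map variants declared (and stubbed) above: they appear intended for callers that cannot sleep, mirroring iommu_map()/iommu_map_sg(). A minimal, hypothetical usage sketch, not part of this patch:

static int my_map_one_page(struct iommu_domain *domain, unsigned long iova,
                           phys_addr_t paddr)
{
        /* usable from atomic context; with CONFIG_IOMMU_API disabled the
         * inline stub above returns -ENODEV instead */
        return iommu_map_atomic(domain, iova, paddr, PAGE_SIZE,
                                IOMMU_READ | IOMMU_WRITE);
}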
diff --git a/include/linux/ioport.h b/include/linux/ioport.h
index 7bddddfc76d6..a9b9170b5dd2 100644
--- a/include/linux/ioport.h
+++ b/include/linux/ioport.h
@@ -134,6 +134,7 @@ enum {
IORES_DESC_PERSISTENT_MEMORY_LEGACY = 5,
IORES_DESC_DEVICE_PRIVATE_MEMORY = 6,
IORES_DESC_RESERVED = 7,
+ IORES_DESC_SOFT_RESERVED = 8,
};
/*
diff --git a/include/linux/ipmi-fru.h b/include/linux/ipmi-fru.h
deleted file mode 100644
index 05c9422624c6..000000000000
--- a/include/linux/ipmi-fru.h
+++ /dev/null
@@ -1,134 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0+ */
-/*
- * Copyright (C) 2012 CERN (www.cern.ch)
- * Author: Alessandro Rubini <rubini@gnudd.com>
- *
- * This work is part of the White Rabbit project, a research effort led
- * by CERN, the European Institute for Nuclear Research.
- */
-#ifndef __LINUX_IPMI_FRU_H__
-#define __LINUX_IPMI_FRU_H__
-#ifdef __KERNEL__
-# include <linux/types.h>
-# include <linux/string.h>
-#else
-# include <stdint.h>
-# include <string.h>
-#endif
-
-/*
- * These structures match the unaligned crap we have in FRU1011.pdf
- * (http://download.intel.com/design/servers/ipmi/FRU1011.pdf)
- */
-
-/* chapter 8, page 5 */
-struct fru_common_header {
- uint8_t format; /* 0x01 */
- uint8_t internal_use_off; /* multiple of 8 bytes */
- uint8_t chassis_info_off; /* multiple of 8 bytes */
- uint8_t board_area_off; /* multiple of 8 bytes */
- uint8_t product_area_off; /* multiple of 8 bytes */
- uint8_t multirecord_off; /* multiple of 8 bytes */
- uint8_t pad; /* must be 0 */
- uint8_t checksum; /* sum modulo 256 must be 0 */
-};
-
-/* chapter 9, page 5 -- internal_use: not used by us */
-
-/* chapter 10, page 6 -- chassis info: not used by us */
-
-/* chapter 13, page 9 -- used by board_info_area below */
-struct fru_type_length {
- uint8_t type_length;
- uint8_t data[0];
-};
-
-/* chapter 11, page 7 */
-struct fru_board_info_area {
- uint8_t format; /* 0x01 */
- uint8_t area_len; /* multiple of 8 bytes */
- uint8_t language; /* I hope it's 0 */
- uint8_t mfg_date[3]; /* LSB, minutes since 1996-01-01 */
- struct fru_type_length tl[0]; /* type-length stuff follows */
-
- /*
- * the TL there are in order:
- * Board Manufacturer
- * Board Product Name
- * Board Serial Number
- * Board Part Number
- * FRU File ID (may be null)
- * more manufacturer-specific stuff
- * 0xc1 as a terminator
- * 0x00 pad to a multiple of 8 bytes - 1
- * checksum (sum of all stuff module 256 must be zero)
- */
-};
-
-enum fru_type {
- FRU_TYPE_BINARY = 0x00,
- FRU_TYPE_BCDPLUS = 0x40,
- FRU_TYPE_ASCII6 = 0x80,
- FRU_TYPE_ASCII = 0xc0, /* not ascii: depends on language */
-};
-
-/*
- * some helpers
- */
-static inline struct fru_board_info_area *fru_get_board_area(
- const struct fru_common_header *header)
-{
- /* we know for sure that the header is 8 bytes in size */
- return (struct fru_board_info_area *)(header + header->board_area_off);
-}
-
-static inline int fru_type(struct fru_type_length *tl)
-{
- return tl->type_length & 0xc0;
-}
-
-static inline int fru_length(struct fru_type_length *tl)
-{
- return (tl->type_length & 0x3f) + 1; /* len of whole record */
-}
-
-/* assume ascii-latin1 encoding */
-static inline int fru_strlen(struct fru_type_length *tl)
-{
- return fru_length(tl) - 1;
-}
-
-static inline char *fru_strcpy(char *dest, struct fru_type_length *tl)
-{
- int len = fru_strlen(tl);
- memcpy(dest, tl->data, len);
- dest[len] = '\0';
- return dest;
-}
-
-static inline struct fru_type_length *fru_next_tl(struct fru_type_length *tl)
-{
- return tl + fru_length(tl);
-}
-
-static inline int fru_is_eof(struct fru_type_length *tl)
-{
- return tl->type_length == 0xc1;
-}
-
-/*
- * External functions defined in fru-parse.c.
- */
-extern int fru_header_cksum_ok(struct fru_common_header *header);
-extern int fru_bia_cksum_ok(struct fru_board_info_area *bia);
-
-/* All these 4 return allocated strings by calling fru_alloc() */
-extern char *fru_get_board_manufacturer(struct fru_common_header *header);
-extern char *fru_get_product_name(struct fru_common_header *header);
-extern char *fru_get_serial_number(struct fru_common_header *header);
-extern char *fru_get_part_number(struct fru_common_header *header);
-
-/* This must be defined by the caller of the above functions */
-extern void *fru_alloc(size_t size);
-
-#endif /* __LINUX_IMPI_FRU_H__ */
diff --git a/include/linux/ipmi_smi.h b/include/linux/ipmi_smi.h
index 4dc66157d872..deec18b8944a 100644
--- a/include/linux/ipmi_smi.h
+++ b/include/linux/ipmi_smi.h
@@ -224,10 +224,14 @@ static inline int ipmi_demangle_device_id(uint8_t netfn, uint8_t cmd,
* is called, and the lower layer must get the interface from that
* call.
*/
-int ipmi_register_smi(const struct ipmi_smi_handlers *handlers,
- void *send_info,
- struct device *dev,
- unsigned char slave_addr);
+int ipmi_add_smi(struct module *owner,
+ const struct ipmi_smi_handlers *handlers,
+ void *send_info,
+ struct device *dev,
+ unsigned char slave_addr);
+
+#define ipmi_register_smi(handlers, send_info, dev, slave_addr) \
+ ipmi_add_smi(THIS_MODULE, handlers, send_info, dev, slave_addr)
/*
* Remove a low-level interface from the IPMI driver. This will
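Existing low-level drivers keep calling ipmi_register_smi(); the macro above forwards THIS_MODULE so the core can pin the owning module. A hedged sketch, not part of this patch (my_smi_handlers, my_info and the 0x20 slave address are hypothetical example values):

static const struct ipmi_smi_handlers my_smi_handlers;  /* callbacks elided */

static int my_smi_attach(struct device *dev, void *my_info)
{
        /* expands to ipmi_add_smi(THIS_MODULE, ...) */
        return ipmi_register_smi(&my_smi_handlers, my_info, dev, 0x20);
}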
diff --git a/include/linux/irq.h b/include/linux/irq.h
index fb301cf29148..9315fbb87db3 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -209,6 +209,10 @@ struct irq_data {
* IRQD_SINGLE_TARGET - IRQ allows only a single affinity target
* IRQD_DEFAULT_TRIGGER_SET - Expected trigger already been set
* IRQD_CAN_RESERVE - Can use reservation mode
+ * IRQD_MSI_NOMASK_QUIRK - Non-maskable MSI quirk for affinity change
+ * required
+ * IRQD_HANDLE_ENFORCE_IRQCTX - Enforce that handle_irq_*() is only invoked
+ * from actual interrupt context.
*/
enum {
IRQD_TRIGGER_MASK = 0xf,
@@ -231,6 +235,8 @@ enum {
IRQD_SINGLE_TARGET = (1 << 24),
IRQD_DEFAULT_TRIGGER_SET = (1 << 25),
IRQD_CAN_RESERVE = (1 << 26),
+ IRQD_MSI_NOMASK_QUIRK = (1 << 27),
+ IRQD_HANDLE_ENFORCE_IRQCTX = (1 << 28),
};
#define __irqd_to_state(d) ACCESS_PRIVATE((d)->common, state_use_accessors)
@@ -300,6 +306,16 @@ static inline bool irqd_is_single_target(struct irq_data *d)
return __irqd_to_state(d) & IRQD_SINGLE_TARGET;
}
+static inline void irqd_set_handle_enforce_irqctx(struct irq_data *d)
+{
+ __irqd_to_state(d) |= IRQD_HANDLE_ENFORCE_IRQCTX;
+}
+
+static inline bool irqd_is_handle_enforce_irqctx(struct irq_data *d)
+{
+ return __irqd_to_state(d) & IRQD_HANDLE_ENFORCE_IRQCTX;
+}
+
static inline bool irqd_is_wakeup_set(struct irq_data *d)
{
return __irqd_to_state(d) & IRQD_WAKEUP_STATE;
@@ -390,6 +406,21 @@ static inline bool irqd_can_reserve(struct irq_data *d)
return __irqd_to_state(d) & IRQD_CAN_RESERVE;
}
+static inline void irqd_set_msi_nomask_quirk(struct irq_data *d)
+{
+ __irqd_to_state(d) |= IRQD_MSI_NOMASK_QUIRK;
+}
+
+static inline void irqd_clr_msi_nomask_quirk(struct irq_data *d)
+{
+ __irqd_to_state(d) &= ~IRQD_MSI_NOMASK_QUIRK;
+}
+
+static inline bool irqd_msi_nomask_quirk(struct irq_data *d)
+{
+ return __irqd_to_state(d) & IRQD_MSI_NOMASK_QUIRK;
+}
+
#undef __irqd_to_state
static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
@@ -610,6 +641,12 @@ extern int irq_chip_pm_put(struct irq_data *data);
#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
extern void handle_fasteoi_ack_irq(struct irq_desc *desc);
extern void handle_fasteoi_mask_irq(struct irq_desc *desc);
+extern int irq_chip_set_parent_state(struct irq_data *data,
+ enum irqchip_irq_state which,
+ bool val);
+extern int irq_chip_get_parent_state(struct irq_data *data,
+ enum irqchip_irq_state which,
+ bool *state);
extern void irq_chip_enable_parent(struct irq_data *data);
extern void irq_chip_disable_parent(struct irq_data *data);
extern void irq_chip_ack_parent(struct irq_data *data);
diff --git a/include/linux/irq_work.h b/include/linux/irq_work.h
index b11fcdfd0770..3b752e80c017 100644
--- a/include/linux/irq_work.h
+++ b/include/linux/irq_work.h
@@ -18,11 +18,13 @@
/* Doesn't want IPI, wait for tick: */
#define IRQ_WORK_LAZY BIT(2)
+/* Run hard IRQ context, even on RT */
+#define IRQ_WORK_HARD_IRQ BIT(3)
#define IRQ_WORK_CLAIMED (IRQ_WORK_PENDING | IRQ_WORK_BUSY)
struct irq_work {
- unsigned long flags;
+ atomic_t flags;
struct llist_node llnode;
void (*func)(struct irq_work *);
};
@@ -30,11 +32,15 @@ struct irq_work {
static inline
void init_irq_work(struct irq_work *work, void (*func)(struct irq_work *))
{
- work->flags = 0;
+ atomic_set(&work->flags, 0);
work->func = func;
}
-#define DEFINE_IRQ_WORK(name, _f) struct irq_work name = { .func = (_f), }
+#define DEFINE_IRQ_WORK(name, _f) struct irq_work name = { \
+ .flags = ATOMIC_INIT(0), \
+ .func = (_f) \
+}
+
bool irq_work_queue(struct irq_work *work);
bool irq_work_queue_on(struct irq_work *work, int cpu);
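With flags now an atomic_t, static and dynamic initialization look as sketched below (illustrative only, not part of this patch; the my_* names are hypothetical):

static void my_work_func(struct irq_work *work)
{
        /* runs from the irq_work interrupt/IPI */
}

static DEFINE_IRQ_WORK(my_static_work, my_work_func);  /* .flags = ATOMIC_INIT(0) */

static struct irq_work my_dynamic_work;

static void my_queue_both(void)
{
        init_irq_work(&my_dynamic_work, my_work_func);  /* atomic_set(&flags, 0) */
        irq_work_queue(&my_static_work);
        irq_work_queue(&my_dynamic_work);
}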
diff --git a/include/linux/irqchip/arm-gic-common.h b/include/linux/irqchip/arm-gic-common.h
index b9850f5f1906..fa8c0455c352 100644
--- a/include/linux/irqchip/arm-gic-common.h
+++ b/include/linux/irqchip/arm-gic-common.h
@@ -32,6 +32,8 @@ struct gic_kvm_info {
struct resource vctrl;
/* vlpi support */
bool has_v4;
+ /* rvpeid support */
+ bool has_v4_1;
};
const struct gic_kvm_info *gic_get_kvm_info(void);
diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h
index 5cc10cf7cb3e..765d9b769b69 100644
--- a/include/linux/irqchip/arm-gic-v3.h
+++ b/include/linux/irqchip/arm-gic-v3.h
@@ -13,6 +13,7 @@
#define GICD_CTLR 0x0000
#define GICD_TYPER 0x0004
#define GICD_IIDR 0x0008
+#define GICD_TYPER2 0x000C
#define GICD_STATUSR 0x0010
#define GICD_SETSPI_NSR 0x0040
#define GICD_CLRSPI_NSR 0x0048
@@ -56,6 +57,7 @@
#define GICD_SPENDSGIR 0x0F20
#define GICD_CTLR_RWP (1U << 31)
+#define GICD_CTLR_nASSGIreq (1U << 8)
#define GICD_CTLR_DS (1U << 6)
#define GICD_CTLR_ARE_NS (1U << 4)
#define GICD_CTLR_ENABLE_G1A (1U << 1)
@@ -89,6 +91,10 @@
#define GICD_TYPER_ESPIS(typer) \
(((typer) & GICD_TYPER_ESPI) ? GICD_TYPER_SPIS((typer) >> 27) : 0)
+#define GICD_TYPER2_nASSGIcap (1U << 8)
+#define GICD_TYPER2_VIL (1U << 7)
+#define GICD_TYPER2_VID GENMASK(4, 0)
+
#define GICD_IROUTER_SPI_MODE_ONE (0U << 31)
#define GICD_IROUTER_SPI_MODE_ANY (1U << 31)
@@ -98,6 +104,11 @@
#define GIC_V3_DIST_SIZE 0x10000
+#define GIC_PAGE_SIZE_4K 0ULL
+#define GIC_PAGE_SIZE_16K 1ULL
+#define GIC_PAGE_SIZE_64K 2ULL
+#define GIC_PAGE_SIZE_MASK 3ULL
+
/*
* Re-Distributor registers, offsets from RD_base
*/
@@ -234,6 +245,16 @@
#define GICR_TYPER_VLPIS (1U << 1)
#define GICR_TYPER_DirectLPIS (1U << 3)
#define GICR_TYPER_LAST (1U << 4)
+#define GICR_TYPER_RVPEID (1U << 7)
+#define GICR_TYPER_COMMON_LPI_AFF GENMASK_ULL(25, 24)
+#define GICR_TYPER_AFFINITY GENMASK_ULL(63, 32)
+
+#define GICR_INVLPIR_INTID GENMASK_ULL(31, 0)
+#define GICR_INVLPIR_VPEID GENMASK_ULL(47, 32)
+#define GICR_INVLPIR_V GENMASK_ULL(63, 63)
+
+#define GICR_INVALLR_VPEID GICR_INVLPIR_VPEID
+#define GICR_INVALLR_V GICR_INVLPIR_V
#define GIC_V3_REDIST_SIZE 0x20000
@@ -272,6 +293,18 @@
#define GICR_VPROPBASER_RaWaWt GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, RaWaWt)
#define GICR_VPROPBASER_RaWaWb GIC_BASER_CACHEABILITY(GICR_VPROPBASER, INNER, RaWaWb)
+/*
+ * GICv4.1 VPROPBASER reinvention. A subtle mix between the old
+ * VPROPBASER and ITS_BASER. Just not quite either of the two.
+ */
+#define GICR_VPROPBASER_4_1_VALID (1ULL << 63)
+#define GICR_VPROPBASER_4_1_ENTRY_SIZE GENMASK_ULL(61, 59)
+#define GICR_VPROPBASER_4_1_INDIRECT (1ULL << 55)
+#define GICR_VPROPBASER_4_1_PAGE_SIZE GENMASK_ULL(54, 53)
+#define GICR_VPROPBASER_4_1_Z (1ULL << 52)
+#define GICR_VPROPBASER_4_1_ADDR GENMASK_ULL(51, 12)
+#define GICR_VPROPBASER_4_1_SIZE GENMASK_ULL(6, 0)
+
#define GICR_VPENDBASER 0x0078
#define GICR_VPENDBASER_SHAREABILITY_SHIFT (10)
@@ -289,6 +322,9 @@
#define GICR_VPENDBASER_NonShareable \
GIC_BASER_SHAREABILITY(GICR_VPENDBASER, NonShareable)
+#define GICR_VPENDBASER_InnerShareable \
+ GIC_BASER_SHAREABILITY(GICR_VPENDBASER, InnerShareable)
+
#define GICR_VPENDBASER_nCnB GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, nCnB)
#define GICR_VPENDBASER_nC GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, nC)
#define GICR_VPENDBASER_RaWt GIC_BASER_CACHEABILITY(GICR_VPENDBASER, INNER, RaWt)
@@ -304,11 +340,30 @@
#define GICR_VPENDBASER_Valid (1ULL << 63)
/*
+ * GICv4.1 VPENDBASER, used for VPE residency. On top of these fields,
+ * also use the above Valid, PendingLast and Dirty.
+ */
+#define GICR_VPENDBASER_4_1_DB (1ULL << 62)
+#define GICR_VPENDBASER_4_1_VGRP0EN (1ULL << 59)
+#define GICR_VPENDBASER_4_1_VGRP1EN (1ULL << 58)
+#define GICR_VPENDBASER_4_1_VPEID GENMASK_ULL(15, 0)
+
+#define GICR_VSGIR 0x0080
+
+#define GICR_VSGIR_VPEID GENMASK(15, 0)
+
+#define GICR_VSGIPENDR 0x0088
+
+#define GICR_VSGIPENDR_BUSY (1U << 31)
+#define GICR_VSGIPENDR_PENDING GENMASK(15, 0)
+
+/*
* ITS registers, offsets from ITS_base
*/
#define GITS_CTLR 0x0000
#define GITS_IIDR 0x0004
#define GITS_TYPER 0x0008
+#define GITS_MPIDR 0x0018
#define GITS_CBASER 0x0080
#define GITS_CWRITER 0x0088
#define GITS_CREADR 0x0090
@@ -325,6 +380,11 @@
#define GITS_TRANSLATER 0x10040
+#define GITS_SGIR 0x20020
+
+#define GITS_SGIR_VPEID GENMASK_ULL(47, 32)
+#define GITS_SGIR_VINTID GENMASK_ULL(3, 0)
+
#define GITS_CTLR_ENABLE (1U << 0)
#define GITS_CTLR_ImDe (1U << 1)
#define GITS_CTLR_ITS_NUMBER_SHIFT 4
@@ -334,14 +394,16 @@
#define GITS_TYPER_PLPIS (1UL << 0)
#define GITS_TYPER_VLPIS (1UL << 1)
#define GITS_TYPER_ITT_ENTRY_SIZE_SHIFT 4
-#define GITS_TYPER_ITT_ENTRY_SIZE(r) ((((r) >> GITS_TYPER_ITT_ENTRY_SIZE_SHIFT) & 0xf) + 1)
+#define GITS_TYPER_ITT_ENTRY_SIZE GENMASK_ULL(7, 4)
#define GITS_TYPER_IDBITS_SHIFT 8
#define GITS_TYPER_DEVBITS_SHIFT 13
-#define GITS_TYPER_DEVBITS(r) ((((r) >> GITS_TYPER_DEVBITS_SHIFT) & 0x1f) + 1)
+#define GITS_TYPER_DEVBITS GENMASK_ULL(17, 13)
#define GITS_TYPER_PTA (1UL << 19)
#define GITS_TYPER_HCC_SHIFT 24
#define GITS_TYPER_HCC(r) (((r) >> GITS_TYPER_HCC_SHIFT) & 0xff)
#define GITS_TYPER_VMOVP (1ULL << 37)
+#define GITS_TYPER_VMAPP (1ULL << 40)
+#define GITS_TYPER_SVPET GENMASK_ULL(42, 41)
#define GITS_IIDR_REV_SHIFT 12
#define GITS_IIDR_REV_MASK (0xf << GITS_IIDR_REV_SHIFT)
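GITS_TYPER_ITT_ENTRY_SIZE and GITS_TYPER_DEVBITS switch from shift-and-mask helper macros to GENMASK field definitions, so readers would presumably extract them with FIELD_GET() and re-apply the "+ 1" encoding themselves. A hedged sketch, not part of this patch (my_its_devbits is hypothetical):

#include <linux/bitfield.h>

static unsigned int my_its_devbits(u64 typer)
{
        /* the field encodes "number of DeviceID bits minus one" */
        return FIELD_GET(GITS_TYPER_DEVBITS, typer) + 1;
}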
@@ -412,10 +474,11 @@
#define GITS_BASER_InnerShareable \
GIC_BASER_SHAREABILITY(GITS_BASER, InnerShareable)
#define GITS_BASER_PAGE_SIZE_SHIFT (8)
-#define GITS_BASER_PAGE_SIZE_4K (0ULL << GITS_BASER_PAGE_SIZE_SHIFT)
-#define GITS_BASER_PAGE_SIZE_16K (1ULL << GITS_BASER_PAGE_SIZE_SHIFT)
-#define GITS_BASER_PAGE_SIZE_64K (2ULL << GITS_BASER_PAGE_SIZE_SHIFT)
-#define GITS_BASER_PAGE_SIZE_MASK (3ULL << GITS_BASER_PAGE_SIZE_SHIFT)
+#define __GITS_BASER_PSZ(sz) (GIC_PAGE_SIZE_ ## sz << GITS_BASER_PAGE_SIZE_SHIFT)
+#define GITS_BASER_PAGE_SIZE_4K __GITS_BASER_PSZ(4K)
+#define GITS_BASER_PAGE_SIZE_16K __GITS_BASER_PSZ(16K)
+#define GITS_BASER_PAGE_SIZE_64K __GITS_BASER_PSZ(64K)
+#define GITS_BASER_PAGE_SIZE_MASK __GITS_BASER_PSZ(MASK)
#define GITS_BASER_PAGES_MAX 256
#define GITS_BASER_PAGES_SHIFT (0)
#define GITS_BASER_NR_PAGES(r) (((r) & 0xff) + 1)
@@ -456,8 +519,10 @@
#define GITS_CMD_VMAPTI GITS_CMD_GICv4(GITS_CMD_MAPTI)
#define GITS_CMD_VMOVI GITS_CMD_GICv4(GITS_CMD_MOVI)
#define GITS_CMD_VSYNC GITS_CMD_GICv4(GITS_CMD_SYNC)
-/* VMOVP is the odd one, as it doesn't have a physical counterpart */
+/* VMOVP, VSGI and INVDB are the odd ones, as they don't have a physical counterpart */
#define GITS_CMD_VMOVP GITS_CMD_GICv4(2)
+#define GITS_CMD_VSGI GITS_CMD_GICv4(3)
+#define GITS_CMD_INVDB GITS_CMD_GICv4(0xe)
/*
* ITS error numbers
@@ -487,6 +552,8 @@
#define ICC_CTLR_EL1_EOImode_MASK (1 << ICC_CTLR_EL1_EOImode_SHIFT)
#define ICC_CTLR_EL1_CBPR_SHIFT 0
#define ICC_CTLR_EL1_CBPR_MASK (1 << ICC_CTLR_EL1_CBPR_SHIFT)
+#define ICC_CTLR_EL1_PMHE_SHIFT 6
+#define ICC_CTLR_EL1_PMHE_MASK (1 << ICC_CTLR_EL1_PMHE_SHIFT)
#define ICC_CTLR_EL1_PRI_BITS_SHIFT 8
#define ICC_CTLR_EL1_PRI_BITS_MASK (0x7 << ICC_CTLR_EL1_PRI_BITS_SHIFT)
#define ICC_CTLR_EL1_ID_BITS_SHIFT 11
@@ -603,16 +670,21 @@
struct rdists {
struct {
+ raw_spinlock_t rd_lock;
void __iomem *rd_base;
struct page *pend_page;
phys_addr_t phys_base;
bool lpi_enabled;
+ cpumask_t *vpe_table_mask;
+ void *vpe_l1_base;
} __percpu *rdist;
phys_addr_t prop_table_pa;
void *prop_table_va;
u64 flags;
u32 gicd_typer;
+ u32 gicd_typer2;
bool has_vlpis;
+ bool has_rvpeid;
bool has_direct_lpi;
};
diff --git a/include/linux/irqchip/arm-gic-v4.h b/include/linux/irqchip/arm-gic-v4.h
index e6b155713b47..6976b8331b60 100644
--- a/include/linux/irqchip/arm-gic-v4.h
+++ b/include/linux/irqchip/arm-gic-v4.h
@@ -32,11 +32,39 @@ struct its_vm {
struct its_vpe {
struct page *vpt_page;
struct its_vm *its_vm;
+ /* per-vPE VLPI tracking */
+ atomic_t vlpi_count;
/* Doorbell interrupt */
int irq;
irq_hw_number_t vpe_db_lpi;
- /* VPE proxy mapping */
- int vpe_proxy_event;
+ /* VPE resident */
+ bool resident;
+ union {
+ /* GICv4.0 implementations */
+ struct {
+ /* VPE proxy mapping */
+ int vpe_proxy_event;
+ /* Implementation Defined Area Invalid */
+ bool idai;
+ };
+ /* GICv4.1 implementations */
+ struct {
+ struct fwnode_handle *fwnode;
+ struct irq_domain *sgi_domain;
+ struct {
+ u8 priority;
+ bool enabled;
+ bool group;
+ } sgi_config[16];
+ atomic_t vmapp_count;
+ };
+ };
+
+ /*
+ * Ensures mutual exclusion between affinity setting of the
+ * vPE and vLPI operations using vpe->col_idx.
+ */
+ raw_spinlock_t vpe_lock;
/*
* This collection ID is used to indirect the target
* redistributor for this VPE. The ID itself isn't involved in
@@ -45,8 +73,6 @@ struct its_vpe {
u16 col_idx;
/* Unique (system-wide) VPE identifier */
u16 vpe_id;
- /* Implementation Defined Area Invalid */
- bool idai;
/* Pending VLPIs on schedule out? */
bool pending_last;
};
@@ -79,6 +105,7 @@ enum its_vcpu_info_cmd_type {
SCHEDULE_VPE,
DESCHEDULE_VPE,
INVALL_VPE,
+ PROP_UPDATE_VSGI,
};
struct its_cmd_info {
@@ -86,19 +113,32 @@ struct its_cmd_info {
union {
struct its_vlpi_map *map;
u8 config;
+ bool req_db;
+ struct {
+ bool g0en;
+ bool g1en;
+ };
+ struct {
+ u8 priority;
+ bool group;
+ };
};
};
int its_alloc_vcpu_irqs(struct its_vm *vm);
void its_free_vcpu_irqs(struct its_vm *vm);
-int its_schedule_vpe(struct its_vpe *vpe, bool on);
+int its_make_vpe_resident(struct its_vpe *vpe, bool g0en, bool g1en);
+int its_make_vpe_non_resident(struct its_vpe *vpe, bool db);
int its_invall_vpe(struct its_vpe *vpe);
int its_map_vlpi(int irq, struct its_vlpi_map *map);
int its_get_vlpi(int irq, struct its_vlpi_map *map);
int its_unmap_vlpi(int irq);
int its_prop_update_vlpi(int irq, u8 config, bool inv);
+int its_prop_update_vsgi(int irq, u8 priority, bool group);
struct irq_domain_ops;
-int its_init_v4(struct irq_domain *domain, const struct irq_domain_ops *ops);
+int its_init_v4(struct irq_domain *domain,
+ const struct irq_domain_ops *vpe_ops,
+ const struct irq_domain_ops *sgi_ops);
#endif
diff --git a/include/linux/irqchip/ingenic.h b/include/linux/irqchip/ingenic.h
deleted file mode 100644
index 146558853ad4..000000000000
--- a/include/linux/irqchip/ingenic.h
+++ /dev/null
@@ -1,14 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (C) 2010, Lars-Peter Clausen <lars@metafoo.de>
- */
-
-#ifndef __LINUX_IRQCHIP_INGENIC_H__
-#define __LINUX_IRQCHIP_INGENIC_H__
-
-#include <linux/irq.h>
-
-extern void ingenic_intc_irq_suspend(struct irq_data *data);
-extern void ingenic_intc_irq_resume(struct irq_data *data);
-
-#endif
diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h
index 583e7abd07f9..8d062e86d954 100644
--- a/include/linux/irqdomain.h
+++ b/include/linux/irqdomain.h
@@ -83,6 +83,7 @@ enum irq_domain_bus_token {
DOMAIN_BUS_IPI,
DOMAIN_BUS_FSL_MC_MSI,
DOMAIN_BUS_TI_SCI_INTA_MSI,
+ DOMAIN_BUS_WAKEUP,
};
/**
@@ -191,7 +192,7 @@ enum {
IRQ_DOMAIN_FLAG_HIERARCHY = (1 << 0),
/* Irq domain name was allocated in __irq_domain_add() */
- IRQ_DOMAIN_NAME_ALLOCATED = (1 << 6),
+ IRQ_DOMAIN_NAME_ALLOCATED = (1 << 1),
/* Irq domain is an IPI domain with virq per cpu */
IRQ_DOMAIN_FLAG_IPI_PER_CPU = (1 << 2),
@@ -206,6 +207,13 @@ enum {
IRQ_DOMAIN_FLAG_MSI_REMAP = (1 << 5),
/*
+ * Quirk to handle MSI implementations which do not provide
+ * masking. Currently known to affect x86, but partially
+ * handled in core code.
+ */
+ IRQ_DOMAIN_MSI_NOMASK_QUIRK = (1 << 6),
+
+ /*
* Flags starting from IRQ_DOMAIN_FLAG_NONCORE are reserved
* for implementation specific purposes and ignored by the
* core code.
@@ -426,6 +434,11 @@ int irq_domain_translate_twocell(struct irq_domain *d,
unsigned long *out_hwirq,
unsigned int *out_type);
+int irq_domain_translate_onecell(struct irq_domain *d,
+ struct irq_fwspec *fwspec,
+ unsigned long *out_hwirq,
+ unsigned int *out_type);
+
/* IPI functions */
int irq_reserve_ipi(struct irq_domain *domain, const struct cpumask *dest);
int irq_destroy_ipi(unsigned int irq, const struct cpumask *dest);
diff --git a/include/linux/irqflags.h b/include/linux/irqflags.h
index 21619c92c377..ceca42de4438 100644
--- a/include/linux/irqflags.h
+++ b/include/linux/irqflags.h
@@ -15,15 +15,15 @@
#include <linux/typecheck.h>
#include <asm/irqflags.h>
-/* Currently trace_softirqs_on/off is used only by lockdep */
+/* Currently lockdep_softirqs_on/off is used only by lockdep */
#ifdef CONFIG_PROVE_LOCKING
- extern void trace_softirqs_on(unsigned long ip);
- extern void trace_softirqs_off(unsigned long ip);
+ extern void lockdep_softirqs_on(unsigned long ip);
+ extern void lockdep_softirqs_off(unsigned long ip);
extern void lockdep_hardirqs_on(unsigned long ip);
extern void lockdep_hardirqs_off(unsigned long ip);
#else
- static inline void trace_softirqs_on(unsigned long ip) { }
- static inline void trace_softirqs_off(unsigned long ip) { }
+ static inline void lockdep_softirqs_on(unsigned long ip) { }
+ static inline void lockdep_softirqs_off(unsigned long ip) { }
static inline void lockdep_hardirqs_on(unsigned long ip) { }
static inline void lockdep_hardirqs_off(unsigned long ip) { }
#endif
@@ -31,15 +31,20 @@
#ifdef CONFIG_TRACE_IRQFLAGS
extern void trace_hardirqs_on(void);
extern void trace_hardirqs_off(void);
-# define trace_hardirq_context(p) ((p)->hardirq_context)
-# define trace_softirq_context(p) ((p)->softirq_context)
-# define trace_hardirqs_enabled(p) ((p)->hardirqs_enabled)
-# define trace_softirqs_enabled(p) ((p)->softirqs_enabled)
-# define trace_hardirq_enter() \
+# define lockdep_hardirq_context(p) ((p)->hardirq_context)
+# define lockdep_softirq_context(p) ((p)->softirq_context)
+# define lockdep_hardirqs_enabled(p) ((p)->hardirqs_enabled)
+# define lockdep_softirqs_enabled(p) ((p)->softirqs_enabled)
+# define lockdep_hardirq_enter() \
do { \
- current->hardirq_context++; \
+ if (!current->hardirq_context++) \
+ current->hardirq_threaded = 0; \
} while (0)
-# define trace_hardirq_exit() \
+# define lockdep_hardirq_threaded() \
+do { \
+ current->hardirq_threaded = 1; \
+} while (0)
+# define lockdep_hardirq_exit() \
do { \
current->hardirq_context--; \
} while (0)
@@ -51,17 +56,58 @@ do { \
do { \
current->softirq_context--; \
} while (0)
+
+# define lockdep_hrtimer_enter(__hrtimer) \
+ do { \
+ if (!__hrtimer->is_hard) \
+ current->irq_config = 1; \
+ } while (0)
+
+# define lockdep_hrtimer_exit(__hrtimer) \
+ do { \
+ if (!__hrtimer->is_hard) \
+ current->irq_config = 0; \
+ } while (0)
+
+# define lockdep_posixtimer_enter() \
+ do { \
+ current->irq_config = 1; \
+ } while (0)
+
+# define lockdep_posixtimer_exit() \
+ do { \
+ current->irq_config = 0; \
+ } while (0)
+
+# define lockdep_irq_work_enter(__work) \
+ do { \
+ if (!(atomic_read(&__work->flags) & IRQ_WORK_HARD_IRQ))\
+ current->irq_config = 1; \
+ } while (0)
+# define lockdep_irq_work_exit(__work) \
+ do { \
+ if (!(atomic_read(&__work->flags) & IRQ_WORK_HARD_IRQ))\
+ current->irq_config = 0; \
+ } while (0)
+
#else
# define trace_hardirqs_on() do { } while (0)
# define trace_hardirqs_off() do { } while (0)
-# define trace_hardirq_context(p) 0
-# define trace_softirq_context(p) 0
-# define trace_hardirqs_enabled(p) 0
-# define trace_softirqs_enabled(p) 0
-# define trace_hardirq_enter() do { } while (0)
-# define trace_hardirq_exit() do { } while (0)
+# define lockdep_hardirq_context(p) 0
+# define lockdep_softirq_context(p) 0
+# define lockdep_hardirqs_enabled(p) 0
+# define lockdep_softirqs_enabled(p) 0
+# define lockdep_hardirq_enter() do { } while (0)
+# define lockdep_hardirq_threaded() do { } while (0)
+# define lockdep_hardirq_exit() do { } while (0)
# define lockdep_softirq_enter() do { } while (0)
# define lockdep_softirq_exit() do { } while (0)
+# define lockdep_hrtimer_enter(__hrtimer) do { } while (0)
+# define lockdep_hrtimer_exit(__hrtimer) do { } while (0)
+# define lockdep_posixtimer_enter() do { } while (0)
+# define lockdep_posixtimer_exit() do { } while (0)
+# define lockdep_irq_work_enter(__work) do { } while (0)
+# define lockdep_irq_work_exit(__work) do { } while (0)
#endif
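The new lockdep_*_enter/exit annotations are meant to bracket callback invocation so lockdep can distinguish genuine hard-IRQ execution from threaded or timer contexts. A rough sketch of the intended usage, not part of this patch (my_run_one_work is a hypothetical dispatcher):

static void my_run_one_work(struct irq_work *work)
{
        lockdep_irq_work_enter(work);   /* sets irq_config unless IRQ_WORK_HARD_IRQ */
        work->func(work);
        lockdep_irq_work_exit(work);
}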
#if defined(CONFIG_IRQSOFF_TRACER) || \
diff --git a/include/linux/isdn/capilli.h b/include/linux/isdn/capilli.h
index d75e1ad72964..12be09b6883b 100644
--- a/include/linux/isdn/capilli.h
+++ b/include/linux/isdn/capilli.h
@@ -69,7 +69,6 @@ struct capi_ctr {
unsigned short state; /* controller state */
int blocked; /* output blocked */
int traceflag; /* capi trace */
- wait_queue_head_t state_wait_queue;
struct proc_dir_entry *procent;
char procfn[128];
@@ -80,8 +79,6 @@ int detach_capi_ctr(struct capi_ctr *);
void capi_ctr_ready(struct capi_ctr * card);
void capi_ctr_down(struct capi_ctr * card);
-void capi_ctr_suspend_output(struct capi_ctr * card);
-void capi_ctr_resume_output(struct capi_ctr * card);
void capi_ctr_handle_message(struct capi_ctr * card, u16 appl, struct sk_buff *skb);
// ---------------------------------------------------------------------------
@@ -91,23 +88,8 @@ struct capi_driver {
char name[32]; /* driver name */
char revision[32];
- int (*add_card)(struct capi_driver *driver, capicardparams *data);
-
/* management information for kcapi */
struct list_head list;
};
-void register_capi_driver(struct capi_driver *driver);
-void unregister_capi_driver(struct capi_driver *driver);
-
-// ---------------------------------------------------------------------------
-// library functions for use by hardware controller drivers
-
-void capilib_new_ncci(struct list_head *head, u16 applid, u32 ncci, u32 winsize);
-void capilib_free_ncci(struct list_head *head, u16 applid, u32 ncci);
-void capilib_release_appl(struct list_head *head, u16 applid);
-void capilib_release(struct list_head *head);
-void capilib_data_b3_conf(struct list_head *head, u16 applid, u32 ncci, u16 msgid);
-u16 capilib_data_b3_req(struct list_head *head, u16 applid, u32 ncci, u16 msgid);
-
#endif /* __CAPILLI_H__ */
diff --git a/include/linux/isdn/capiutil.h b/include/linux/isdn/capiutil.h
index 44bd6046e6e2..953fd500dff7 100644
--- a/include/linux/isdn/capiutil.h
+++ b/include/linux/isdn/capiutil.h
@@ -57,460 +57,4 @@ static inline void capimsg_setu32(void *m, int off, __u32 val)
#define CAPIMSG_SETCONTROL(m, contr) capimsg_setu32(m, 8, contr)
#define CAPIMSG_SETDATALEN(m, len) capimsg_setu16(m, 16, len)
-/*----- basic-type definitions -----*/
-
-typedef __u8 *_cstruct;
-
-typedef enum {
- CAPI_COMPOSE,
- CAPI_DEFAULT
-} _cmstruct;
-
-/*
- The _cmsg structure contains all possible CAPI 2.0 parameter.
- All parameters are stored here first. The function CAPI_CMSG_2_MESSAGE
- assembles the parameter and builds CAPI2.0 conform messages.
- CAPI_MESSAGE_2_CMSG disassembles CAPI 2.0 messages and stores the
- parameter in the _cmsg structure
- */
-
-typedef struct {
- /* Header */
- __u16 ApplId;
- __u8 Command;
- __u8 Subcommand;
- __u16 Messagenumber;
-
- /* Parameter */
- union {
- __u32 adrController;
- __u32 adrPLCI;
- __u32 adrNCCI;
- } adr;
-
- _cmstruct AdditionalInfo;
- _cstruct B1configuration;
- __u16 B1protocol;
- _cstruct B2configuration;
- __u16 B2protocol;
- _cstruct B3configuration;
- __u16 B3protocol;
- _cstruct BC;
- _cstruct BChannelinformation;
- _cmstruct BProtocol;
- _cstruct CalledPartyNumber;
- _cstruct CalledPartySubaddress;
- _cstruct CallingPartyNumber;
- _cstruct CallingPartySubaddress;
- __u32 CIPmask;
- __u32 CIPmask2;
- __u16 CIPValue;
- __u32 Class;
- _cstruct ConnectedNumber;
- _cstruct ConnectedSubaddress;
- __u32 Data;
- __u16 DataHandle;
- __u16 DataLength;
- _cstruct FacilityConfirmationParameter;
- _cstruct Facilitydataarray;
- _cstruct FacilityIndicationParameter;
- _cstruct FacilityRequestParameter;
- __u16 FacilitySelector;
- __u16 Flags;
- __u32 Function;
- _cstruct HLC;
- __u16 Info;
- _cstruct InfoElement;
- __u32 InfoMask;
- __u16 InfoNumber;
- _cstruct Keypadfacility;
- _cstruct LLC;
- _cstruct ManuData;
- __u32 ManuID;
- _cstruct NCPI;
- __u16 Reason;
- __u16 Reason_B3;
- __u16 Reject;
- _cstruct Useruserdata;
-
- /* intern */
- unsigned l, p;
- unsigned char *par;
- __u8 *m;
-
- /* buffer to construct message */
- __u8 buf[180];
-
-} _cmsg;
-
-/*
- * capi_cmsg2message() assembles the parameter from _cmsg to a CAPI 2.0
- * conform message
- */
-unsigned capi_cmsg2message(_cmsg * cmsg, __u8 * msg);
-
-/*
- * capi_message2cmsg disassembles a CAPI message an writes the parameter
- * into _cmsg for easy access
- */
-unsigned capi_message2cmsg(_cmsg * cmsg, __u8 * msg);
-
-/*
- * capi_cmsg_header() fills the _cmsg structure with default values, so only
- * parameter with non default values must be changed before sending the
- * message.
- */
-unsigned capi_cmsg_header(_cmsg * cmsg, __u16 _ApplId,
- __u8 _Command, __u8 _Subcommand,
- __u16 _Messagenumber, __u32 _Controller);
-
-/*-----------------------------------------------------------------------*/
-
-/*
- * Debugging / Tracing functions
- */
-
-char *capi_cmd2str(__u8 cmd, __u8 subcmd);
-
-typedef struct {
- u_char *buf;
- u_char *p;
- size_t size;
- size_t pos;
-} _cdebbuf;
-
-#define CDEBUG_SIZE 1024
-#define CDEBUG_GSIZE 4096
-
-void cdebbuf_free(_cdebbuf *cdb);
-int cdebug_init(void);
-void cdebug_exit(void);
-
-_cdebbuf *capi_cmsg2str(_cmsg *cmsg);
-_cdebbuf *capi_message2str(__u8 *msg);
-
-/*-----------------------------------------------------------------------*/
-
-static inline void capi_cmsg_answer(_cmsg * cmsg)
-{
- cmsg->Subcommand |= 0x01;
-}
-
-/*-----------------------------------------------------------------------*/
-
-static inline void capi_fill_CONNECT_B3_REQ(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber,
- __u32 adr,
- _cstruct NCPI)
-{
- capi_cmsg_header(cmsg, ApplId, 0x82, 0x80, Messagenumber, adr);
- cmsg->NCPI = NCPI;
-}
-
-static inline void capi_fill_FACILITY_REQ(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber,
- __u32 adr,
- __u16 FacilitySelector,
- _cstruct FacilityRequestParameter)
-{
- capi_cmsg_header(cmsg, ApplId, 0x80, 0x80, Messagenumber, adr);
- cmsg->FacilitySelector = FacilitySelector;
- cmsg->FacilityRequestParameter = FacilityRequestParameter;
-}
-
-static inline void capi_fill_INFO_REQ(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber,
- __u32 adr,
- _cstruct CalledPartyNumber,
- _cstruct BChannelinformation,
- _cstruct Keypadfacility,
- _cstruct Useruserdata,
- _cstruct Facilitydataarray)
-{
- capi_cmsg_header(cmsg, ApplId, 0x08, 0x80, Messagenumber, adr);
- cmsg->CalledPartyNumber = CalledPartyNumber;
- cmsg->BChannelinformation = BChannelinformation;
- cmsg->Keypadfacility = Keypadfacility;
- cmsg->Useruserdata = Useruserdata;
- cmsg->Facilitydataarray = Facilitydataarray;
-}
-
-static inline void capi_fill_LISTEN_REQ(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber,
- __u32 adr,
- __u32 InfoMask,
- __u32 CIPmask,
- __u32 CIPmask2,
- _cstruct CallingPartyNumber,
- _cstruct CallingPartySubaddress)
-{
- capi_cmsg_header(cmsg, ApplId, 0x05, 0x80, Messagenumber, adr);
- cmsg->InfoMask = InfoMask;
- cmsg->CIPmask = CIPmask;
- cmsg->CIPmask2 = CIPmask2;
- cmsg->CallingPartyNumber = CallingPartyNumber;
- cmsg->CallingPartySubaddress = CallingPartySubaddress;
-}
-
-static inline void capi_fill_ALERT_REQ(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber,
- __u32 adr,
- _cstruct BChannelinformation,
- _cstruct Keypadfacility,
- _cstruct Useruserdata,
- _cstruct Facilitydataarray)
-{
- capi_cmsg_header(cmsg, ApplId, 0x01, 0x80, Messagenumber, adr);
- cmsg->BChannelinformation = BChannelinformation;
- cmsg->Keypadfacility = Keypadfacility;
- cmsg->Useruserdata = Useruserdata;
- cmsg->Facilitydataarray = Facilitydataarray;
-}
-
-static inline void capi_fill_CONNECT_REQ(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber,
- __u32 adr,
- __u16 CIPValue,
- _cstruct CalledPartyNumber,
- _cstruct CallingPartyNumber,
- _cstruct CalledPartySubaddress,
- _cstruct CallingPartySubaddress,
- __u16 B1protocol,
- __u16 B2protocol,
- __u16 B3protocol,
- _cstruct B1configuration,
- _cstruct B2configuration,
- _cstruct B3configuration,
- _cstruct BC,
- _cstruct LLC,
- _cstruct HLC,
- _cstruct BChannelinformation,
- _cstruct Keypadfacility,
- _cstruct Useruserdata,
- _cstruct Facilitydataarray)
-{
-
- capi_cmsg_header(cmsg, ApplId, 0x02, 0x80, Messagenumber, adr);
- cmsg->CIPValue = CIPValue;
- cmsg->CalledPartyNumber = CalledPartyNumber;
- cmsg->CallingPartyNumber = CallingPartyNumber;
- cmsg->CalledPartySubaddress = CalledPartySubaddress;
- cmsg->CallingPartySubaddress = CallingPartySubaddress;
- cmsg->B1protocol = B1protocol;
- cmsg->B2protocol = B2protocol;
- cmsg->B3protocol = B3protocol;
- cmsg->B1configuration = B1configuration;
- cmsg->B2configuration = B2configuration;
- cmsg->B3configuration = B3configuration;
- cmsg->BC = BC;
- cmsg->LLC = LLC;
- cmsg->HLC = HLC;
- cmsg->BChannelinformation = BChannelinformation;
- cmsg->Keypadfacility = Keypadfacility;
- cmsg->Useruserdata = Useruserdata;
- cmsg->Facilitydataarray = Facilitydataarray;
-}
-
-static inline void capi_fill_DATA_B3_REQ(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber,
- __u32 adr,
- __u32 Data,
- __u16 DataLength,
- __u16 DataHandle,
- __u16 Flags)
-{
-
- capi_cmsg_header(cmsg, ApplId, 0x86, 0x80, Messagenumber, adr);
- cmsg->Data = Data;
- cmsg->DataLength = DataLength;
- cmsg->DataHandle = DataHandle;
- cmsg->Flags = Flags;
-}
-
-static inline void capi_fill_DISCONNECT_REQ(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber,
- __u32 adr,
- _cstruct BChannelinformation,
- _cstruct Keypadfacility,
- _cstruct Useruserdata,
- _cstruct Facilitydataarray)
-{
-
- capi_cmsg_header(cmsg, ApplId, 0x04, 0x80, Messagenumber, adr);
- cmsg->BChannelinformation = BChannelinformation;
- cmsg->Keypadfacility = Keypadfacility;
- cmsg->Useruserdata = Useruserdata;
- cmsg->Facilitydataarray = Facilitydataarray;
-}
-
-static inline void capi_fill_DISCONNECT_B3_REQ(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber,
- __u32 adr,
- _cstruct NCPI)
-{
-
- capi_cmsg_header(cmsg, ApplId, 0x84, 0x80, Messagenumber, adr);
- cmsg->NCPI = NCPI;
-}
-
-static inline void capi_fill_MANUFACTURER_REQ(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber,
- __u32 adr,
- __u32 ManuID,
- __u32 Class,
- __u32 Function,
- _cstruct ManuData)
-{
-
- capi_cmsg_header(cmsg, ApplId, 0xff, 0x80, Messagenumber, adr);
- cmsg->ManuID = ManuID;
- cmsg->Class = Class;
- cmsg->Function = Function;
- cmsg->ManuData = ManuData;
-}
-
-static inline void capi_fill_RESET_B3_REQ(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber,
- __u32 adr,
- _cstruct NCPI)
-{
-
- capi_cmsg_header(cmsg, ApplId, 0x87, 0x80, Messagenumber, adr);
- cmsg->NCPI = NCPI;
-}
-
-static inline void capi_fill_SELECT_B_PROTOCOL_REQ(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber,
- __u32 adr,
- __u16 B1protocol,
- __u16 B2protocol,
- __u16 B3protocol,
- _cstruct B1configuration,
- _cstruct B2configuration,
- _cstruct B3configuration)
-{
-
- capi_cmsg_header(cmsg, ApplId, 0x41, 0x80, Messagenumber, adr);
- cmsg->B1protocol = B1protocol;
- cmsg->B2protocol = B2protocol;
- cmsg->B3protocol = B3protocol;
- cmsg->B1configuration = B1configuration;
- cmsg->B2configuration = B2configuration;
- cmsg->B3configuration = B3configuration;
-}
-
-static inline void capi_fill_CONNECT_RESP(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber,
- __u32 adr,
- __u16 Reject,
- __u16 B1protocol,
- __u16 B2protocol,
- __u16 B3protocol,
- _cstruct B1configuration,
- _cstruct B2configuration,
- _cstruct B3configuration,
- _cstruct ConnectedNumber,
- _cstruct ConnectedSubaddress,
- _cstruct LLC,
- _cstruct BChannelinformation,
- _cstruct Keypadfacility,
- _cstruct Useruserdata,
- _cstruct Facilitydataarray)
-{
- capi_cmsg_header(cmsg, ApplId, 0x02, 0x83, Messagenumber, adr);
- cmsg->Reject = Reject;
- cmsg->B1protocol = B1protocol;
- cmsg->B2protocol = B2protocol;
- cmsg->B3protocol = B3protocol;
- cmsg->B1configuration = B1configuration;
- cmsg->B2configuration = B2configuration;
- cmsg->B3configuration = B3configuration;
- cmsg->ConnectedNumber = ConnectedNumber;
- cmsg->ConnectedSubaddress = ConnectedSubaddress;
- cmsg->LLC = LLC;
- cmsg->BChannelinformation = BChannelinformation;
- cmsg->Keypadfacility = Keypadfacility;
- cmsg->Useruserdata = Useruserdata;
- cmsg->Facilitydataarray = Facilitydataarray;
-}
-
-static inline void capi_fill_CONNECT_ACTIVE_RESP(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber,
- __u32 adr)
-{
-
- capi_cmsg_header(cmsg, ApplId, 0x03, 0x83, Messagenumber, adr);
-}
-
-static inline void capi_fill_CONNECT_B3_ACTIVE_RESP(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber,
- __u32 adr)
-{
-
- capi_cmsg_header(cmsg, ApplId, 0x83, 0x83, Messagenumber, adr);
-}
-
-static inline void capi_fill_CONNECT_B3_RESP(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber,
- __u32 adr,
- __u16 Reject,
- _cstruct NCPI)
-{
- capi_cmsg_header(cmsg, ApplId, 0x82, 0x83, Messagenumber, adr);
- cmsg->Reject = Reject;
- cmsg->NCPI = NCPI;
-}
-
-static inline void capi_fill_CONNECT_B3_T90_ACTIVE_RESP(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber,
- __u32 adr)
-{
-
- capi_cmsg_header(cmsg, ApplId, 0x88, 0x83, Messagenumber, adr);
-}
-
-static inline void capi_fill_DATA_B3_RESP(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber,
- __u32 adr,
- __u16 DataHandle)
-{
-
- capi_cmsg_header(cmsg, ApplId, 0x86, 0x83, Messagenumber, adr);
- cmsg->DataHandle = DataHandle;
-}
-
-static inline void capi_fill_DISCONNECT_B3_RESP(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber,
- __u32 adr)
-{
-
- capi_cmsg_header(cmsg, ApplId, 0x84, 0x83, Messagenumber, adr);
-}
-
-static inline void capi_fill_DISCONNECT_RESP(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber,
- __u32 adr)
-{
-
- capi_cmsg_header(cmsg, ApplId, 0x04, 0x83, Messagenumber, adr);
-}
-
-static inline void capi_fill_FACILITY_RESP(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber,
- __u32 adr,
- __u16 FacilitySelector)
-{
-
- capi_cmsg_header(cmsg, ApplId, 0x80, 0x83, Messagenumber, adr);
- cmsg->FacilitySelector = FacilitySelector;
-}
-
-static inline void capi_fill_INFO_RESP(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber,
- __u32 adr)
-{
-
- capi_cmsg_header(cmsg, ApplId, 0x08, 0x83, Messagenumber, adr);
-}
-
-static inline void capi_fill_MANUFACTURER_RESP(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber,
- __u32 adr,
- __u32 ManuID,
- __u32 Class,
- __u32 Function,
- _cstruct ManuData)
-{
-
- capi_cmsg_header(cmsg, ApplId, 0xff, 0x83, Messagenumber, adr);
- cmsg->ManuID = ManuID;
- cmsg->Class = Class;
- cmsg->Function = Function;
- cmsg->ManuData = ManuData;
-}
-
-static inline void capi_fill_RESET_B3_RESP(_cmsg * cmsg, __u16 ApplId, __u16 Messagenumber,
- __u32 adr)
-{
-
- capi_cmsg_header(cmsg, ApplId, 0x87, 0x83, Messagenumber, adr);
-}
-
#endif /* __CAPIUTIL_H__ */
diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
index 603fbc4e2f70..f613d8529863 100644
--- a/include/linux/jbd2.h
+++ b/include/linux/jbd2.h
@@ -313,7 +313,6 @@ enum jbd_state_bits {
BH_Revoked, /* Has been revoked from the log */
BH_RevokeValid, /* Revoked flag is valid */
BH_JBDDirty, /* Is dirty but journaled */
- BH_State, /* Pins most journal_head state */
BH_JournalHead, /* Pins bh->b_private and jh->b_bh */
BH_Shadow, /* IO on shadow buffer is running */
BH_Verified, /* Metadata block has been verified ok */
@@ -342,26 +341,6 @@ static inline struct journal_head *bh2jh(struct buffer_head *bh)
return bh->b_private;
}
-static inline void jbd_lock_bh_state(struct buffer_head *bh)
-{
- bit_spin_lock(BH_State, &bh->b_state);
-}
-
-static inline int jbd_trylock_bh_state(struct buffer_head *bh)
-{
- return bit_spin_trylock(BH_State, &bh->b_state);
-}
-
-static inline int jbd_is_locked_bh_state(struct buffer_head *bh)
-{
- return bit_spin_is_locked(BH_State, &bh->b_state);
-}
-
-static inline void jbd_unlock_bh_state(struct buffer_head *bh)
-{
- bit_spin_unlock(BH_State, &bh->b_state);
-}
-
static inline void jbd_lock_bh_journal_head(struct buffer_head *bh)
{
bit_spin_lock(BH_JournalHead, &bh->b_state);
@@ -477,7 +456,9 @@ struct jbd2_revoke_table_s;
* @h_transaction: Which compound transaction is this update a part of?
* @h_journal: Which journal handle belongs to - used iff h_reserved set.
* @h_rsv_handle: Handle reserved for finishing the logical operation.
- * @h_buffer_credits: Number of remaining buffers we are allowed to dirty.
+ * @h_total_credits: Number of remaining buffers we are allowed to add to
+ * the journal. These are dirty buffers and revoke descriptor blocks.
+ * @h_revoke_credits: Number of remaining revoke records available for handle
* @h_ref: Reference count on this handle.
* @h_err: Field for caller's use to track errors through large fs operations.
* @h_sync: Flag for sync-on-close.
@@ -487,7 +468,8 @@ struct jbd2_revoke_table_s;
* @h_type: For handle statistics.
* @h_line_no: For handle statistics.
* @h_start_jiffies: Handle Start time.
- * @h_requested_credits: Holds @h_buffer_credits after handle is started.
+ * @h_requested_credits: Holds @h_total_credits after handle is started.
+ * @h_revoke_credits_requested: Holds @h_revoke_credits after handle is started.
* @saved_alloc_context: Saved context while transaction is open.
**/
@@ -504,7 +486,9 @@ struct jbd2_journal_handle
};
handle_t *h_rsv_handle;
- int h_buffer_credits;
+ int h_total_credits;
+ int h_revoke_credits;
+ int h_revoke_credits_requested;
int h_ref;
int h_err;
@@ -556,9 +540,9 @@ struct transaction_chp_stats_s {
* ->jbd_lock_bh_journal_head() (This is "innermost")
*
* j_state_lock
- * ->jbd_lock_bh_state()
+ * ->b_state_lock
*
- * jbd_lock_bh_state()
+ * b_state_lock
* ->j_list_lock
*
* j_state_lock
@@ -681,12 +665,25 @@ struct transaction_s
atomic_t t_updates;
/*
- * Number of buffers reserved for use by all handles in this transaction
- * handle but not yet modified. [none]
+ * Number of blocks reserved for this transaction in the journal.
+ * This includes all credits reserved when starting transaction
+ * handles as well as all journal descriptor blocks needed for this
+ * transaction. [none]
*/
atomic_t t_outstanding_credits;
/*
+ * Number of revoke records for this transaction added by already
+ * stopped handles. [none]
+ */
+ atomic_t t_outstanding_revokes;
+
+ /*
+ * How many handles used this transaction? [none]
+ */
+ atomic_t t_handle_count;
+
+ /*
* Forward and backward links for the circular list of all transactions
* awaiting checkpoint. [j_list_lock]
*/
@@ -704,11 +701,6 @@ struct transaction_s
ktime_t t_start_time;
/*
- * How many handles used this transaction? [none]
- */
- atomic_t t_handle_count;
-
- /*
* This transaction is being forced and some process is
* waiting for it to finish.
*/
@@ -1025,6 +1017,13 @@ struct journal_s
int j_max_transaction_buffers;
/**
+ * @j_revoke_records_per_block:
+ *
+ * Number of revoke records that fit in one descriptor block.
+ */
+ int j_revoke_records_per_block;
+
+ /**
* @j_commit_interval:
*
* What is the maximum transaction lifetime before we begin a commit?
@@ -1170,7 +1169,7 @@ struct journal_s
#define jbd2_might_wait_for_commit(j) \
do { \
rwsem_acquire(&j->j_trans_commit_map, 0, 0, _THIS_IP_); \
- rwsem_release(&j->j_trans_commit_map, 1, _THIS_IP_); \
+ rwsem_release(&j->j_trans_commit_map, _THIS_IP_); \
} while (0)
/* journal feature predicate functions */
@@ -1257,7 +1256,7 @@ JBD2_FEATURE_INCOMPAT_FUNCS(csum3, CSUM_V3)
/* Filing buffers */
extern void jbd2_journal_unfile_buffer(journal_t *, struct journal_head *);
-extern void __jbd2_journal_refile_buffer(struct journal_head *);
+extern bool __jbd2_journal_refile_buffer(struct journal_head *);
extern void jbd2_journal_refile_buffer(journal_t *, struct journal_head *);
extern void __jbd2_journal_file_buffer(struct journal_head *, transaction_t *, int);
extern void __journal_free_buffer(struct journal_head *bh);
@@ -1358,14 +1357,16 @@ static inline handle_t *journal_current_handle(void)
extern handle_t *jbd2_journal_start(journal_t *, int nblocks);
extern handle_t *jbd2__journal_start(journal_t *, int blocks, int rsv_blocks,
- gfp_t gfp_mask, unsigned int type,
- unsigned int line_no);
+ int revoke_records, gfp_t gfp_mask,
+ unsigned int type, unsigned int line_no);
extern int jbd2_journal_restart(handle_t *, int nblocks);
-extern int jbd2__journal_restart(handle_t *, int nblocks, gfp_t gfp_mask);
+extern int jbd2__journal_restart(handle_t *, int nblocks,
+ int revoke_records, gfp_t gfp_mask);
extern int jbd2_journal_start_reserved(handle_t *handle,
unsigned int type, unsigned int line_no);
extern void jbd2_journal_free_reserved(handle_t *handle);
-extern int jbd2_journal_extend (handle_t *, int nblocks);
+extern int jbd2_journal_extend(handle_t *handle, int nblocks,
+ int revoke_records);
extern int jbd2_journal_get_write_access(handle_t *, struct buffer_head *);
extern int jbd2_journal_get_create_access (handle_t *, struct buffer_head *);
extern int jbd2_journal_get_undo_access(handle_t *, struct buffer_head *);
@@ -1402,7 +1403,6 @@ extern int jbd2_journal_skip_recovery (journal_t *);
extern void jbd2_journal_update_sb_errno(journal_t *);
extern int jbd2_journal_update_sb_log_tail (journal_t *, tid_t,
unsigned long, int);
-extern void __jbd2_journal_abort_hard (journal_t *);
extern void jbd2_journal_abort (journal_t *, int);
extern int jbd2_journal_errno (journal_t *);
extern void jbd2_journal_ack_err (journal_t *);
@@ -1561,37 +1561,18 @@ static inline int jbd2_journal_has_csum_v2or3(journal_t *journal)
}
/*
- * We reserve t_outstanding_credits >> JBD2_CONTROL_BLOCKS_SHIFT for
- * transaction control blocks.
- */
-#define JBD2_CONTROL_BLOCKS_SHIFT 5
-
-/*
- * Return the minimum number of blocks which must be free in the journal
- * before a new transaction may be started. Must be called under j_state_lock.
- */
-static inline int jbd2_space_needed(journal_t *journal)
-{
- int nblocks = journal->j_max_transaction_buffers;
- return nblocks + (nblocks >> JBD2_CONTROL_BLOCKS_SHIFT);
-}
-
-/*
* Return number of free blocks in the log. Must be called under j_state_lock.
*/
static inline unsigned long jbd2_log_space_left(journal_t *journal)
{
/* Allow for rounding errors */
- unsigned long free = journal->j_free - 32;
+ long free = journal->j_free - 32;
if (journal->j_committing_transaction) {
- unsigned long committing = atomic_read(&journal->
- j_committing_transaction->t_outstanding_credits);
-
- /* Transaction + control blocks */
- free -= committing + (committing >> JBD2_CONTROL_BLOCKS_SHIFT);
+ free -= atomic_read(&journal->
+ j_committing_transaction->t_outstanding_credits);
}
- return free;
+ return max_t(long, free, 0);
}
/*
@@ -1645,6 +1626,20 @@ static inline tid_t jbd2_get_latest_transaction(journal_t *journal)
return tid;
}
+static inline int jbd2_handle_buffer_credits(handle_t *handle)
+{
+ journal_t *journal;
+
+ if (!handle->h_reserved)
+ journal = handle->h_transaction->t_journal;
+ else
+ journal = handle->h_journal;
+
+ return handle->h_total_credits -
+ DIV_ROUND_UP(handle->h_revoke_credits_requested,
+ journal->j_revoke_records_per_block);
+}
+
#ifdef __KERNEL__
#define buffer_trace_init(bh) do {} while (0)
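For orientation, a minimal sketch (not part of the patch) of how a caller might use the reworked credit API above: jbd2__journal_start() now takes a revoke_records count, jbd2_journal_extend() can top up both kinds of credit, and jbd2_handle_buffer_credits() reports the buffer-credit share of a handle. The function name and the specific credit numbers below are made up for illustration.

#include <linux/jbd2.h>

/* Hypothetical caller, not from the patch: reserve 8 buffer credits and
 * 2 revoke records, then extend the handle if more room is needed. */
static int example_journalled_update(journal_t *journal, struct buffer_head *bh)
{
	handle_t *handle;
	int err;

	handle = jbd2__journal_start(journal, 8, 0 /* rsv_blocks */,
				     2 /* revoke_records */, GFP_NOFS,
				     0 /* type */, 0 /* line_no */);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	err = jbd2_journal_get_write_access(handle, bh);
	if (!err && jbd2_handle_buffer_credits(handle) < 4)
		err = jbd2_journal_extend(handle, 4, 1);

	jbd2_journal_stop(handle);
	return err;
}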
diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h
index 1b6d31da7cbc..fed6ba96c527 100644
--- a/include/linux/jiffies.h
+++ b/include/linux/jiffies.h
@@ -8,6 +8,7 @@
#include <linux/types.h>
#include <linux/time.h>
#include <linux/timex.h>
+#include <vdso/jiffies.h>
#include <asm/param.h> /* for HZ */
#include <generated/timeconst.h>
@@ -59,9 +60,6 @@
extern int register_refined_jiffies(long clock_tick_rate);
-/* TICK_NSEC is the time between ticks in nsec assuming SHIFTED_HZ */
-#define TICK_NSEC ((NSEC_PER_SEC+HZ/2)/HZ)
-
/* TICK_USEC is the time between ticks in usec assuming SHIFTED_HZ */
#define TICK_USEC ((USEC_PER_SEC + HZ/2) / HZ)
@@ -422,26 +420,6 @@ static __always_inline unsigned long usecs_to_jiffies(const unsigned int u)
extern unsigned long timespec64_to_jiffies(const struct timespec64 *value);
extern void jiffies_to_timespec64(const unsigned long jiffies,
struct timespec64 *value);
-static inline unsigned long timespec_to_jiffies(const struct timespec *value)
-{
- struct timespec64 ts = timespec_to_timespec64(*value);
-
- return timespec64_to_jiffies(&ts);
-}
-
-static inline void jiffies_to_timespec(const unsigned long jiffies,
- struct timespec *value)
-{
- struct timespec64 ts;
-
- jiffies_to_timespec64(jiffies, &ts);
- *value = timespec64_to_timespec(ts);
-}
-
-extern unsigned long timeval_to_jiffies(const struct timeval *value);
-extern void jiffies_to_timeval(const unsigned long jiffies,
- struct timeval *value);
-
extern clock_t jiffies_to_clock_t(unsigned long x);
static inline clock_t jiffies_delta_to_clock_t(long delta)
{
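A small sketch (assumed usage, not from the patch) of the remaining conversion path now that the timespec/timeval helpers are removed: callers convert through timespec64.

#include <linux/jiffies.h>

/* Hypothetical helper, not from the patch. */
static unsigned long example_timeout_in_jiffies(const struct timespec64 *ts)
{
	return timespec64_to_jiffies(ts);
}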
diff --git a/include/linux/journal-head.h b/include/linux/journal-head.h
index 9fb870524314..75bc56109031 100644
--- a/include/linux/journal-head.h
+++ b/include/linux/journal-head.h
@@ -11,6 +11,8 @@
#ifndef JOURNAL_HEAD_H_INCLUDED
#define JOURNAL_HEAD_H_INCLUDED
+#include <linux/spinlock.h>
+
typedef unsigned int tid_t; /* Unique transaction ID */
typedef struct transaction_s transaction_t; /* Compound transaction type */
@@ -24,13 +26,18 @@ struct journal_head {
struct buffer_head *b_bh;
/*
+ * Protect the buffer head state
+ */
+ spinlock_t b_state_lock;
+
+ /*
* Reference count - see description in journal.c
* [jbd_lock_bh_journal_head()]
*/
int b_jcount;
/*
- * Journalling list for this buffer [jbd_lock_bh_state()]
+ * Journalling list for this buffer [b_state_lock]
* NOTE: We *cannot* combine this with b_modified into a bitfield
* as gcc would then (which the C standard allows but which is
* very unuseful) make 64-bit accesses to the bitfield and clobber
@@ -41,20 +48,20 @@ struct journal_head {
/*
* This flag signals the buffer has been modified by
* the currently running transaction
- * [jbd_lock_bh_state()]
+ * [b_state_lock]
*/
unsigned b_modified;
/*
* Copy of the buffer data frozen for writing to the log.
- * [jbd_lock_bh_state()]
+ * [b_state_lock]
*/
char *b_frozen_data;
/*
* Pointer to a saved copy of the buffer containing no uncommitted
* deallocation references, so that allocations can avoid overwriting
- * uncommitted deletes. [jbd_lock_bh_state()]
+ * uncommitted deletes. [b_state_lock]
*/
char *b_committed_data;
@@ -63,7 +70,7 @@ struct journal_head {
* metadata: either the running transaction or the committing
* transaction (if there is one). Only applies to buffers on a
* transaction's data or metadata journaling list.
- * [j_list_lock] [jbd_lock_bh_state()]
+ * [j_list_lock] [b_state_lock]
* Either of these locks is enough for reading, both are needed for
* changes.
*/
@@ -73,13 +80,13 @@ struct journal_head {
* Pointer to the running compound transaction which is currently
* modifying the buffer's metadata, if there was already a transaction
* committing it when the new transaction touched it.
- * [t_list_lock] [jbd_lock_bh_state()]
+ * [t_list_lock] [b_state_lock]
*/
transaction_t *b_next_transaction;
/*
* Doubly-linked list of buffers on a transaction's data, metadata or
- * forget queue. [t_list_lock] [jbd_lock_bh_state()]
+ * forget queue. [t_list_lock] [b_state_lock]
*/
struct journal_head *b_tnext, *b_tprev;
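A minimal sketch, assuming the locking convention annotated above, of how a caller would now protect the [b_state_lock] fields with the spinlock embedded in struct journal_head; the function itself is hypothetical.

#include <linux/journal-head.h>
#include <linux/spinlock.h>

/* Hypothetical example: b_modified is annotated [b_state_lock] above. */
static void example_mark_modified(struct journal_head *jh)
{
	spin_lock(&jh->b_state_lock);
	jh->b_modified = 1;
	spin_unlock(&jh->b_state_lock);
}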
diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index cc8a03cc9674..5cde9e7c2664 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -70,8 +70,18 @@ struct kasan_cache {
int free_meta_offset;
};
+/*
+ * These functions provide a special case to support backing module
+ * allocations with real shadow memory. With KASAN vmalloc, the special
+ * case is unnecessary, as the work is handled in the generic case.
+ */
+#ifndef CONFIG_KASAN_VMALLOC
int kasan_module_alloc(void *addr, size_t size);
void kasan_free_shadow(const struct vm_struct *vm);
+#else
+static inline int kasan_module_alloc(void *addr, size_t size) { return 0; }
+static inline void kasan_free_shadow(const struct vm_struct *vm) {}
+#endif
int kasan_add_zero_shadow(void *start, unsigned long size);
void kasan_remove_zero_shadow(void *start, unsigned long size);
@@ -194,4 +204,34 @@ static inline void *kasan_reset_tag(const void *addr)
#endif /* CONFIG_KASAN_SW_TAGS */
+#ifdef CONFIG_KASAN_VMALLOC
+int kasan_populate_vmalloc(unsigned long addr, unsigned long size);
+void kasan_poison_vmalloc(const void *start, unsigned long size);
+void kasan_unpoison_vmalloc(const void *start, unsigned long size);
+void kasan_release_vmalloc(unsigned long start, unsigned long end,
+ unsigned long free_region_start,
+ unsigned long free_region_end);
+#else
+static inline int kasan_populate_vmalloc(unsigned long start,
+ unsigned long size)
+{
+ return 0;
+}
+
+static inline void kasan_poison_vmalloc(const void *start, unsigned long size)
+{ }
+static inline void kasan_unpoison_vmalloc(const void *start, unsigned long size)
+{ }
+static inline void kasan_release_vmalloc(unsigned long start,
+ unsigned long end,
+ unsigned long free_region_start,
+ unsigned long free_region_end) {}
+#endif
+
+#ifdef CONFIG_KASAN_INLINE
+void kasan_non_canonical_hook(unsigned long addr);
+#else /* CONFIG_KASAN_INLINE */
+static inline void kasan_non_canonical_hook(unsigned long addr) { }
+#endif /* CONFIG_KASAN_INLINE */
+
#endif /* LINUX_KASAN_H */
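Because both CONFIG_KASAN_VMALLOC and non-VMALLOC builds now provide these symbols (real implementations or no-op stubs), call sites can stay #ifdef-free. A hypothetical sketch of such a call site:

#include <linux/kasan.h>

/* Hypothetical caller, not from the patch: with CONFIG_KASAN_VMALLOC
 * disabled the stubs above make these calls compile away to no-ops. */
static int example_map_area(unsigned long addr, unsigned long size)
{
	int ret = kasan_populate_vmalloc(addr, size);

	if (ret)
		return ret;
	kasan_unpoison_vmalloc((const void *)addr, size);
	return 0;
}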
diff --git a/include/linux/kcov.h b/include/linux/kcov.h
index b76a1807028d..a10e84707d82 100644
--- a/include/linux/kcov.h
+++ b/include/linux/kcov.h
@@ -37,12 +37,35 @@ do { \
(t)->kcov_mode &= ~KCOV_IN_CTXSW; \
} while (0)
+/* See Documentation/dev-tools/kcov.rst for usage details. */
+void kcov_remote_start(u64 handle);
+void kcov_remote_stop(void);
+u64 kcov_common_handle(void);
+
+static inline void kcov_remote_start_common(u64 id)
+{
+ kcov_remote_start(kcov_remote_handle(KCOV_SUBSYSTEM_COMMON, id));
+}
+
+static inline void kcov_remote_start_usb(u64 id)
+{
+ kcov_remote_start(kcov_remote_handle(KCOV_SUBSYSTEM_USB, id));
+}
+
#else
static inline void kcov_task_init(struct task_struct *t) {}
static inline void kcov_task_exit(struct task_struct *t) {}
static inline void kcov_prepare_switch(struct task_struct *t) {}
static inline void kcov_finish_switch(struct task_struct *t) {}
+static inline void kcov_remote_start(u64 handle) {}
+static inline void kcov_remote_stop(void) {}
+static inline u64 kcov_common_handle(void)
+{
+ return 0;
+}
+static inline void kcov_remote_start_common(u64 id) {}
+static inline void kcov_remote_start_usb(u64 id) {}
#endif /* CONFIG_KCOV */
#endif /* _LINUX_KCOV_H */
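A hedged sketch of the remote-coverage annotation pattern the new declarations above enable (see Documentation/dev-tools/kcov.rst); the worker function and instance id are made up.

#include <linux/kcov.h>
#include <linux/workqueue.h>

/* Hypothetical background worker: coverage collected between start/stop is
 * attributed to the remote handle instead of the executing kernel thread. */
static void example_worker(struct work_struct *work)
{
	kcov_remote_start_common(42 /* made-up instance id */);
	/* ... the work whose coverage should be captured ... */
	kcov_remote_stop();
}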
diff --git a/include/linux/kdb.h b/include/linux/kdb.h
index 68bd88223417..24cd447659e0 100644
--- a/include/linux/kdb.h
+++ b/include/linux/kdb.h
@@ -183,8 +183,6 @@ int kdb_process_cpu(const struct task_struct *p)
return cpu;
}
-/* kdb access to register set for stack dumping */
-extern struct pt_regs *kdb_current_regs;
#ifdef CONFIG_KALLSYMS
extern const char *kdb_walk_kallsyms(loff_t *pos);
#else /* ! CONFIG_KALLSYMS */
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index d83d403dac2e..9b7a8d74a9d6 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -79,15 +79,6 @@
*/
#define round_down(x, y) ((x) & ~__round_mask(x, y))
-/**
- * FIELD_SIZEOF - get the size of a struct's field
- * @t: the target struct
- * @f: the target struct's field
- * Return: the size of @f in the struct definition without having a
- * declared instance of @t.
- */
-#define FIELD_SIZEOF(t, f) (sizeof(((t*)0)->f))
-
#define typeof_member(T, m) typeof(((T*)0)->m)
#define DIV_ROUND_UP __KERNEL_DIV_ROUND_UP
@@ -266,6 +257,13 @@ extern void __cant_sleep(const char *file, int line, int preempt_offset);
#define might_sleep_if(cond) do { if (cond) might_sleep(); } while (0)
+#ifndef CONFIG_PREEMPT_RT
+# define cant_migrate() cant_sleep()
+#else
+ /* Placeholder for now */
+# define cant_migrate() do { } while (0)
+#endif
+
/**
* abs - return absolute value of an argument
* @x: the value. If it is unsigned type, it is converted to signed type first.
@@ -328,13 +326,6 @@ extern int oops_may_print(void);
void do_exit(long error_code) __noreturn;
void complete_and_exit(struct completion *, long) __noreturn;
-#ifdef CONFIG_ARCH_HAS_REFCOUNT
-void refcount_error_report(struct pt_regs *regs, const char *err);
-#else
-static inline void refcount_error_report(struct pt_regs *regs, const char *err)
-{ }
-#endif
-
/* Internal, do not use. */
int __must_check _kstrtoul(const char *s, unsigned int base, unsigned long *res);
int __must_check _kstrtol(const char *s, unsigned int base, long *res);
@@ -355,8 +346,7 @@ int __must_check kstrtoll(const char *s, unsigned int base, long long *res);
* @res: Where to write the result of the conversion on success.
*
* Returns 0 on success, -ERANGE on overflow and -EINVAL on parsing error.
- * Used as a replacement for the obsolete simple_strtoull. Return code must
- * be checked.
+ * Used as a replacement for simple_strtoull(). Return code must be checked.
*/
static inline int __must_check kstrtoul(const char *s, unsigned int base, unsigned long *res)
{
@@ -384,8 +374,7 @@ static inline int __must_check kstrtoul(const char *s, unsigned int base, unsign
* @res: Where to write the result of the conversion on success.
*
* Returns 0 on success, -ERANGE on overflow and -EINVAL on parsing error.
- * Used as a replacement for the obsolete simple_strtoull. Return code must
- * be checked.
+ * Used as a replacement for simple_strtoull(). Return code must be checked.
*/
static inline int __must_check kstrtol(const char *s, unsigned int base, long *res)
{
@@ -461,7 +450,18 @@ static inline int __must_check kstrtos32_from_user(const char __user *s, size_t
return kstrtoint_from_user(s, count, base, res);
}
-/* Obsolete, do not use. Use kstrto<foo> instead */
+/*
+ * Use kstrto<foo> instead.
+ *
+ * NOTE: simple_strto<foo> does not check for the range overflow and,
+ * depending on the input, may give interesting results.
+ *
+ * Use these functions if and only if you cannot use kstrto<foo>, because
+ * the conversion ends on the first non-digit character, which may be far
+ * beyond the supported range. They can be useful for parsing strings like
+ * 10x50 or 12:21 without altering the original string or using a temporary
+ * buffer, but keep the above caveat in mind.
+ */
extern unsigned long simple_strtoul(const char *,char **,unsigned int);
extern long simple_strtol(const char *,char **,unsigned int);
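To make the caveat above concrete, here is a hypothetical parser where simple_strtoul() is the right tool: the string contains a non-digit separator that kstrtoul() would reject outright.

#include <linux/kernel.h>
#include <linux/errno.h>

/* Hypothetical "WIDTHxHEIGHT" parser, not from the patch. */
static int example_parse_mode(const char *s, unsigned long *w, unsigned long *h)
{
	char *end;

	*w = simple_strtoul(s, &end, 10);
	if (*end != 'x')
		return -EINVAL;
	*h = simple_strtoul(end + 1, &end, 10);
	return *end ? -EINVAL : 0;
}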
diff --git a/include/linux/kernel_stat.h b/include/linux/kernel_stat.h
index 7ee2bb43b251..89f0745c096d 100644
--- a/include/linux/kernel_stat.h
+++ b/include/linux/kernel_stat.h
@@ -78,6 +78,24 @@ static inline unsigned int kstat_cpu_irqs_sum(unsigned int cpu)
return kstat_cpu(cpu).irqs_sum;
}
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
+extern u64 kcpustat_field(struct kernel_cpustat *kcpustat,
+ enum cpu_usage_stat usage, int cpu);
+extern void kcpustat_cpu_fetch(struct kernel_cpustat *dst, int cpu);
+#else
+static inline u64 kcpustat_field(struct kernel_cpustat *kcpustat,
+ enum cpu_usage_stat usage, int cpu)
+{
+ return kcpustat->cpustat[usage];
+}
+
+static inline void kcpustat_cpu_fetch(struct kernel_cpustat *dst, int cpu)
+{
+ *dst = kcpustat_cpu(cpu);
+}
+
+#endif
+
extern void account_user_time(struct task_struct *, u64);
extern void account_guest_time(struct task_struct *, u64);
extern void account_system_time(struct task_struct *, int, u64);
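A small sketch (hypothetical function name) of how a reader would use the new accessors: with CONFIG_VIRT_CPU_ACCOUNTING_GEN they fetch a coherent snapshot, otherwise they reduce to the plain per-cpu reads shown in the #else branch.

#include <linux/kernel_stat.h>

/* Hypothetical reader, not from the patch. */
static u64 example_cpu_user_time(int cpu)
{
	struct kernel_cpustat snap;

	kcpustat_cpu_fetch(&snap, cpu);
	return snap.cpustat[CPUTIME_USER];
}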
diff --git a/include/linux/kernelcapi.h b/include/linux/kernelcapi.h
index 075fab5f92e1..94ba42bf9da1 100644
--- a/include/linux/kernelcapi.h
+++ b/include/linux/kernelcapi.h
@@ -10,46 +10,12 @@
#ifndef __KERNELCAPI_H__
#define __KERNELCAPI_H__
-
#include <linux/list.h>
#include <linux/skbuff.h>
#include <linux/workqueue.h>
#include <linux/notifier.h>
#include <uapi/linux/kernelcapi.h>
-struct capi20_appl {
- u16 applid;
- capi_register_params rparam;
- void (*recv_message)(struct capi20_appl *ap, struct sk_buff *skb);
- void *private;
-
- /* internal to kernelcapi.o */
- unsigned long nrecvctlpkt;
- unsigned long nrecvdatapkt;
- unsigned long nsentctlpkt;
- unsigned long nsentdatapkt;
- struct mutex recv_mtx;
- struct sk_buff_head recv_queue;
- struct work_struct recv_work;
- int release_in_progress;
-};
-
-u16 capi20_isinstalled(void);
-u16 capi20_register(struct capi20_appl *ap);
-u16 capi20_release(struct capi20_appl *ap);
-u16 capi20_put_message(struct capi20_appl *ap, struct sk_buff *skb);
-u16 capi20_get_manufacturer(u32 contr, u8 buf[CAPI_MANUFACTURER_LEN]);
-u16 capi20_get_version(u32 contr, struct capi_version *verp);
-u16 capi20_get_serial(u32 contr, u8 serial[CAPI_SERIAL_LEN]);
-u16 capi20_get_profile(u32 contr, struct capi_profile *profp);
-int capi20_manufacturer(unsigned long cmd, void __user *data);
-
-#define CAPICTR_UP 0
-#define CAPICTR_DOWN 1
-
-int register_capictr_notifier(struct notifier_block *nb);
-int unregister_capictr_notifier(struct notifier_block *nb);
-
#define CAPI_NOERROR 0x0000
#define CAPI_TOOMANYAPPLS 0x1001
@@ -76,45 +42,4 @@ int unregister_capictr_notifier(struct notifier_block *nb);
#define CAPI_MSGCTRLERNOTSUPPORTEXTEQUIP 0x110a
#define CAPI_MSGCTRLERONLYSUPPORTEXTEQUIP 0x110b
-typedef enum {
- CapiMessageNotSupportedInCurrentState = 0x2001,
- CapiIllContrPlciNcci = 0x2002,
- CapiNoPlciAvailable = 0x2003,
- CapiNoNcciAvailable = 0x2004,
- CapiNoListenResourcesAvailable = 0x2005,
- CapiNoFaxResourcesAvailable = 0x2006,
- CapiIllMessageParmCoding = 0x2007,
-} RESOURCE_CODING_PROBLEM;
-
-typedef enum {
- CapiB1ProtocolNotSupported = 0x3001,
- CapiB2ProtocolNotSupported = 0x3002,
- CapiB3ProtocolNotSupported = 0x3003,
- CapiB1ProtocolParameterNotSupported = 0x3004,
- CapiB2ProtocolParameterNotSupported = 0x3005,
- CapiB3ProtocolParameterNotSupported = 0x3006,
- CapiBProtocolCombinationNotSupported = 0x3007,
- CapiNcpiNotSupported = 0x3008,
- CapiCipValueUnknown = 0x3009,
- CapiFlagsNotSupported = 0x300a,
- CapiFacilityNotSupported = 0x300b,
- CapiDataLengthNotSupportedByCurrentProtocol = 0x300c,
- CapiResetProcedureNotSupportedByCurrentProtocol = 0x300d,
- CapiTeiAssignmentFailed = 0x300e,
-} REQUESTED_SERVICES_PROBLEM;
-
-typedef enum {
- CapiSuccess = 0x0000,
- CapiSupplementaryServiceNotSupported = 0x300e,
- CapiRequestNotAllowedInThisState = 0x3010,
-} SUPPLEMENTARY_SERVICE_INFO;
-
-typedef enum {
- CapiProtocolErrorLayer1 = 0x3301,
- CapiProtocolErrorLayer2 = 0x3302,
- CapiProtocolErrorLayer3 = 0x3303,
- CapiTimeOut = 0x3303, // SuppServiceReason
- CapiCallGivenToOtherApplication = 0x3304,
-} CAPI_REASON;
-
#endif /* __KERNELCAPI_H__ */
diff --git a/include/linux/kernfs.h b/include/linux/kernfs.h
index 936b61bd504e..dded2e5a9f42 100644
--- a/include/linux/kernfs.h
+++ b/include/linux/kernfs.h
@@ -104,21 +104,6 @@ struct kernfs_elem_attr {
struct kernfs_node *notify_next; /* for kernfs_notify() */
};
-/* represent a kernfs node */
-union kernfs_node_id {
- struct {
- /*
- * blktrace will export this struct as a simplified 'struct
- * fid' (which is a big data struction), so userspace can use
- * it to find kernfs node. The layout must match the first two
- * fields of 'struct fid' exactly.
- */
- u32 ino;
- u32 generation;
- };
- u64 id;
-};
-
/*
* kernfs_node - the building block of kernfs hierarchy. Each and every
* kernfs node is represented by single kernfs_node. Most fields are
@@ -155,7 +140,12 @@ struct kernfs_node {
void *priv;
- union kernfs_node_id id;
+ /*
+ * 64bit unique ID. On 64bit ino setups, id is the ino. On 32bit,
+	 * the low 32 bits are the ino and the upper 32 bits are the generation.
+ */
+ u64 id;
+
unsigned short flags;
umode_t mode;
struct kernfs_iattrs *iattr;
@@ -187,7 +177,8 @@ struct kernfs_root {
/* private fields, do not use outside kernfs proper */
struct idr ino_idr;
- u32 next_generation;
+ u32 last_id_lowbits;
+ u32 id_highbits;
struct kernfs_syscall_ops *syscall_ops;
/* list of kernfs_super_info of this root, protected by kernfs_mutex */
@@ -291,6 +282,34 @@ static inline enum kernfs_node_type kernfs_type(struct kernfs_node *kn)
return kn->flags & KERNFS_TYPE_MASK;
}
+static inline ino_t kernfs_id_ino(u64 id)
+{
+ /* id is ino if ino_t is 64bit; otherwise, low 32bits */
+ if (sizeof(ino_t) >= sizeof(u64))
+ return id;
+ else
+ return (u32)id;
+}
+
+static inline u32 kernfs_id_gen(u64 id)
+{
+ /* gen is fixed at 1 if ino_t is 64bit; otherwise, high 32bits */
+ if (sizeof(ino_t) >= sizeof(u64))
+ return 1;
+ else
+ return id >> 32;
+}
+
+static inline ino_t kernfs_ino(struct kernfs_node *kn)
+{
+ return kernfs_id_ino(kn->id);
+}
+
+static inline ino_t kernfs_gen(struct kernfs_node *kn)
+{
+ return kernfs_id_gen(kn->id);
+}
+
/**
* kernfs_enable_ns - enable namespace under a directory
* @kn: directory of interest, should be empty
@@ -382,8 +401,8 @@ void kernfs_kill_sb(struct super_block *sb);
void kernfs_init(void);
-struct kernfs_node *kernfs_get_node_by_id(struct kernfs_root *root,
- const union kernfs_node_id *id);
+struct kernfs_node *kernfs_find_and_get_node_by_id(struct kernfs_root *root,
+ u64 id);
#else /* CONFIG_KERNFS */
static inline enum kernfs_node_type kernfs_type(struct kernfs_node *kn)
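A hypothetical sketch of the id packing the new helpers above imply: on 32-bit ino_t configurations the 64-bit id carries the ino in the low bits and the generation in the high bits, and the helpers recover both halves.

#include <linux/kernfs.h>

/* Hypothetical check, not from the patch: compare a node against an
 * exported 64-bit id using the helpers above. */
static bool example_id_matches(struct kernfs_node *kn, u64 id)
{
	return kernfs_ino(kn) == kernfs_id_ino(id) &&
	       kernfs_gen(kn) == kernfs_id_gen(id);
}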
diff --git a/include/linux/ktime.h b/include/linux/ktime.h
index b2bb44f87f5a..42d2e6ac35f2 100644
--- a/include/linux/ktime.h
+++ b/include/linux/ktime.h
@@ -66,33 +66,15 @@ static inline ktime_t ktime_set(const s64 secs, const unsigned long nsecs)
*/
#define ktime_sub_ns(kt, nsval) ((kt) - (nsval))
-/* convert a timespec to ktime_t format: */
-static inline ktime_t timespec_to_ktime(struct timespec ts)
-{
- return ktime_set(ts.tv_sec, ts.tv_nsec);
-}
-
/* convert a timespec64 to ktime_t format: */
static inline ktime_t timespec64_to_ktime(struct timespec64 ts)
{
return ktime_set(ts.tv_sec, ts.tv_nsec);
}
-/* convert a timeval to ktime_t format: */
-static inline ktime_t timeval_to_ktime(struct timeval tv)
-{
- return ktime_set(tv.tv_sec, tv.tv_usec * NSEC_PER_USEC);
-}
-
-/* Map the ktime_t to timespec conversion to ns_to_timespec function */
-#define ktime_to_timespec(kt) ns_to_timespec((kt))
-
/* Map the ktime_t to timespec conversion to ns_to_timespec function */
#define ktime_to_timespec64(kt) ns_to_timespec64((kt))
-/* Map the ktime_t to timeval conversion to ns_to_timeval function */
-#define ktime_to_timeval(kt) ns_to_timeval((kt))
-
/* Convert ktime_t to nanoseconds */
static inline s64 ktime_to_ns(const ktime_t kt)
{
@@ -216,25 +198,6 @@ static inline ktime_t ktime_sub_ms(const ktime_t kt, const u64 msec)
extern ktime_t ktime_add_safe(const ktime_t lhs, const ktime_t rhs);
/**
- * ktime_to_timespec_cond - convert a ktime_t variable to timespec
- * format only if the variable contains data
- * @kt: the ktime_t variable to convert
- * @ts: the timespec variable to store the result in
- *
- * Return: %true if there was a successful conversion, %false if kt was 0.
- */
-static inline __must_check bool ktime_to_timespec_cond(const ktime_t kt,
- struct timespec *ts)
-{
- if (kt) {
- *ts = ktime_to_timespec(kt);
- return true;
- } else {
- return false;
- }
-}
-
-/**
* ktime_to_timespec64_cond - convert a ktime_t variable to timespec64
* format only if the variable contains data
* @kt: the ktime_t variable to convert
@@ -253,14 +216,7 @@ static inline __must_check bool ktime_to_timespec64_cond(const ktime_t kt,
}
}
-/*
- * The resolution of the clocks. The resolution value is returned in
- * the clock_getres() system call to give application programmers an
- * idea of the (in)accuracy of timers. Timer values are rounded up to
- * this resolution values.
- */
-#define LOW_RES_NSEC TICK_NSEC
-#define KTIME_LOW_RES (LOW_RES_NSEC)
+#include <vdso/ktime.h>
static inline ktime_t ns_to_ktime(u64 ns)
{
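With the timespec/timeval converters removed, ktime construction goes through timespec64; a hypothetical sketch (ktime_get()/ktime_add() are assumed from the existing ktime/timekeeping API, not from this patch):

#include <linux/ktime.h>
#include <linux/timekeeping.h>

/* Hypothetical helper, not from the patch: absolute deadline from a
 * timespec64-relative timeout. */
static ktime_t example_deadline(const struct timespec64 *ts)
{
	return ktime_add(ktime_get(), timespec64_to_ktime(*ts));
}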
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index d41c521a39da..bcb9b2ac0791 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -149,7 +149,7 @@ static inline bool is_error_page(struct page *page)
#define KVM_REQUEST_ARCH_BASE 8
#define KVM_ARCH_REQ_FLAGS(nr, flags) ({ \
- BUILD_BUG_ON((unsigned)(nr) >= (FIELD_SIZEOF(struct kvm_vcpu, requests) * 8) - KVM_REQUEST_ARCH_BASE); \
+ BUILD_BUG_ON((unsigned)(nr) >= (sizeof_field(struct kvm_vcpu, requests) * 8) - KVM_REQUEST_ARCH_BASE); \
(unsigned)(((nr) + KVM_REQUEST_ARCH_BASE) | (flags)); \
})
#define KVM_ARCH_REQ(nr) KVM_ARCH_REQ_FLAGS(nr, 0)
@@ -157,8 +157,6 @@ static inline bool is_error_page(struct page *page)
#define KVM_USERSPACE_IRQ_SOURCE_ID 0
#define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID 1
-extern struct kmem_cache *kvm_vcpu_cache;
-
extern struct mutex kvm_lock;
extern struct list_head vm_list;
@@ -204,7 +202,7 @@ struct kvm_async_pf {
struct list_head queue;
struct kvm_vcpu *vcpu;
struct mm_struct *mm;
- gva_t gva;
+ gpa_t cr2_or_gpa;
unsigned long addr;
struct kvm_arch_async_pf arch;
bool wakeup_all;
@@ -212,8 +210,8 @@ struct kvm_async_pf {
void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu);
void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu);
-int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, unsigned long hva,
- struct kvm_arch_async_pf *arch);
+int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
+ unsigned long hva, struct kvm_arch_async_pf *arch);
int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu);
#endif
@@ -266,7 +264,8 @@ struct kvm_vcpu {
struct preempt_notifier preempt_notifier;
#endif
int cpu;
- int vcpu_id;
+ int vcpu_id; /* id given by userspace at creation */
+ int vcpu_idx; /* index in kvm->vcpus array */
int srcu_idx;
int mode;
u64 requests;
@@ -278,7 +277,6 @@ struct kvm_vcpu {
struct mutex mutex;
struct kvm_run *run;
- int guest_xcr0_loaded;
struct swait_queue_head wq;
struct pid __rcu *pid;
int sigset_active;
@@ -571,13 +569,7 @@ static inline struct kvm_vcpu *kvm_get_vcpu_by_id(struct kvm *kvm, int id)
static inline int kvm_vcpu_get_idx(struct kvm_vcpu *vcpu)
{
- struct kvm_vcpu *tmp;
- int idx;
-
- kvm_for_each_vcpu(idx, tmp, vcpu->kvm)
- if (tmp == vcpu)
- return idx;
- BUG();
+ return vcpu->vcpu_idx;
}
#define kvm_for_each_memslot(memslot, slots) \
@@ -585,8 +577,7 @@ static inline int kvm_vcpu_get_idx(struct kvm_vcpu *vcpu)
memslot < slots->memslots + KVM_MEM_SLOTS_NUM && memslot->npages;\
memslot++)
-int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id);
-void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);
+void kvm_vcpu_destroy(struct kvm_vcpu *vcpu);
void vcpu_load(struct kvm_vcpu *vcpu);
void vcpu_put(struct kvm_vcpu *vcpu);
@@ -622,6 +613,7 @@ void kvm_exit(void);
void kvm_get_kvm(struct kvm *kvm);
void kvm_put_kvm(struct kvm *kvm);
+void kvm_put_kvm_no_destroy(struct kvm *kvm);
static inline struct kvm_memslots *__kvm_memslots(struct kvm *kvm, int as_id)
{
@@ -728,10 +720,9 @@ void kvm_set_pfn_dirty(kvm_pfn_t pfn);
void kvm_set_pfn_accessed(kvm_pfn_t pfn);
void kvm_get_pfn(kvm_pfn_t pfn);
+void kvm_release_pfn(kvm_pfn_t pfn, bool dirty, struct gfn_to_pfn_cache *cache);
int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
int len);
-int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
- unsigned long len);
int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len);
int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
void *data, unsigned long len);
@@ -746,11 +737,33 @@ int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
unsigned long len);
int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
gpa_t gpa, unsigned long len);
+
+#define __kvm_put_guest(kvm, gfn, offset, value, type) \
+({ \
+ unsigned long __addr = gfn_to_hva(kvm, gfn); \
+ type __user *__uaddr = (type __user *)(__addr + offset); \
+ int __ret = -EFAULT; \
+ \
+ if (!kvm_is_error_hva(__addr)) \
+ __ret = put_user(value, __uaddr); \
+ if (!__ret) \
+ mark_page_dirty(kvm, gfn); \
+ __ret; \
+})
+
+#define kvm_put_guest(kvm, gpa, value, type) \
+({ \
+ gpa_t __gpa = gpa; \
+ struct kvm *__kvm = kvm; \
+ __kvm_put_guest(__kvm, __gpa >> PAGE_SHIFT, \
+ offset_in_page(__gpa), (value), type); \
+})
+
int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len);
int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len);
struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
bool kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn);
-unsigned long kvm_host_page_size(struct kvm *kvm, gfn_t gfn);
+unsigned long kvm_host_page_size(struct kvm_vcpu *vcpu, gfn_t gfn);
void mark_page_dirty(struct kvm *kvm, gfn_t gfn);
struct kvm_memslots *kvm_vcpu_memslots(struct kvm_vcpu *vcpu);
@@ -758,8 +771,12 @@ struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn
kvm_pfn_t kvm_vcpu_gfn_to_pfn_atomic(struct kvm_vcpu *vcpu, gfn_t gfn);
kvm_pfn_t kvm_vcpu_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn);
int kvm_vcpu_map(struct kvm_vcpu *vcpu, gpa_t gpa, struct kvm_host_map *map);
+int kvm_map_gfn(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map,
+ struct gfn_to_pfn_cache *cache, bool atomic);
struct page *kvm_vcpu_gfn_to_page(struct kvm_vcpu *vcpu, gfn_t gfn);
void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty);
+int kvm_unmap_gfn(struct kvm_vcpu *vcpu, struct kvm_host_map *map,
+ struct gfn_to_pfn_cache *cache, bool dirty, bool atomic);
unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn);
unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable);
int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data, int offset,
@@ -791,6 +808,8 @@ void kvm_reload_remote_mmus(struct kvm *kvm);
bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req,
unsigned long *vcpu_bitmap, cpumask_var_t tmp);
bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req);
+bool kvm_make_cpus_request_mask(struct kvm *kvm, unsigned int req,
+ unsigned long *vcpu_bitmap);
long kvm_arch_dev_ioctl(struct file *filp,
unsigned int ioctl, unsigned long arg);
@@ -848,16 +867,12 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
int kvm_arch_init(void *opaque);
void kvm_arch_exit(void);
-int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
-void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu);
-
void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu);
-void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu);
-struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id);
-int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu);
+int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id);
+int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu);
@@ -874,6 +889,8 @@ int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu);
bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu);
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu);
bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu);
+int kvm_arch_post_init_vm(struct kvm *kvm);
+void kvm_arch_pre_destroy_vm(struct kvm *kvm);
#ifndef __KVM_HAVE_ARCH_VM_ALLOC
/*
@@ -963,10 +980,10 @@ void kvm_arch_destroy_vm(struct kvm *kvm);
void kvm_arch_sync_events(struct kvm *kvm);
int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu);
-void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
bool kvm_is_reserved_pfn(kvm_pfn_t pfn);
bool kvm_is_zone_device_pfn(kvm_pfn_t pfn);
+bool kvm_is_transparent_hugepage(kvm_pfn_t pfn);
struct kvm_irq_ack_notifier {
struct hlist_node link;
@@ -1090,9 +1107,8 @@ enum kvm_stat_kind {
};
struct kvm_stat_data {
- int offset;
- int mode;
struct kvm *kvm;
+ struct kvm_stats_debugfs_item *dbgfs_item;
};
struct kvm_stats_debugfs_item {
@@ -1101,6 +1117,10 @@ struct kvm_stats_debugfs_item {
enum kvm_stat_kind kind;
int mode;
};
+
+#define KVM_DBGFS_GET_MODE(dbgfs_item) \
+ ((dbgfs_item)->mode ? (dbgfs_item)->mode : 0644)
+
extern struct kvm_stats_debugfs_item debugfs_entries[];
extern struct dentry *kvm_debugfs_dir;
@@ -1241,7 +1261,7 @@ extern unsigned int halt_poll_ns_grow_start;
extern unsigned int halt_poll_ns_shrink;
struct kvm_device {
- struct kvm_device_ops *ops;
+ const struct kvm_device_ops *ops;
struct kvm *kvm;
void *private;
struct list_head vm_node;
@@ -1294,7 +1314,7 @@ struct kvm_device_ops {
void kvm_device_get(struct kvm_device *dev);
void kvm_device_put(struct kvm_device *dev);
struct kvm_device *kvm_device_from_filp(struct file *filp);
-int kvm_register_device_ops(struct kvm_device_ops *ops, u32 type);
+int kvm_register_device_ops(const struct kvm_device_ops *ops, u32 type);
void kvm_unregister_device_ops(u32 type);
extern struct kvm_device_ops kvm_mpic_ops;
@@ -1323,6 +1343,9 @@ static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val)
}
#endif /* CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */
+struct kvm_vcpu *kvm_get_running_vcpu(void);
+struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void);
+
#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
bool kvm_arch_has_irq_bypass(void);
int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *,
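As a concrete illustration of the kvm_put_guest() macro added above, a hypothetical helper that stores a 32-bit flag at a guest physical address and marks the page dirty on success:

#include <linux/kvm_host.h>

/* Hypothetical helper, not from the patch. */
static int example_post_flag(struct kvm *kvm, gpa_t gpa)
{
	return kvm_put_guest(kvm, gpa, 1u, u32);
}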
diff --git a/include/linux/kvm_types.h b/include/linux/kvm_types.h
index bde5374ae021..68e84cf42a3f 100644
--- a/include/linux/kvm_types.h
+++ b/include/linux/kvm_types.h
@@ -18,7 +18,7 @@ struct kvm_memslots;
enum kvm_mr_change;
-#include <asm/types.h>
+#include <linux/types.h>
/*
* Address types:
@@ -35,6 +35,8 @@ typedef unsigned long gva_t;
typedef u64 gpa_t;
typedef u64 gfn_t;
+#define GPA_INVALID (~(gpa_t)0)
+
typedef unsigned long hva_t;
typedef u64 hpa_t;
typedef u64 hfn_t;
@@ -49,4 +51,11 @@ struct gfn_to_hva_cache {
struct kvm_memory_slot *memslot;
};
+struct gfn_to_pfn_cache {
+ u64 generation;
+ gfn_t gfn;
+ kvm_pfn_t pfn;
+ bool dirty;
+};
+
#endif /* __KVM_TYPES_H__ */
diff --git a/include/linux/led-class-flash.h b/include/linux/led-class-flash.h
index 1e824963af17..21a3358a1731 100644
--- a/include/linux/led-class-flash.h
+++ b/include/linux/led-class-flash.h
@@ -94,12 +94,15 @@ static inline struct led_classdev_flash *lcdev_to_flcdev(
*
* Returns: 0 on success or negative error value on failure
*/
-extern int led_classdev_flash_register_ext(struct device *parent,
- struct led_classdev_flash *fled_cdev,
- struct led_init_data *init_data);
+int led_classdev_flash_register_ext(struct device *parent,
+ struct led_classdev_flash *fled_cdev,
+ struct led_init_data *init_data);
-#define led_classdev_flash_register(parent, fled_cdev) \
- led_classdev_flash_register_ext(parent, fled_cdev, NULL)
+static inline int led_classdev_flash_register(struct device *parent,
+ struct led_classdev_flash *fled_cdev)
+{
+ return led_classdev_flash_register_ext(parent, fled_cdev, NULL);
+}
/**
* led_classdev_flash_unregister - unregisters an object of led_classdev class
@@ -108,7 +111,21 @@ extern int led_classdev_flash_register_ext(struct device *parent,
*
* Unregister a previously registered via led_classdev_flash_register object
*/
-extern void led_classdev_flash_unregister(struct led_classdev_flash *fled_cdev);
+void led_classdev_flash_unregister(struct led_classdev_flash *fled_cdev);
+
+int devm_led_classdev_flash_register_ext(struct device *parent,
+ struct led_classdev_flash *fled_cdev,
+ struct led_init_data *init_data);
+
+static inline int devm_led_classdev_flash_register(struct device *parent,
+ struct led_classdev_flash *fled_cdev)
+{
+ return devm_led_classdev_flash_register_ext(parent, fled_cdev, NULL);
+}
+
+void devm_led_classdev_flash_unregister(struct device *parent,
+ struct led_classdev_flash *fled_cdev);
/**
* led_set_flash_strobe - setup flash strobe
@@ -156,8 +173,8 @@ static inline int led_get_flash_strobe(struct led_classdev_flash *fled_cdev,
*
* Returns: 0 on success or negative error value on failure
*/
-extern int led_set_flash_brightness(struct led_classdev_flash *fled_cdev,
- u32 brightness);
+int led_set_flash_brightness(struct led_classdev_flash *fled_cdev,
+ u32 brightness);
/**
* led_update_flash_brightness - update flash LED brightness
@@ -168,7 +185,7 @@ extern int led_set_flash_brightness(struct led_classdev_flash *fled_cdev,
*
* Returns: 0 on success or negative error value on failure
*/
-extern int led_update_flash_brightness(struct led_classdev_flash *fled_cdev);
+int led_update_flash_brightness(struct led_classdev_flash *fled_cdev);
/**
* led_set_flash_timeout - set flash LED timeout
@@ -179,8 +196,7 @@ extern int led_update_flash_brightness(struct led_classdev_flash *fled_cdev);
*
* Returns: 0 on success or negative error value on failure
*/
-extern int led_set_flash_timeout(struct led_classdev_flash *fled_cdev,
- u32 timeout);
+int led_set_flash_timeout(struct led_classdev_flash *fled_cdev, u32 timeout);
/**
* led_get_flash_fault - get the flash LED fault
@@ -191,7 +207,6 @@ extern int led_set_flash_timeout(struct led_classdev_flash *fled_cdev,
*
* Returns: 0 on success or negative error value on failure
*/
-extern int led_get_flash_fault(struct led_classdev_flash *fled_cdev,
- u32 *fault);
+int led_get_flash_fault(struct led_classdev_flash *fled_cdev, u32 *fault);
#endif /* __LINUX_FLASH_LEDS_H_INCLUDED */
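A hypothetical probe sketch using the new devm_ variant declared above; the registered flash LED is unregistered automatically when the driver is unbound.

#include <linux/led-class-flash.h>

/* Hypothetical probe helper, not from the patch. */
static int example_register_flash(struct device *dev,
				  struct led_classdev_flash *fled_cdev)
{
	return devm_led_classdev_flash_register(dev, fled_cdev);
}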
diff --git a/include/linux/leds-bd2802.h b/include/linux/leds-bd2802.h
index dd93c8d787b4..ec577f5f8707 100644
--- a/include/linux/leds-bd2802.h
+++ b/include/linux/leds-bd2802.h
@@ -11,7 +11,6 @@
#define _LEDS_BD2802_H_
struct bd2802_led_platform_data{
- int reset_gpio;
u8 rgb_time;
};
diff --git a/include/linux/leds.h b/include/linux/leds.h
index efb309dba914..75353e5f9d13 100644
--- a/include/linux/leds.h
+++ b/include/linux/leds.h
@@ -20,6 +20,7 @@
struct device;
struct led_pattern;
+struct device_node;
/*
* LED Core
*/
@@ -161,7 +162,7 @@ struct led_classdev {
*
* Returns: 0 on success or negative error value on failure
*/
-extern int led_classdev_register_ext(struct device *parent,
+int led_classdev_register_ext(struct device *parent,
struct led_classdev *led_cdev,
struct led_init_data *init_data);
@@ -181,7 +182,7 @@ static inline int led_classdev_register(struct device *parent,
return led_classdev_register_ext(parent, led_cdev, NULL);
}
-extern int devm_led_classdev_register_ext(struct device *parent,
+int devm_led_classdev_register_ext(struct device *parent,
struct led_classdev *led_cdev,
struct led_init_data *init_data);
@@ -190,11 +191,16 @@ static inline int devm_led_classdev_register(struct device *parent,
{
return devm_led_classdev_register_ext(parent, led_cdev, NULL);
}
-extern void led_classdev_unregister(struct led_classdev *led_cdev);
-extern void devm_led_classdev_unregister(struct device *parent,
- struct led_classdev *led_cdev);
-extern void led_classdev_suspend(struct led_classdev *led_cdev);
-extern void led_classdev_resume(struct led_classdev *led_cdev);
+void led_classdev_unregister(struct led_classdev *led_cdev);
+void devm_led_classdev_unregister(struct device *parent,
+ struct led_classdev *led_cdev);
+void led_classdev_suspend(struct led_classdev *led_cdev);
+void led_classdev_resume(struct led_classdev *led_cdev);
+
+extern struct led_classdev *of_led_get(struct device_node *np, int index);
+extern void led_put(struct led_classdev *led_cdev);
+struct led_classdev *__must_check devm_of_led_get(struct device *dev,
+ int index);
/**
* led_blink_set - set blinking with software fallback
@@ -211,9 +217,8 @@ extern void led_classdev_resume(struct led_classdev *led_cdev);
* led_cdev->brightness_set() will not stop the blinking,
* use led_classdev_brightness_set() instead.
*/
-extern void led_blink_set(struct led_classdev *led_cdev,
- unsigned long *delay_on,
- unsigned long *delay_off);
+void led_blink_set(struct led_classdev *led_cdev, unsigned long *delay_on,
+ unsigned long *delay_off);
/**
* led_blink_set_oneshot - do a oneshot software blink
* @led_cdev: the LED to start blinking
@@ -228,10 +233,9 @@ extern void led_blink_set(struct led_classdev *led_cdev,
* If invert is set, led blinks for delay_off first, then for
* delay_on and leave the led on after the on-off cycle.
*/
-extern void led_blink_set_oneshot(struct led_classdev *led_cdev,
- unsigned long *delay_on,
- unsigned long *delay_off,
- int invert);
+void led_blink_set_oneshot(struct led_classdev *led_cdev,
+ unsigned long *delay_on, unsigned long *delay_off,
+ int invert);
/**
* led_set_brightness - set LED brightness
* @led_cdev: the LED to set
@@ -241,8 +245,8 @@ extern void led_blink_set_oneshot(struct led_classdev *led_cdev,
* software blink timer that implements blinking when the
* hardware doesn't. This function is guaranteed not to sleep.
*/
-extern void led_set_brightness(struct led_classdev *led_cdev,
- enum led_brightness brightness);
+void led_set_brightness(struct led_classdev *led_cdev,
+ enum led_brightness brightness);
/**
* led_set_brightness_sync - set LED brightness synchronously
@@ -255,8 +259,8 @@ extern void led_set_brightness(struct led_classdev *led_cdev,
*
* Returns: 0 on success or negative error value on failure
*/
-extern int led_set_brightness_sync(struct led_classdev *led_cdev,
- enum led_brightness value);
+int led_set_brightness_sync(struct led_classdev *led_cdev,
+ enum led_brightness value);
/**
* led_update_brightness - update LED brightness
@@ -267,7 +271,7 @@ extern int led_set_brightness_sync(struct led_classdev *led_cdev,
*
* Returns: 0 on success or negative error value on failure
*/
-extern int led_update_brightness(struct led_classdev *led_cdev);
+int led_update_brightness(struct led_classdev *led_cdev);
/**
* led_get_default_pattern - return default pattern
@@ -279,8 +283,7 @@ extern int led_update_brightness(struct led_classdev *led_cdev);
* Return: Allocated array of integers with default pattern from device tree
* or NULL. Caller is responsible for kfree().
*/
-extern u32 *led_get_default_pattern(struct led_classdev *led_cdev,
- unsigned int *size);
+u32 *led_get_default_pattern(struct led_classdev *led_cdev, unsigned int *size);
/**
* led_sysfs_disable - disable LED sysfs interface
@@ -288,7 +291,7 @@ extern u32 *led_get_default_pattern(struct led_classdev *led_cdev,
*
* Disable the led_cdev's sysfs interface.
*/
-extern void led_sysfs_disable(struct led_classdev *led_cdev);
+void led_sysfs_disable(struct led_classdev *led_cdev);
/**
* led_sysfs_enable - enable LED sysfs interface
@@ -296,7 +299,7 @@ extern void led_sysfs_disable(struct led_classdev *led_cdev);
*
* Enable the led_cdev's sysfs interface.
*/
-extern void led_sysfs_enable(struct led_classdev *led_cdev);
+void led_sysfs_enable(struct led_classdev *led_cdev);
/**
* led_compose_name - compose LED class device name
@@ -310,8 +313,8 @@ extern void led_sysfs_enable(struct led_classdev *led_cdev);
*
* Returns: 0 on success or negative error value on failure
*/
-extern int led_compose_name(struct device *dev, struct led_init_data *init_data,
- char *led_classdev_name);
+int led_compose_name(struct device *dev, struct led_init_data *init_data,
+ char *led_classdev_name);
/**
* led_sysfs_is_disabled - check if LED sysfs interface is disabled
@@ -360,33 +363,25 @@ struct led_trigger {
#define led_trigger_get_led(dev) ((struct led_classdev *)dev_get_drvdata((dev)))
#define led_trigger_get_drvdata(dev) (led_get_trigger_data(led_trigger_get_led(dev)))
-ssize_t led_trigger_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count);
-ssize_t led_trigger_show(struct device *dev, struct device_attribute *attr,
- char *buf);
-
/* Registration functions for complex triggers */
-extern int led_trigger_register(struct led_trigger *trigger);
-extern void led_trigger_unregister(struct led_trigger *trigger);
-extern int devm_led_trigger_register(struct device *dev,
+int led_trigger_register(struct led_trigger *trigger);
+void led_trigger_unregister(struct led_trigger *trigger);
+int devm_led_trigger_register(struct device *dev,
struct led_trigger *trigger);
-extern void led_trigger_register_simple(const char *name,
+void led_trigger_register_simple(const char *name,
struct led_trigger **trigger);
-extern void led_trigger_unregister_simple(struct led_trigger *trigger);
-extern void led_trigger_event(struct led_trigger *trigger,
- enum led_brightness event);
-extern void led_trigger_blink(struct led_trigger *trigger,
- unsigned long *delay_on,
- unsigned long *delay_off);
-extern void led_trigger_blink_oneshot(struct led_trigger *trigger,
- unsigned long *delay_on,
- unsigned long *delay_off,
- int invert);
-extern void led_trigger_set_default(struct led_classdev *led_cdev);
-extern int led_trigger_set(struct led_classdev *led_cdev,
- struct led_trigger *trigger);
-extern void led_trigger_remove(struct led_classdev *led_cdev);
+void led_trigger_unregister_simple(struct led_trigger *trigger);
+void led_trigger_event(struct led_trigger *trigger, enum led_brightness event);
+void led_trigger_blink(struct led_trigger *trigger, unsigned long *delay_on,
+ unsigned long *delay_off);
+void led_trigger_blink_oneshot(struct led_trigger *trigger,
+ unsigned long *delay_on,
+ unsigned long *delay_off,
+ int invert);
+void led_trigger_set_default(struct led_classdev *led_cdev);
+int led_trigger_set(struct led_classdev *led_cdev, struct led_trigger *trigger);
+void led_trigger_remove(struct led_classdev *led_cdev);
static inline void led_set_trigger_data(struct led_classdev *led_cdev,
void *trigger_data)
@@ -414,8 +409,7 @@ static inline void *led_get_trigger_data(struct led_classdev *led_cdev)
* This is meant to be used on triggers with statically
* allocated name.
*/
-extern void led_trigger_rename_static(const char *name,
- struct led_trigger *trig);
+void led_trigger_rename_static(const char *name, struct led_trigger *trig);
#define module_led_trigger(__led_trigger) \
module_driver(__led_trigger, led_trigger_register, \
@@ -457,20 +451,20 @@ static inline void *led_get_trigger_data(struct led_classdev *led_cdev)
/* Trigger specific functions */
#ifdef CONFIG_LEDS_TRIGGER_DISK
-extern void ledtrig_disk_activity(bool write);
+void ledtrig_disk_activity(bool write);
#else
static inline void ledtrig_disk_activity(bool write) {}
#endif
#ifdef CONFIG_LEDS_TRIGGER_MTD
-extern void ledtrig_mtd_activity(void);
+void ledtrig_mtd_activity(void);
#else
static inline void ledtrig_mtd_activity(void) {}
#endif
#if defined(CONFIG_LEDS_TRIGGER_CAMERA) || defined(CONFIG_LEDS_TRIGGER_CAMERA_MODULE)
-extern void ledtrig_flash_ctrl(bool on);
-extern void ledtrig_torch_ctrl(bool on);
+void ledtrig_flash_ctrl(bool on);
+void ledtrig_torch_ctrl(bool on);
#else
static inline void ledtrig_flash_ctrl(bool on) {}
static inline void ledtrig_torch_ctrl(bool on) {}
@@ -550,7 +544,7 @@ enum cpu_led_event {
CPU_LED_HALTED, /* Machine shutdown */
};
#ifdef CONFIG_LEDS_TRIGGER_CPU
-extern void ledtrig_cpu(enum cpu_led_event evt);
+void ledtrig_cpu(enum cpu_led_event evt);
#else
static inline void ledtrig_cpu(enum cpu_led_event evt)
{
@@ -559,7 +553,7 @@ static inline void ledtrig_cpu(enum cpu_led_event evt)
#endif
#ifdef CONFIG_LEDS_BRIGHTNESS_HW_CHANGED
-extern void led_classdev_notify_brightness_hw_changed(
+void led_classdev_notify_brightness_hw_changed(
struct led_classdev *led_cdev, enum led_brightness brightness);
#else
static inline void led_classdev_notify_brightness_hw_changed(
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 207e7ee764ce..cffa4714bfa8 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -57,8 +57,6 @@
#define VPRINTK(fmt, args...)
#endif /* ATA_DEBUG */
-#define BPRINTK(fmt, args...) if (ap->flags & ATA_FLAG_DEBUGMSG) printk(KERN_ERR "%s: " fmt, __func__, ## args)
-
#define ata_print_version_once(dev, version) \
({ \
static bool __print_once; \
@@ -176,6 +174,7 @@ enum {
ATA_DEV_NONE = 11, /* no device */
/* struct ata_link flags */
+ /* NOTE: struct ata_force_param currently stores lflags in u16 */
ATA_LFLAG_NO_HRST = (1 << 1), /* avoid hardreset */
ATA_LFLAG_NO_SRST = (1 << 2), /* avoid softreset */
ATA_LFLAG_ASSUME_ATA = (1 << 3), /* assume ATA class */
@@ -484,6 +483,7 @@ enum hsm_task_states {
};
enum ata_completion_errors {
+ AC_ERR_OK = 0, /* no error */
AC_ERR_DEV = (1 << 0), /* device reported error */
AC_ERR_HSM = (1 << 1), /* host state machine violation */
AC_ERR_TIMEOUT = (1 << 2), /* timeout */
@@ -530,12 +530,14 @@ typedef int (*ata_reset_fn_t)(struct ata_link *link, unsigned int *classes,
unsigned long deadline);
typedef void (*ata_postreset_fn_t)(struct ata_link *link, unsigned int *classes);
-extern struct device_attribute dev_attr_link_power_management_policy;
extern struct device_attribute dev_attr_unload_heads;
+#ifdef CONFIG_SATA_HOST
+extern struct device_attribute dev_attr_link_power_management_policy;
extern struct device_attribute dev_attr_ncq_prio_enable;
extern struct device_attribute dev_attr_em_message_type;
extern struct device_attribute dev_attr_em_message;
extern struct device_attribute dev_attr_sw_activity;
+#endif
enum sw_activity {
OFF,
@@ -891,9 +893,9 @@ struct ata_port_operations {
/*
* Command execution
*/
- int (*qc_defer)(struct ata_queued_cmd *qc);
- int (*check_atapi_dma)(struct ata_queued_cmd *qc);
- void (*qc_prep)(struct ata_queued_cmd *qc);
+ int (*qc_defer)(struct ata_queued_cmd *qc);
+ int (*check_atapi_dma)(struct ata_queued_cmd *qc);
+ enum ata_completion_errors (*qc_prep)(struct ata_queued_cmd *qc);
unsigned int (*qc_issue)(struct ata_queued_cmd *qc);
bool (*qc_fill_rtf)(struct ata_queued_cmd *qc);
@@ -1019,10 +1021,6 @@ struct ata_timing {
/*
* Core layer - drivers/ata/libata-core.c
*/
-extern const unsigned long sata_deb_timing_normal[];
-extern const unsigned long sata_deb_timing_hotplug[];
-extern const unsigned long sata_deb_timing_long[];
-
extern struct ata_port_operations ata_dummy_port_ops;
extern const struct ata_port_info ata_dummy_port_info;
@@ -1060,33 +1058,14 @@ static inline int is_multi_taskfile(struct ata_taskfile *tf)
(tf->command == ATA_CMD_WRITE_MULTI_FUA_EXT);
}
-static inline const unsigned long *
-sata_ehc_deb_timing(struct ata_eh_context *ehc)
-{
- if (ehc->i.flags & ATA_EHI_HOTPLUGGED)
- return sata_deb_timing_hotplug;
- else
- return sata_deb_timing_normal;
-}
-
static inline int ata_port_is_dummy(struct ata_port *ap)
{
return ap->ops == &ata_dummy_port_ops;
}
-extern int sata_set_spd(struct ata_link *link);
extern int ata_std_prereset(struct ata_link *link, unsigned long deadline);
extern int ata_wait_after_reset(struct ata_link *link, unsigned long deadline,
int (*check_ready)(struct ata_link *link));
-extern int sata_link_debounce(struct ata_link *link,
- const unsigned long *params, unsigned long deadline);
-extern int sata_link_resume(struct ata_link *link, const unsigned long *params,
- unsigned long deadline);
-extern int sata_link_scr_lpm(struct ata_link *link, enum ata_lpm_policy policy,
- bool spm_wakeup);
-extern int sata_link_hardreset(struct ata_link *link,
- const unsigned long *timing, unsigned long deadline,
- bool *online, int (*check_ready)(struct ata_link *));
extern int sata_std_hardreset(struct ata_link *link, unsigned int *class,
unsigned long deadline);
extern void ata_std_postreset(struct ata_link *link, unsigned int *classes);
@@ -1094,7 +1073,6 @@ extern void ata_std_postreset(struct ata_link *link, unsigned int *classes);
extern struct ata_host *ata_host_alloc(struct device *dev, int max_ports);
extern struct ata_host *ata_host_alloc_pinfo(struct device *dev,
const struct ata_port_info * const * ppi, int n_ports);
-extern int ata_slave_link_init(struct ata_port *ap);
extern void ata_host_get(struct ata_host *host);
extern void ata_host_put(struct ata_host *host);
extern int ata_host_start(struct ata_host *host);
@@ -1108,25 +1086,14 @@ extern void ata_host_init(struct ata_host *, struct device *, struct ata_port_op
extern int ata_scsi_detect(struct scsi_host_template *sht);
extern int ata_scsi_ioctl(struct scsi_device *dev, unsigned int cmd,
void __user *arg);
+#ifdef CONFIG_COMPAT
+#define ATA_SCSI_COMPAT_IOCTL .compat_ioctl = ata_scsi_ioctl,
+#else
+#define ATA_SCSI_COMPAT_IOCTL /* empty */
+#endif
extern int ata_scsi_queuecmd(struct Scsi_Host *h, struct scsi_cmnd *cmd);
extern int ata_sas_scsi_ioctl(struct ata_port *ap, struct scsi_device *dev,
unsigned int cmd, void __user *arg);
-extern void ata_sas_port_destroy(struct ata_port *);
-extern struct ata_port *ata_sas_port_alloc(struct ata_host *,
- struct ata_port_info *, struct Scsi_Host *);
-extern void ata_sas_async_probe(struct ata_port *ap);
-extern int ata_sas_sync_probe(struct ata_port *ap);
-extern int ata_sas_port_init(struct ata_port *);
-extern int ata_sas_port_start(struct ata_port *ap);
-extern int ata_sas_tport_add(struct device *parent, struct ata_port *ap);
-extern void ata_sas_tport_delete(struct ata_port *ap);
-extern void ata_sas_port_stop(struct ata_port *ap);
-extern int ata_sas_slave_configure(struct scsi_device *, struct ata_port *);
-extern int ata_sas_queuecmd(struct scsi_cmnd *cmd, struct ata_port *ap);
-extern int sata_scr_valid(struct ata_link *link);
-extern int sata_scr_read(struct ata_link *link, int reg, u32 *val);
-extern int sata_scr_write(struct ata_link *link, int reg, u32 val);
-extern int sata_scr_write_flush(struct ata_link *link, int reg, u32 val);
extern bool ata_link_online(struct ata_link *link);
extern bool ata_link_offline(struct ata_link *link);
#ifdef CONFIG_PM
@@ -1147,9 +1114,6 @@ extern void ata_msleep(struct ata_port *ap, unsigned int msecs);
extern u32 ata_wait_register(struct ata_port *ap, void __iomem *reg, u32 mask,
u32 val, unsigned long interval, unsigned long timeout);
extern int atapi_cmd_type(u8 opcode);
-extern void ata_tf_to_fis(const struct ata_taskfile *tf,
- u8 pmp, int is_cmd, u8 *fis);
-extern void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf);
extern unsigned long ata_pack_xfermask(unsigned long pio_mask,
unsigned long mwdma_mask, unsigned long udma_mask);
extern void ata_unpack_xfermask(unsigned long xfer_mask,
@@ -1161,7 +1125,7 @@ extern int ata_xfer_mode2shift(unsigned long xfer_mode);
extern const char *ata_mode_string(unsigned long xfer_mask);
extern unsigned long ata_id_xfermask(const u16 *id);
extern int ata_std_qc_defer(struct ata_queued_cmd *qc);
-extern void ata_noop_qc_prep(struct ata_queued_cmd *qc);
+extern enum ata_completion_errors ata_noop_qc_prep(struct ata_queued_cmd *qc);
extern void ata_sg_init(struct ata_queued_cmd *qc, struct scatterlist *sg,
unsigned int n_elem);
extern unsigned int ata_dev_classify(const struct ata_taskfile *tf);
@@ -1173,7 +1137,7 @@ extern void ata_id_c_string(const u16 *id, unsigned char *s,
extern unsigned int ata_do_dev_read_id(struct ata_device *dev,
struct ata_taskfile *tf, u16 *id);
extern void ata_qc_complete(struct ata_queued_cmd *qc);
-extern int ata_qc_complete_multiple(struct ata_port *ap, u64 qc_active);
+extern u64 ata_qc_get_active(struct ata_port *ap);
extern void ata_scsi_simulate(struct ata_device *dev, struct scsi_cmnd *cmd);
extern int ata_std_bios_param(struct scsi_device *sdev,
struct block_device *bdev,
@@ -1189,7 +1153,96 @@ extern struct ata_device *ata_dev_pair(struct ata_device *adev);
extern int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev);
extern void ata_scsi_port_error_handler(struct Scsi_Host *host, struct ata_port *ap);
extern void ata_scsi_cmd_error_handler(struct Scsi_Host *host, struct ata_port *ap, struct list_head *eh_q);
+
+/*
+ * SATA specific code - drivers/ata/libata-sata.c
+ */
+#ifdef CONFIG_SATA_HOST
+extern const unsigned long sata_deb_timing_normal[];
+extern const unsigned long sata_deb_timing_hotplug[];
+extern const unsigned long sata_deb_timing_long[];
+
+static inline const unsigned long *
+sata_ehc_deb_timing(struct ata_eh_context *ehc)
+{
+ if (ehc->i.flags & ATA_EHI_HOTPLUGGED)
+ return sata_deb_timing_hotplug;
+ else
+ return sata_deb_timing_normal;
+}
+
+extern int sata_scr_valid(struct ata_link *link);
+extern int sata_scr_read(struct ata_link *link, int reg, u32 *val);
+extern int sata_scr_write(struct ata_link *link, int reg, u32 val);
+extern int sata_scr_write_flush(struct ata_link *link, int reg, u32 val);
+extern int sata_set_spd(struct ata_link *link);
+extern int sata_link_hardreset(struct ata_link *link,
+ const unsigned long *timing, unsigned long deadline,
+ bool *online, int (*check_ready)(struct ata_link *));
+extern int sata_link_resume(struct ata_link *link, const unsigned long *params,
+ unsigned long deadline);
+extern void ata_eh_analyze_ncq_error(struct ata_link *link);
+#else
+static inline const unsigned long *
+sata_ehc_deb_timing(struct ata_eh_context *ehc)
+{
+ return NULL;
+}
+static inline int sata_scr_valid(struct ata_link *link) { return 0; }
+static inline int sata_scr_read(struct ata_link *link, int reg, u32 *val)
+{
+ return -EOPNOTSUPP;
+}
+static inline int sata_scr_write(struct ata_link *link, int reg, u32 val)
+{
+ return -EOPNOTSUPP;
+}
+static inline int sata_scr_write_flush(struct ata_link *link, int reg, u32 val)
+{
+ return -EOPNOTSUPP;
+}
+static inline int sata_set_spd(struct ata_link *link) { return -EOPNOTSUPP; }
+static inline int sata_link_hardreset(struct ata_link *link,
+ const unsigned long *timing,
+ unsigned long deadline,
+ bool *online,
+ int (*check_ready)(struct ata_link *))
+{
+ if (online)
+ *online = false;
+ return -EOPNOTSUPP;
+}
+static inline int sata_link_resume(struct ata_link *link,
+ const unsigned long *params,
+ unsigned long deadline)
+{
+ return -EOPNOTSUPP;
+}
+static inline void ata_eh_analyze_ncq_error(struct ata_link *link) { }
+#endif
+extern int sata_link_debounce(struct ata_link *link,
+ const unsigned long *params, unsigned long deadline);
+extern int sata_link_scr_lpm(struct ata_link *link, enum ata_lpm_policy policy,
+ bool spm_wakeup);
+extern int ata_slave_link_init(struct ata_port *ap);
+extern void ata_sas_port_destroy(struct ata_port *);
+extern struct ata_port *ata_sas_port_alloc(struct ata_host *,
+ struct ata_port_info *, struct Scsi_Host *);
+extern void ata_sas_async_probe(struct ata_port *ap);
+extern int ata_sas_sync_probe(struct ata_port *ap);
+extern int ata_sas_port_init(struct ata_port *);
+extern int ata_sas_port_start(struct ata_port *ap);
+extern int ata_sas_tport_add(struct device *parent, struct ata_port *ap);
+extern void ata_sas_tport_delete(struct ata_port *ap);
+extern void ata_sas_port_stop(struct ata_port *ap);
+extern int ata_sas_slave_configure(struct scsi_device *, struct ata_port *);
+extern int ata_sas_queuecmd(struct scsi_cmnd *cmd, struct ata_port *ap);
+extern void ata_tf_to_fis(const struct ata_taskfile *tf,
+ u8 pmp, int is_cmd, u8 *fis);
+extern void ata_tf_from_fis(const u8 *fis, struct ata_taskfile *tf);
+extern int ata_qc_complete_multiple(struct ata_port *ap, u64 qc_active);
extern bool sata_lpm_ignore_phy_events(struct ata_link *link);
+extern int sata_async_notification(struct ata_port *ap);
extern int ata_cable_40wire(struct ata_port *ap);
extern int ata_cable_80wire(struct ata_port *ap);
@@ -1199,12 +1252,6 @@ extern int ata_cable_unknown(struct ata_port *ap);
/* Timing helpers */
extern unsigned int ata_pio_need_iordy(const struct ata_device *);
-extern const struct ata_timing *ata_timing_find_mode(u8 xfer_mode);
-extern int ata_timing_compute(struct ata_device *, unsigned short,
- struct ata_timing *, int, int);
-extern void ata_timing_merge(const struct ata_timing *,
- const struct ata_timing *, struct ata_timing *,
- unsigned int);
extern u8 ata_timing_cycle2mode(unsigned int xfer_shift, int cycle);
/* PCI */
@@ -1219,6 +1266,7 @@ struct pci_bits {
};
extern int pci_test_config_bits(struct pci_dev *pdev, const struct pci_bits *bits);
+extern void ata_pci_shutdown_one(struct pci_dev *pdev);
extern void ata_pci_remove_one(struct pci_dev *pdev);
#ifdef CONFIG_PM
@@ -1287,14 +1335,12 @@ extern void ata_port_wait_eh(struct ata_port *ap);
extern int ata_link_abort(struct ata_link *link);
extern int ata_port_abort(struct ata_port *ap);
extern int ata_port_freeze(struct ata_port *ap);
-extern int sata_async_notification(struct ata_port *ap);
extern void ata_eh_freeze_port(struct ata_port *ap);
extern void ata_eh_thaw_port(struct ata_port *ap);
extern void ata_eh_qc_complete(struct ata_queued_cmd *qc);
extern void ata_eh_qc_retry(struct ata_queued_cmd *qc);
-extern void ata_eh_analyze_ncq_error(struct ata_link *link);
extern void ata_do_eh(struct ata_port *ap, ata_prereset_fn_t prereset,
ata_reset_fn_t softreset, ata_reset_fn_t hardreset,
@@ -1335,10 +1381,11 @@ extern struct device_attribute *ata_common_sdev_attrs[];
* edge driver's module reference, otherwise the driver can be unloaded
* even if the scsi_device is being accessed.
*/
-#define ATA_BASE_SHT(drv_name) \
+#define __ATA_BASE_SHT(drv_name) \
.module = THIS_MODULE, \
.name = drv_name, \
.ioctl = ata_scsi_ioctl, \
+ ATA_SCSI_COMPAT_IOCTL \
.queuecommand = ata_scsi_queuecmd, \
.can_queue = ATA_DEF_QUEUE, \
.tag_alloc_policy = BLK_TAG_ALLOC_RR, \
@@ -1348,12 +1395,20 @@ extern struct device_attribute *ata_common_sdev_attrs[];
.slave_configure = ata_scsi_slave_config, \
.slave_destroy = ata_scsi_slave_destroy, \
.bios_param = ata_std_bios_param, \
- .unlock_native_capacity = ata_scsi_unlock_native_capacity, \
+ .unlock_native_capacity = ata_scsi_unlock_native_capacity
+
+#define ATA_BASE_SHT(drv_name) \
+ __ATA_BASE_SHT(drv_name), \
.sdev_attrs = ata_common_sdev_attrs
+#ifdef CONFIG_SATA_HOST
+extern struct device_attribute *ata_ncq_sdev_attrs[];
+
#define ATA_NCQ_SHT(drv_name) \
- ATA_BASE_SHT(drv_name), \
+ __ATA_BASE_SHT(drv_name), \
+ .sdev_attrs = ata_ncq_sdev_attrs, \
.change_queue_depth = ata_scsi_change_queue_depth
+#endif
/*
* PMP helpers
@@ -1626,6 +1681,8 @@ extern struct ata_device *ata_dev_next(struct ata_device *dev,
*/
static inline int ata_ncq_enabled(struct ata_device *dev)
{
+ if (!IS_ENABLED(CONFIG_SATA_HOST))
+ return 0;
return (dev->flags & (ATA_DFLAG_PIO | ATA_DFLAG_NCQ_OFF |
ATA_DFLAG_NCQ)) == ATA_DFLAG_NCQ;
}
@@ -1795,6 +1852,16 @@ static inline int ata_dma_enabled(struct ata_device *adev)
}
/**************************************************************************
+ * PATA timings - drivers/ata/libata-pata-timings.c
+ */
+extern const struct ata_timing *ata_timing_find_mode(u8 xfer_mode);
+extern int ata_timing_compute(struct ata_device *, unsigned short,
+ struct ata_timing *, int, int);
+extern void ata_timing_merge(const struct ata_timing *,
+ const struct ata_timing *, struct ata_timing *,
+ unsigned int);
+
+/**************************************************************************
* PMP - drivers/ata/libata-pmp.c
*/
#ifdef CONFIG_SATA_PMP
@@ -1893,9 +1960,9 @@ extern const struct ata_port_operations ata_bmdma_port_ops;
.sg_tablesize = LIBATA_MAX_PRD, \
.dma_boundary = ATA_DMA_BOUNDARY
-extern void ata_bmdma_qc_prep(struct ata_queued_cmd *qc);
+extern enum ata_completion_errors ata_bmdma_qc_prep(struct ata_queued_cmd *qc);
extern unsigned int ata_bmdma_qc_issue(struct ata_queued_cmd *qc);
-extern void ata_bmdma_dumb_qc_prep(struct ata_queued_cmd *qc);
+extern enum ata_completion_errors ata_bmdma_dumb_qc_prep(struct ata_queued_cmd *qc);
extern unsigned int ata_bmdma_port_intr(struct ata_port *ap,
struct ata_queued_cmd *qc);
extern irqreturn_t ata_bmdma_interrupt(int irq, void *dev_instance);
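
Note: a hedged sketch (driver name and queue values are hypothetical, not part of this patch) of how an NCQ-capable SATA driver would now build its scsi_host_template. ATA_NCQ_SHT() is only available with CONFIG_SATA_HOST and pulls in ata_ncq_sdev_attrs itself; later designated initializers may still override the defaults, as AHCI-style drivers do today.

/* Hypothetical driver sketch */
static struct scsi_host_template demo_sata_sht = {
	ATA_NCQ_SHT("demo_sata"),	/* requires CONFIG_SATA_HOST=y */
	.can_queue	= 32,		/* overrides ATA_DEF_QUEUE */
	.sg_tablesize	= 168,
	.dma_boundary	= 0xffffffffUL,
};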
diff --git a/include/linux/libfdt_env.h b/include/linux/libfdt_env.h
index edb0f0c30904..cea8574a29b1 100644
--- a/include/linux/libfdt_env.h
+++ b/include/linux/libfdt_env.h
@@ -2,11 +2,14 @@
#ifndef LIBFDT_ENV_H
#define LIBFDT_ENV_H
-#include <linux/kernel.h> /* For INT_MAX */
+#include <linux/limits.h> /* For INT_MAX */
#include <linux/string.h>
#include <asm/byteorder.h>
+#define INT32_MAX S32_MAX
+#define UINT32_MAX U32_MAX
+
typedef __be16 fdt16_t;
typedef __be32 fdt32_t;
typedef __be64 fdt64_t;
diff --git a/include/linux/libnvdimm.h b/include/linux/libnvdimm.h
index b6eddf912568..9df091bd30ba 100644
--- a/include/linux/libnvdimm.h
+++ b/include/linux/libnvdimm.h
@@ -65,13 +65,6 @@ enum {
DPA_RESOURCE_ADJUSTED = 1 << 0,
};
-extern struct attribute_group nvdimm_bus_attribute_group;
-extern struct attribute_group nvdimm_attribute_group;
-extern struct attribute_group nd_device_attribute_group;
-extern struct attribute_group nd_numa_attribute_group;
-extern struct attribute_group nd_region_attribute_group;
-extern struct attribute_group nd_mapping_attribute_group;
-
struct nvdimm;
struct nvdimm_bus_descriptor;
typedef int (*ndctl_fn)(struct nvdimm_bus_descriptor *nd_desc,
diff --git a/include/linux/license.h b/include/linux/license.h
index decdbf43cb5c..7cce390f120b 100644
--- a/include/linux/license.h
+++ b/include/linux/license.h
@@ -1,3 +1,4 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef __LICENSE_H
#define __LICENSE_H
diff --git a/include/linux/limits.h b/include/linux/limits.h
index 76afcd24ff8c..7fc497ee1393 100644
--- a/include/linux/limits.h
+++ b/include/linux/limits.h
@@ -4,19 +4,8 @@
#include <uapi/linux/limits.h>
#include <linux/types.h>
+#include <vdso/limits.h>
-#define USHRT_MAX ((unsigned short)~0U)
-#define SHRT_MAX ((short)(USHRT_MAX >> 1))
-#define SHRT_MIN ((short)(-SHRT_MAX - 1))
-#define INT_MAX ((int)(~0U >> 1))
-#define INT_MIN (-INT_MAX - 1)
-#define UINT_MAX (~0U)
-#define LONG_MAX ((long)(~0UL >> 1))
-#define LONG_MIN (-LONG_MAX - 1)
-#define ULONG_MAX (~0UL)
-#define LLONG_MAX ((long long)(~0ULL >> 1))
-#define LLONG_MIN (-LLONG_MAX - 1)
-#define ULLONG_MAX (~0ULL)
#define SIZE_MAX (~(size_t)0)
#define PHYS_ADDR_MAX (~(phys_addr_t)0)
diff --git a/include/linux/linkage.h b/include/linux/linkage.h
index 7e020782ade2..9280209d1f62 100644
--- a/include/linux/linkage.h
+++ b/include/linux/linkage.h
@@ -75,32 +75,61 @@
#ifdef __ASSEMBLY__
+/* SYM_T_FUNC -- type used by assembler to mark functions */
+#ifndef SYM_T_FUNC
+#define SYM_T_FUNC STT_FUNC
+#endif
+
+/* SYM_T_OBJECT -- type used by assembler to mark data */
+#ifndef SYM_T_OBJECT
+#define SYM_T_OBJECT STT_OBJECT
+#endif
+
+/* SYM_T_NONE -- type used by assembler to mark entries of unknown type */
+#ifndef SYM_T_NONE
+#define SYM_T_NONE STT_NOTYPE
+#endif
+
+/* SYM_A_* -- align the symbol? */
+#define SYM_A_ALIGN ALIGN
+#define SYM_A_NONE /* nothing */
+
+/* SYM_L_* -- linkage of symbols */
+#define SYM_L_GLOBAL(name) .globl name
+#define SYM_L_WEAK(name) .weak name
+#define SYM_L_LOCAL(name) /* nothing */
+
#ifndef LINKER_SCRIPT
#define ALIGN __ALIGN
#define ALIGN_STR __ALIGN_STR
+/* === DEPRECATED annotations === */
+
+#ifndef CONFIG_X86
#ifndef GLOBAL
+/* deprecated, use SYM_DATA*, SYM_ENTRY, or similar */
#define GLOBAL(name) \
.globl name ASM_NL \
name:
#endif
#ifndef ENTRY
+/* deprecated, use SYM_FUNC_START */
#define ENTRY(name) \
- .globl name ASM_NL \
- ALIGN ASM_NL \
- name:
+ SYM_FUNC_START(name)
#endif
+#endif /* CONFIG_X86 */
#endif /* LINKER_SCRIPT */
+#ifndef CONFIG_X86
#ifndef WEAK
+/* deprecated, use SYM_FUNC_START_WEAK* */
#define WEAK(name) \
- .weak name ASM_NL \
- ALIGN ASM_NL \
- name:
+ SYM_FUNC_START_WEAK(name)
#endif
#ifndef END
+/* deprecated, use SYM_FUNC_END, SYM_DATA_END, or SYM_END */
#define END(name) \
.size name, .-name
#endif
@@ -110,11 +139,215 @@
* static analysis tools such as stack depth analyzer.
*/
#ifndef ENDPROC
+/* deprecated, use SYM_FUNC_END */
#define ENDPROC(name) \
- .type name, @function ASM_NL \
- END(name)
+ SYM_FUNC_END(name)
+#endif
+#endif /* CONFIG_X86 */
+
+/* === generic annotations === */
+
+/* SYM_ENTRY -- use only if you have to for non-paired symbols */
+#ifndef SYM_ENTRY
+#define SYM_ENTRY(name, linkage, align...) \
+ linkage(name) ASM_NL \
+ align ASM_NL \
+ name:
+#endif
+
+/* SYM_START -- use only if you have to */
+#ifndef SYM_START
+#define SYM_START(name, linkage, align...) \
+ SYM_ENTRY(name, linkage, align)
+#endif
+
+/* SYM_END -- use only if you have to */
+#ifndef SYM_END
+#define SYM_END(name, sym_type) \
+ .type name sym_type ASM_NL \
+ .size name, .-name
+#endif
+
+/* === code annotations === */
+
+/*
+ * FUNC -- C-like functions (proper stack frame etc.)
+ * CODE -- non-C code (e.g. irq handlers with different, special stack etc.)
+ *
+ * Objtool validates stack for FUNC, but not for CODE.
+ * Objtool generates debug info for both FUNC & CODE, but needs special
+ * annotations for each CODE's start (to describe the actual stack frame).
+ *
+ * ALIAS -- does not generate debug info -- the aliased function will
+ */
+
+/* SYM_INNER_LABEL_ALIGN -- only for labels in the middle of code */
+#ifndef SYM_INNER_LABEL_ALIGN
+#define SYM_INNER_LABEL_ALIGN(name, linkage) \
+ .type name SYM_T_NONE ASM_NL \
+ SYM_ENTRY(name, linkage, SYM_A_ALIGN)
+#endif
+
+/* SYM_INNER_LABEL -- only for labels in the middle of code */
+#ifndef SYM_INNER_LABEL
+#define SYM_INNER_LABEL(name, linkage) \
+ .type name SYM_T_NONE ASM_NL \
+ SYM_ENTRY(name, linkage, SYM_A_NONE)
+#endif
+
+/*
+ * SYM_FUNC_START_LOCAL_ALIAS -- use where there are two local names for one
+ * function
+ */
+#ifndef SYM_FUNC_START_LOCAL_ALIAS
+#define SYM_FUNC_START_LOCAL_ALIAS(name) \
+ SYM_START(name, SYM_L_LOCAL, SYM_A_ALIGN)
+#endif
+
+/*
+ * SYM_FUNC_START_ALIAS -- use where there are two global names for one
+ * function
+ */
+#ifndef SYM_FUNC_START_ALIAS
+#define SYM_FUNC_START_ALIAS(name) \
+ SYM_START(name, SYM_L_GLOBAL, SYM_A_ALIGN)
+#endif
+
+/* SYM_FUNC_START -- use for global functions */
+#ifndef SYM_FUNC_START
+/*
+ * The same as SYM_FUNC_START_ALIAS, but we will need to distinguish these two
+ * later.
+ */
+#define SYM_FUNC_START(name) \
+ SYM_START(name, SYM_L_GLOBAL, SYM_A_ALIGN)
+#endif
+
+/* SYM_FUNC_START_NOALIGN -- use for global functions, w/o alignment */
+#ifndef SYM_FUNC_START_NOALIGN
+#define SYM_FUNC_START_NOALIGN(name) \
+ SYM_START(name, SYM_L_GLOBAL, SYM_A_NONE)
+#endif
+
+/* SYM_FUNC_START_LOCAL -- use for local functions */
+#ifndef SYM_FUNC_START_LOCAL
+/* the same as SYM_FUNC_START_LOCAL_ALIAS, see comment near SYM_FUNC_START */
+#define SYM_FUNC_START_LOCAL(name) \
+ SYM_START(name, SYM_L_LOCAL, SYM_A_ALIGN)
#endif
+/* SYM_FUNC_START_LOCAL_NOALIGN -- use for local functions, w/o alignment */
+#ifndef SYM_FUNC_START_LOCAL_NOALIGN
+#define SYM_FUNC_START_LOCAL_NOALIGN(name) \
+ SYM_START(name, SYM_L_LOCAL, SYM_A_NONE)
#endif
+/* SYM_FUNC_START_WEAK -- use for weak functions */
+#ifndef SYM_FUNC_START_WEAK
+#define SYM_FUNC_START_WEAK(name) \
+ SYM_START(name, SYM_L_WEAK, SYM_A_ALIGN)
#endif
+
+/* SYM_FUNC_START_WEAK_NOALIGN -- use for weak functions, w/o alignment */
+#ifndef SYM_FUNC_START_WEAK_NOALIGN
+#define SYM_FUNC_START_WEAK_NOALIGN(name) \
+ SYM_START(name, SYM_L_WEAK, SYM_A_NONE)
+#endif
+
+/* SYM_FUNC_END_ALIAS -- the end of LOCAL_ALIASed or ALIASed function */
+#ifndef SYM_FUNC_END_ALIAS
+#define SYM_FUNC_END_ALIAS(name) \
+ SYM_END(name, SYM_T_FUNC)
+#endif
+
+/*
+ * SYM_FUNC_END -- the end of SYM_FUNC_START_LOCAL, SYM_FUNC_START,
+ * SYM_FUNC_START_WEAK, ...
+ */
+#ifndef SYM_FUNC_END
+/* the same as SYM_FUNC_END_ALIAS, see comment near SYM_FUNC_START */
+#define SYM_FUNC_END(name) \
+ SYM_END(name, SYM_T_FUNC)
+#endif
+
+/* SYM_CODE_START -- use for non-C (special) functions */
+#ifndef SYM_CODE_START
+#define SYM_CODE_START(name) \
+ SYM_START(name, SYM_L_GLOBAL, SYM_A_ALIGN)
+#endif
+
+/* SYM_CODE_START_NOALIGN -- use for non-C (special) functions, w/o alignment */
+#ifndef SYM_CODE_START_NOALIGN
+#define SYM_CODE_START_NOALIGN(name) \
+ SYM_START(name, SYM_L_GLOBAL, SYM_A_NONE)
+#endif
+
+/* SYM_CODE_START_LOCAL -- use for local non-C (special) functions */
+#ifndef SYM_CODE_START_LOCAL
+#define SYM_CODE_START_LOCAL(name) \
+ SYM_START(name, SYM_L_LOCAL, SYM_A_ALIGN)
+#endif
+
+/*
+ * SYM_CODE_START_LOCAL_NOALIGN -- use for local non-C (special) functions,
+ * w/o alignment
+ */
+#ifndef SYM_CODE_START_LOCAL_NOALIGN
+#define SYM_CODE_START_LOCAL_NOALIGN(name) \
+ SYM_START(name, SYM_L_LOCAL, SYM_A_NONE)
+#endif
+
+/* SYM_CODE_END -- the end of SYM_CODE_START_LOCAL, SYM_CODE_START, ... */
+#ifndef SYM_CODE_END
+#define SYM_CODE_END(name) \
+ SYM_END(name, SYM_T_NONE)
+#endif
+
+/* === data annotations === */
+
+/* SYM_DATA_START -- global data symbol */
+#ifndef SYM_DATA_START
+#define SYM_DATA_START(name) \
+ SYM_START(name, SYM_L_GLOBAL, SYM_A_NONE)
+#endif
+
+/* SYM_DATA_START_LOCAL -- local data symbol */
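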
+#ifndef SYM_DATA_START_LOCAL
+#define SYM_DATA_START_LOCAL(name) \
+ SYM_START(name, SYM_L_LOCAL, SYM_A_NONE)
+#endif
+
+/* SYM_DATA_END -- the end of SYM_DATA_START symbol */
+#ifndef SYM_DATA_END
+#define SYM_DATA_END(name) \
+ SYM_END(name, SYM_T_OBJECT)
+#endif
+
+/* SYM_DATA_END_LABEL -- the labeled end of SYM_DATA_START symbol */
+#ifndef SYM_DATA_END_LABEL
+#define SYM_DATA_END_LABEL(name, linkage, label) \
+ linkage(label) ASM_NL \
+ .type label SYM_T_OBJECT ASM_NL \
+ label: \
+ SYM_END(name, SYM_T_OBJECT)
+#endif
+
+/* SYM_DATA -- start+end wrapper around simple global data */
+#ifndef SYM_DATA
+#define SYM_DATA(name, data...) \
+ SYM_DATA_START(name) ASM_NL \
+ data ASM_NL \
+ SYM_DATA_END(name)
+#endif
+
+/* SYM_DATA_LOCAL -- start+end wrapper around simple local data */
+#ifndef SYM_DATA_LOCAL
+#define SYM_DATA_LOCAL(name, data...) \
+ SYM_DATA_START_LOCAL(name) ASM_NL \
+ data ASM_NL \
+ SYM_DATA_END(name)
+#endif
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _LINUX_LINKAGE_H */
diff --git a/include/linux/linkmode.h b/include/linux/linkmode.h
index a99c58866860..fe740031339d 100644
--- a/include/linux/linkmode.h
+++ b/include/linux/linkmode.h
@@ -82,4 +82,10 @@ static inline int linkmode_equal(const unsigned long *src1,
return bitmap_equal(src1, src2, __ETHTOOL_LINK_MODE_MASK_NBITS);
}
+static inline int linkmode_subset(const unsigned long *src1,
+ const unsigned long *src2)
+{
+ return bitmap_subset(src1, src2, __ETHTOOL_LINK_MODE_MASK_NBITS);
+}
+
#endif /* __LINKMODE_H */
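
Note: a minimal hypothetical sketch of the new linkmode_subset() helper, which reports whether every bit set in the first mask is also set in the second. The mode choices below are illustrative only.

/* Hypothetical sketch */
static bool demo_modes_ok(const unsigned long *phy_advertising)
{
	__ETHTOOL_DECLARE_LINK_MODE_MASK(wanted) = { 0, };

	linkmode_set_bit(ETHTOOL_LINK_MODE_Autoneg_BIT, wanted);
	linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, wanted);

	/* true iff every bit in 'wanted' is also set in 'phy_advertising' */
	return linkmode_subset(wanted, phy_advertising);
}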
diff --git a/include/linux/list.h b/include/linux/list.h
index 85c92555e31f..884216db3246 100644
--- a/include/linux/list.h
+++ b/include/linux/list.h
@@ -23,6 +23,13 @@
#define LIST_HEAD(name) \
struct list_head name = LIST_HEAD_INIT(name)
+/**
+ * INIT_LIST_HEAD - Initialize a list_head structure
+ * @list: list_head structure to be initialized.
+ *
+ * Initializes the list_head to point to itself. If it is a list header,
+ * the result is an empty list.
+ */
static inline void INIT_LIST_HEAD(struct list_head *list)
{
WRITE_ONCE(list->next, list);
@@ -120,12 +127,6 @@ static inline void __list_del_clearprev(struct list_head *entry)
entry->prev = NULL;
}
-/**
- * list_del - deletes entry from list.
- * @entry: the element to delete from the list.
- * Note: list_empty() on entry does not return true after this, the entry is
- * in an undefined state.
- */
static inline void __list_del_entry(struct list_head *entry)
{
if (!__list_del_entry_valid(entry))
@@ -134,6 +135,12 @@ static inline void __list_del_entry(struct list_head *entry)
__list_del(entry->prev, entry->next);
}
+/**
+ * list_del - deletes entry from list.
+ * @entry: the element to delete from the list.
+ * Note: list_empty() on entry does not return true after this, the entry is
+ * in an undefined state.
+ */
static inline void list_del(struct list_head *entry)
{
__list_del_entry(entry);
@@ -157,8 +164,15 @@ static inline void list_replace(struct list_head *old,
new->prev->next = new;
}
+/**
+ * list_replace_init - replace old entry by new one and initialize the old one
+ * @old : the element to be replaced
+ * @new : the new element to insert
+ *
+ * If @old was empty, it will be overwritten.
+ */
static inline void list_replace_init(struct list_head *old,
- struct list_head *new)
+ struct list_head *new)
{
list_replace(old, new);
INIT_LIST_HEAD(old);
@@ -539,6 +553,16 @@ static inline void list_splice_tail_init(struct list_head *list,
for (pos = (head)->next; pos != (head); pos = pos->next)
/**
+ * list_for_each_continue - continue iteration over a list
+ * @pos: the &struct list_head to use as a loop cursor.
+ * @head: the head for your list.
+ *
+ * Continue to iterate over a list, continuing after the current position.
+ */
+#define list_for_each_continue(pos, head) \
+ for (pos = pos->next; pos != (head); pos = pos->next)
+
+/**
* list_for_each_prev - iterate over a list backwards
* @pos: the &struct list_head to use as a loop cursor.
* @head: the head for your list.
@@ -744,11 +768,36 @@ static inline void INIT_HLIST_NODE(struct hlist_node *h)
h->pprev = NULL;
}
+/**
+ * hlist_unhashed - Has node been removed from list and reinitialized?
+ * @h: Node to be checked
+ *
+ * Note that not all removal functions will leave a node in unhashed
+ * state. For example, hlist_nulls_del_init_rcu() does leave the
+ * node in unhashed state, but hlist_nulls_del() does not.
+ */
static inline int hlist_unhashed(const struct hlist_node *h)
{
return !h->pprev;
}
+/**
+ * hlist_unhashed_lockless - Version of hlist_unhashed for lockless use
+ * @h: Node to be checked
+ *
+ * This variant of hlist_unhashed() must be used in lockless contexts
+ * to avoid potential load-tearing. The READ_ONCE() is paired with the
+ * various WRITE_ONCE() in hlist helpers that are defined below.
+ */
+static inline int hlist_unhashed_lockless(const struct hlist_node *h)
+{
+ return !READ_ONCE(h->pprev);
+}
+
+/**
+ * hlist_empty - Is the specified hlist_head structure an empty hlist?
+ * @h: Structure to check.
+ */
static inline int hlist_empty(const struct hlist_head *h)
{
return !READ_ONCE(h->first);
@@ -761,9 +810,16 @@ static inline void __hlist_del(struct hlist_node *n)
WRITE_ONCE(*pprev, next);
if (next)
- next->pprev = pprev;
+ WRITE_ONCE(next->pprev, pprev);
}
+/**
+ * hlist_del - Delete the specified hlist_node from its list
+ * @n: Node to delete.
+ *
+ * Note that this function leaves the node in hashed state. Use
+ * hlist_del_init() or similar instead to unhash @n.
+ */
static inline void hlist_del(struct hlist_node *n)
{
__hlist_del(n);
@@ -771,6 +827,12 @@ static inline void hlist_del(struct hlist_node *n)
n->pprev = LIST_POISON2;
}
+/**
+ * hlist_del_init - Delete the specified hlist_node from its list and initialize
+ * @n: Node to delete.
+ *
+ * Note that this function leaves the node in unhashed state.
+ */
static inline void hlist_del_init(struct hlist_node *n)
{
if (!hlist_unhashed(n)) {
@@ -779,51 +841,83 @@ static inline void hlist_del_init(struct hlist_node *n)
}
}
+/**
+ * hlist_add_head - add a new entry at the beginning of the hlist
+ * @n: new entry to be added
+ * @h: hlist head to add it after
+ *
+ * Insert a new entry after the specified head.
+ * This is good for implementing stacks.
+ */
static inline void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
{
struct hlist_node *first = h->first;
- n->next = first;
+ WRITE_ONCE(n->next, first);
if (first)
- first->pprev = &n->next;
+ WRITE_ONCE(first->pprev, &n->next);
WRITE_ONCE(h->first, n);
- n->pprev = &h->first;
+ WRITE_ONCE(n->pprev, &h->first);
}
-/* next must be != NULL */
+/**
+ * hlist_add_before - add a new entry before the one specified
+ * @n: new entry to be added
+ * @next: hlist node to add it before, which must be non-NULL
+ */
static inline void hlist_add_before(struct hlist_node *n,
- struct hlist_node *next)
+ struct hlist_node *next)
{
- n->pprev = next->pprev;
- n->next = next;
- next->pprev = &n->next;
+ WRITE_ONCE(n->pprev, next->pprev);
+ WRITE_ONCE(n->next, next);
+ WRITE_ONCE(next->pprev, &n->next);
WRITE_ONCE(*(n->pprev), n);
}
+/**
+ * hlist_add_behind - add a new entry after the one specified
+ * @n: new entry to be added
+ * @prev: hlist node to add it after, which must be non-NULL
+ */
static inline void hlist_add_behind(struct hlist_node *n,
struct hlist_node *prev)
{
- n->next = prev->next;
- prev->next = n;
- n->pprev = &prev->next;
+ WRITE_ONCE(n->next, prev->next);
+ WRITE_ONCE(prev->next, n);
+ WRITE_ONCE(n->pprev, &prev->next);
if (n->next)
- n->next->pprev = &n->next;
+ WRITE_ONCE(n->next->pprev, &n->next);
}
-/* after that we'll appear to be on some hlist and hlist_del will work */
+/**
+ * hlist_add_fake - create a fake hlist consisting of a single headless node
+ * @n: Node to make a fake list out of
+ *
+ * This makes @n appear to be its own predecessor on a headless hlist.
+ * The point of this is to allow things like hlist_del() to work correctly
+ * in cases where there is no list.
+ */
static inline void hlist_add_fake(struct hlist_node *n)
{
n->pprev = &n->next;
}
+/**
+ * hlist_fake - Is this node a fake hlist?
+ * @h: Node to check for being a self-referential fake hlist.
+ */
static inline bool hlist_fake(struct hlist_node *h)
{
return h->pprev == &h->next;
}
-/*
+/**
+ * hlist_is_singular_node - is node the only element of the specified hlist?
+ * @n: Node to check for singularity.
+ * @h: Header for potentially singular list.
+ *
* Check whether the node is the only node of the head without
- * accessing head:
+ * accessing head, thus avoiding unnecessary cache misses.
*/
static inline bool
hlist_is_singular_node(struct hlist_node *n, struct hlist_head *h)
@@ -831,7 +925,11 @@ hlist_is_singular_node(struct hlist_node *n, struct hlist_head *h)
return !n->next && n->pprev == &h->first;
}
-/*
+/**
+ * hlist_move_list - Move an hlist
+ * @old: hlist_head for old list.
+ * @new: hlist_head for new list.
+ *
* Move a list from one list head to another. Fixup the pprev
* reference of the first entry if it exists.
*/
diff --git a/include/linux/list_nulls.h b/include/linux/list_nulls.h
index 3ef96743db8d..fa6e8471bd22 100644
--- a/include/linux/list_nulls.h
+++ b/include/linux/list_nulls.h
@@ -56,11 +56,33 @@ static inline unsigned long get_nulls_value(const struct hlist_nulls_node *ptr)
return ((unsigned long)ptr) >> 1;
}
+/**
+ * hlist_nulls_unhashed - Has node been removed and reinitialized?
+ * @h: Node to be checked
+ *
+ * Note that not all removal functions will leave a node in unhashed state.
+ * For example, hlist_del_init_rcu() leaves the node in unhashed state,
+ * but hlist_nulls_del() does not.
+ */
static inline int hlist_nulls_unhashed(const struct hlist_nulls_node *h)
{
return !h->pprev;
}
+/**
+ * hlist_nulls_unhashed_lockless - Has node been removed and reinitialized?
+ * @h: Node to be checked
+ *
+ * Note that not all removal functions will leave a node in unhashed state.
+ * For example, hlist_del_init_rcu() leaves the node in unhashed state,
+ * but hlist_nulls_del() does not. Unlike hlist_nulls_unhashed(), this
+ * function may be used locklessly.
+ */
+static inline int hlist_nulls_unhashed_lockless(const struct hlist_nulls_node *h)
+{
+ return !READ_ONCE(h->pprev);
+}
+
static inline int hlist_nulls_empty(const struct hlist_nulls_head *h)
{
return is_a_nulls(READ_ONCE(h->first));
@@ -72,10 +94,10 @@ static inline void hlist_nulls_add_head(struct hlist_nulls_node *n,
struct hlist_nulls_node *first = h->first;
n->next = first;
- n->pprev = &h->first;
+ WRITE_ONCE(n->pprev, &h->first);
h->first = n;
if (!is_a_nulls(first))
- first->pprev = &n->next;
+ WRITE_ONCE(first->pprev, &n->next);
}
static inline void __hlist_nulls_del(struct hlist_nulls_node *n)
@@ -85,13 +107,13 @@ static inline void __hlist_nulls_del(struct hlist_nulls_node *n)
WRITE_ONCE(*pprev, next);
if (!is_a_nulls(next))
- next->pprev = pprev;
+ WRITE_ONCE(next->pprev, pprev);
}
static inline void hlist_nulls_del(struct hlist_nulls_node *n)
{
__hlist_nulls_del(n);
- n->pprev = LIST_POISON2;
+ WRITE_ONCE(n->pprev, LIST_POISON2);
}
/**
diff --git a/include/linux/livepatch.h b/include/linux/livepatch.h
index 273400814020..e894e74905f3 100644
--- a/include/linux/livepatch.h
+++ b/include/linux/livepatch.h
@@ -131,9 +131,22 @@ struct klp_object {
};
/**
+ * struct klp_state - state of the system modified by the livepatch
+ * @id: system state identifier (non-zero)
+ * @version: version of the change
+ * @data: custom data
+ */
+struct klp_state {
+ unsigned long id;
+ unsigned int version;
+ void *data;
+};
+
+/**
* struct klp_patch - patch structure for live patching
* @mod: reference to the live patch module
* @objs: object entries for kernel objects to be patched
+ * @states: system states that can get modified
* @replace: replace all actively used patches
* @list: list node for global list of actively used patches
* @kobj: kobject for sysfs resources
@@ -147,6 +160,7 @@ struct klp_patch {
/* external */
struct module *mod;
struct klp_object *objs;
+ struct klp_state *states;
bool replace;
/* internal */
@@ -217,6 +231,9 @@ void *klp_shadow_get_or_alloc(void *obj, unsigned long id,
void klp_shadow_free(void *obj, unsigned long id, klp_shadow_dtor_t dtor);
void klp_shadow_free_all(unsigned long id, klp_shadow_dtor_t dtor);
+struct klp_state *klp_get_state(struct klp_patch *patch, unsigned long id);
+struct klp_state *klp_get_prev_state(unsigned long id);
+
#else /* !CONFIG_LIVEPATCH */
static inline int klp_module_coming(struct module *mod) { return 0; }
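
Note: a hedged sketch of how a live patch might declare the system states it modifies. The state id and version are made up, the states array is assumed to be terminated by an empty entry, and the klp_object array is omitted.

/* Hypothetical sketch only */
static struct klp_state demo_states[] = {
	{
		.id = 1,	/* patch-specific, must be non-zero */
		.version = 2,	/* bumped when the state format changes */
	},
	{}
};

static struct klp_patch demo_patch = {
	.mod = THIS_MODULE,
	.objs = demo_objs,	/* assumed klp_object array, not shown */
	.states = demo_states,
	.replace = true,
};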
diff --git a/include/linux/lockd/debug.h b/include/linux/lockd/debug.h
index e536c579827f..eede2ab5246f 100644
--- a/include/linux/lockd/debug.h
+++ b/include/linux/lockd/debug.h
@@ -10,8 +10,6 @@
#ifndef LINUX_LOCKD_DEBUG_H
#define LINUX_LOCKD_DEBUG_H
-#ifdef __KERNEL__
-
#include <linux/sunrpc/debug.h>
/*
@@ -25,8 +23,6 @@
# define ifdebug(flag) if (0)
#endif
-#endif /* __KERNEL__ */
-
/*
* Debug flags
*/
diff --git a/include/linux/lockd/lockd.h b/include/linux/lockd/lockd.h
index d294dde9e546..666f5f310a04 100644
--- a/include/linux/lockd/lockd.h
+++ b/include/linux/lockd/lockd.h
@@ -10,8 +10,6 @@
#ifndef LINUX_LOCKD_LOCKD_H
#define LINUX_LOCKD_LOCKD_H
-#ifdef __KERNEL__
-
#include <linux/in.h>
#include <linux/in6.h>
#include <net/ipv6.h>
@@ -373,6 +371,4 @@ static inline int nlm_compare_locks(const struct file_lock *fl1,
extern const struct lock_manager_operations nlmsvc_lock_operations;
-#endif /* __KERNEL__ */
-
#endif /* LINUX_LOCKD_LOCKD_H */
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index b8a835fd611b..206774ac6946 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -21,6 +21,22 @@ extern int lock_stat;
#include <linux/types.h>
+enum lockdep_wait_type {
+ LD_WAIT_INV = 0, /* not checked, catch all */
+
+ LD_WAIT_FREE, /* wait free, rcu etc.. */
+ LD_WAIT_SPIN, /* spin loops, raw_spinlock_t etc.. */
+
+#ifdef CONFIG_PROVE_RAW_LOCK_NESTING
+ LD_WAIT_CONFIG, /* CONFIG_PREEMPT_LOCK, spinlock_t etc.. */
+#else
+ LD_WAIT_CONFIG = LD_WAIT_SPIN,
+#endif
+ LD_WAIT_SLEEP, /* sleeping locks, mutex_t etc.. */
+
+ LD_WAIT_MAX, /* must be last */
+};
+
#ifdef CONFIG_LOCKDEP
#include <linux/linkage.h>
@@ -111,6 +127,9 @@ struct lock_class {
int name_version;
const char *name;
+ short wait_type_inner;
+ short wait_type_outer;
+
#ifdef CONFIG_LOCK_STAT
unsigned long contention_point[LOCKSTAT_POINTS];
unsigned long contending_point[LOCKSTAT_POINTS];
@@ -158,6 +177,8 @@ struct lockdep_map {
struct lock_class_key *key;
struct lock_class *class_cache[NR_LOCKDEP_CACHING_CLASSES];
const char *name;
+ short wait_type_outer; /* can be taken in this context */
+ short wait_type_inner; /* presents this context */
#ifdef CONFIG_LOCK_STAT
int cpu;
unsigned long ip;
@@ -299,8 +320,21 @@ extern void lockdep_unregister_key(struct lock_class_key *key);
* to lockdep:
*/
-extern void lockdep_init_map(struct lockdep_map *lock, const char *name,
- struct lock_class_key *key, int subclass);
+extern void lockdep_init_map_waits(struct lockdep_map *lock, const char *name,
+ struct lock_class_key *key, int subclass, short inner, short outer);
+
+static inline void
+lockdep_init_map_wait(struct lockdep_map *lock, const char *name,
+ struct lock_class_key *key, int subclass, short inner)
+{
+ lockdep_init_map_waits(lock, name, key, subclass, inner, LD_WAIT_INV);
+}
+
+static inline void lockdep_init_map(struct lockdep_map *lock, const char *name,
+ struct lock_class_key *key, int subclass)
+{
+ lockdep_init_map_wait(lock, name, key, subclass, LD_WAIT_INV);
+}
/*
* Reinitialize a lock key - for cases where there is special locking or
@@ -308,18 +342,29 @@ extern void lockdep_init_map(struct lockdep_map *lock, const char *name,
* of dependencies wrong: they are either too broad (they need a class-split)
* or they are too narrow (they suffer from a false class-split):
*/
-#define lockdep_set_class(lock, key) \
- lockdep_init_map(&(lock)->dep_map, #key, key, 0)
-#define lockdep_set_class_and_name(lock, key, name) \
- lockdep_init_map(&(lock)->dep_map, name, key, 0)
-#define lockdep_set_class_and_subclass(lock, key, sub) \
- lockdep_init_map(&(lock)->dep_map, #key, key, sub)
-#define lockdep_set_subclass(lock, sub) \
- lockdep_init_map(&(lock)->dep_map, #lock, \
- (lock)->dep_map.key, sub)
+#define lockdep_set_class(lock, key) \
+ lockdep_init_map_waits(&(lock)->dep_map, #key, key, 0, \
+ (lock)->dep_map.wait_type_inner, \
+ (lock)->dep_map.wait_type_outer)
+
+#define lockdep_set_class_and_name(lock, key, name) \
+ lockdep_init_map_waits(&(lock)->dep_map, name, key, 0, \
+ (lock)->dep_map.wait_type_inner, \
+ (lock)->dep_map.wait_type_outer)
+
+#define lockdep_set_class_and_subclass(lock, key, sub) \
+ lockdep_init_map_waits(&(lock)->dep_map, #key, key, sub,\
+ (lock)->dep_map.wait_type_inner, \
+ (lock)->dep_map.wait_type_outer)
+
+#define lockdep_set_subclass(lock, sub) \
+ lockdep_init_map_waits(&(lock)->dep_map, #lock, (lock)->dep_map.key, sub,\
+ (lock)->dep_map.wait_type_inner, \
+ (lock)->dep_map.wait_type_outer)
#define lockdep_set_novalidate_class(lock) \
lockdep_set_class_and_name(lock, &__lockdep_no_validate__, #lock)
+
/*
* Compare locking classes
*/
@@ -349,8 +394,7 @@ extern void lock_acquire(struct lockdep_map *lock, unsigned int subclass,
int trylock, int read, int check,
struct lockdep_map *nest_lock, unsigned long ip);
-extern void lock_release(struct lockdep_map *lock, int nested,
- unsigned long ip);
+extern void lock_release(struct lockdep_map *lock, unsigned long ip);
/*
* Same "read" as for lock_acquire(), except -1 means any.
@@ -428,11 +472,15 @@ static inline void lockdep_set_selftest_task(struct task_struct *task)
}
# define lock_acquire(l, s, t, r, c, n, i) do { } while (0)
-# define lock_release(l, n, i) do { } while (0)
+# define lock_release(l, i) do { } while (0)
# define lock_downgrade(l, i) do { } while (0)
# define lock_set_class(l, n, k, s, i) do { } while (0)
# define lock_set_subclass(l, s, i) do { } while (0)
# define lockdep_init() do { } while (0)
+# define lockdep_init_map_waits(lock, name, key, sub, inner, outer) \
+ do { (void)(name); (void)(key); } while (0)
+# define lockdep_init_map_wait(lock, name, key, sub, inner) \
+ do { (void)(name); (void)(key); } while (0)
# define lockdep_init_map(lock, name, key, sub) \
do { (void)(name); (void)(key); } while (0)
# define lockdep_set_class(lock, key) do { (void)(key); } while (0)
@@ -591,42 +639,49 @@ static inline void print_irqtrace_events(struct task_struct *curr)
#define spin_acquire(l, s, t, i) lock_acquire_exclusive(l, s, t, NULL, i)
#define spin_acquire_nest(l, s, t, n, i) lock_acquire_exclusive(l, s, t, n, i)
-#define spin_release(l, n, i) lock_release(l, n, i)
+#define spin_release(l, i) lock_release(l, i)
#define rwlock_acquire(l, s, t, i) lock_acquire_exclusive(l, s, t, NULL, i)
#define rwlock_acquire_read(l, s, t, i) lock_acquire_shared_recursive(l, s, t, NULL, i)
-#define rwlock_release(l, n, i) lock_release(l, n, i)
+#define rwlock_release(l, i) lock_release(l, i)
#define seqcount_acquire(l, s, t, i) lock_acquire_exclusive(l, s, t, NULL, i)
#define seqcount_acquire_read(l, s, t, i) lock_acquire_shared_recursive(l, s, t, NULL, i)
-#define seqcount_release(l, n, i) lock_release(l, n, i)
+#define seqcount_release(l, i) lock_release(l, i)
#define mutex_acquire(l, s, t, i) lock_acquire_exclusive(l, s, t, NULL, i)
#define mutex_acquire_nest(l, s, t, n, i) lock_acquire_exclusive(l, s, t, n, i)
-#define mutex_release(l, n, i) lock_release(l, n, i)
+#define mutex_release(l, i) lock_release(l, i)
#define rwsem_acquire(l, s, t, i) lock_acquire_exclusive(l, s, t, NULL, i)
#define rwsem_acquire_nest(l, s, t, n, i) lock_acquire_exclusive(l, s, t, n, i)
#define rwsem_acquire_read(l, s, t, i) lock_acquire_shared(l, s, t, NULL, i)
-#define rwsem_release(l, n, i) lock_release(l, n, i)
+#define rwsem_release(l, i) lock_release(l, i)
#define lock_map_acquire(l) lock_acquire_exclusive(l, 0, 0, NULL, _THIS_IP_)
#define lock_map_acquire_read(l) lock_acquire_shared_recursive(l, 0, 0, NULL, _THIS_IP_)
#define lock_map_acquire_tryread(l) lock_acquire_shared_recursive(l, 0, 1, NULL, _THIS_IP_)
-#define lock_map_release(l) lock_release(l, 1, _THIS_IP_)
+#define lock_map_release(l) lock_release(l, _THIS_IP_)
#ifdef CONFIG_PROVE_LOCKING
# define might_lock(lock) \
do { \
typecheck(struct lockdep_map *, &(lock)->dep_map); \
lock_acquire(&(lock)->dep_map, 0, 0, 0, 1, NULL, _THIS_IP_); \
- lock_release(&(lock)->dep_map, 0, _THIS_IP_); \
+ lock_release(&(lock)->dep_map, _THIS_IP_); \
} while (0)
# define might_lock_read(lock) \
do { \
typecheck(struct lockdep_map *, &(lock)->dep_map); \
lock_acquire(&(lock)->dep_map, 0, 0, 1, 1, NULL, _THIS_IP_); \
- lock_release(&(lock)->dep_map, 0, _THIS_IP_); \
+ lock_release(&(lock)->dep_map, _THIS_IP_); \
+} while (0)
+# define might_lock_nested(lock, subclass) \
+do { \
+ typecheck(struct lockdep_map *, &(lock)->dep_map); \
+ lock_acquire(&(lock)->dep_map, subclass, 0, 1, 1, NULL, \
+ _THIS_IP_); \
+ lock_release(&(lock)->dep_map, _THIS_IP_); \
} while (0)
#define lockdep_assert_irqs_enabled() do { \
@@ -650,11 +705,27 @@ do { \
#else
# define might_lock(lock) do { } while (0)
# define might_lock_read(lock) do { } while (0)
+# define might_lock_nested(lock, subclass) do { } while (0)
# define lockdep_assert_irqs_enabled() do { } while (0)
# define lockdep_assert_irqs_disabled() do { } while (0)
# define lockdep_assert_in_irq() do { } while (0)
#endif
+#ifdef CONFIG_PROVE_RAW_LOCK_NESTING
+
+# define lockdep_assert_RT_in_threaded_ctx() do { \
+ WARN_ONCE(debug_locks && !current->lockdep_recursion && \
+ current->hardirq_context && \
+ !(current->hardirq_threaded || current->irq_config), \
+ "Not in threaded context on PREEMPT_RT as expected\n"); \
+} while (0)
+
+#else
+
+# define lockdep_assert_RT_in_threaded_ctx() do { } while (0)
+
+#endif
+
#ifdef CONFIG_LOCKDEP
void lockdep_rcu_suspicious(const char *file, const int line, const char *s);
#else
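
Note: a minimal hypothetical sketch of the new wait-type initializer; the lock name and key are illustrative. lockdep_init_map() still exists and now forwards with LD_WAIT_INV, and lock_release() callers simply drop the old "nested" argument.

static struct lock_class_key demo_key;
static struct lockdep_map demo_map;

static void demo_map_init(void)
{
	/* inner LD_WAIT_SPIN: behaves like a raw spinlock; outer unchecked */
	lockdep_init_map_waits(&demo_map, "demo_lock", &demo_key, 0,
			       LD_WAIT_SPIN, LD_WAIT_INV);
}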
diff --git a/include/linux/logic_pio.h b/include/linux/logic_pio.h
index 88e1e6304a71..54945aa824b4 100644
--- a/include/linux/logic_pio.h
+++ b/include/linux/logic_pio.h
@@ -108,10 +108,10 @@ void logic_outsl(unsigned long addr, const void *buffer, unsigned int count);
* area by redefining the macro below.
*/
#define PIO_INDIRECT_SIZE 0x4000
-#define MMIO_UPPER_LIMIT (IO_SPACE_LIMIT - PIO_INDIRECT_SIZE)
#else
-#define MMIO_UPPER_LIMIT IO_SPACE_LIMIT
+#define PIO_INDIRECT_SIZE 0
#endif /* CONFIG_INDIRECT_PIO */
+#define MMIO_UPPER_LIMIT (IO_SPACE_LIMIT - PIO_INDIRECT_SIZE)
struct logic_pio_hwaddr *find_io_range_by_fwnode(struct fwnode_handle *fwnode);
unsigned long logic_pio_trans_hwaddr(struct fwnode_handle *fwnode,
diff --git a/include/linux/lsm_audit.h b/include/linux/lsm_audit.h
index 915330abf6e5..99d629fd9944 100644
--- a/include/linux/lsm_audit.h
+++ b/include/linux/lsm_audit.h
@@ -74,6 +74,7 @@ struct common_audit_data {
#define LSM_AUDIT_DATA_FILE 12
#define LSM_AUDIT_DATA_IBPKEY 13
#define LSM_AUDIT_DATA_IBENDPORT 14
+#define LSM_AUDIT_DATA_LOCKDOWN 15
union {
struct path path;
struct dentry *dentry;
@@ -93,6 +94,7 @@ struct common_audit_data {
struct file *file;
struct lsm_ibpkey_audit *ibpkey;
struct lsm_ibendport_audit *ibendport;
+ int reason;
} u;
/* this union contains LSM specific data */
union {
diff --git a/include/linux/lsm_hooks.h b/include/linux/lsm_hooks.h
index 59f5ee945c1c..28d435049f24 100644
--- a/include/linux/lsm_hooks.h
+++ b/include/linux/lsm_hooks.h
@@ -1852,6 +1852,14 @@ union security_list_options {
void (*bpf_prog_free_security)(struct bpf_prog_aux *aux);
#endif /* CONFIG_BPF_SYSCALL */
int (*locked_down)(enum lockdown_reason what);
+#ifdef CONFIG_PERF_EVENTS
+ int (*perf_event_open)(struct perf_event_attr *attr, int type);
+ int (*perf_event_alloc)(struct perf_event *event);
+ void (*perf_event_free)(struct perf_event *event);
+ int (*perf_event_read)(struct perf_event *event);
+ int (*perf_event_write)(struct perf_event *event);
+
+#endif
};
struct security_hook_heads {
@@ -2094,6 +2102,13 @@ struct security_hook_heads {
struct hlist_head bpf_prog_free_security;
#endif /* CONFIG_BPF_SYSCALL */
struct hlist_head locked_down;
+#ifdef CONFIG_PERF_EVENTS
+ struct hlist_head perf_event_open;
+ struct hlist_head perf_event_alloc;
+ struct hlist_head perf_event_free;
+ struct hlist_head perf_event_read;
+ struct hlist_head perf_event_write;
+#endif
} __randomize_layout;
/*
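
Note: a hedged sketch of an LSM wiring up one of the new perf_event hooks. Only the hook signature comes from the union above; the function body, names, and return policy are hypothetical.

/* Hypothetical LSM sketch */
static int demo_perf_event_open(struct perf_event_attr *attr, int type)
{
	return 0;	/* 0 = allow */
}

static struct security_hook_list demo_hooks[] __lsm_ro_after_init = {
	LSM_HOOK_INIT(perf_event_open, demo_perf_event_open),
};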
diff --git a/include/linux/mailbox/mtk-cmdq-mailbox.h b/include/linux/mailbox/mtk-cmdq-mailbox.h
index e6f54ef6698b..a4dc45fbec0a 100644
--- a/include/linux/mailbox/mtk-cmdq-mailbox.h
+++ b/include/linux/mailbox/mtk-cmdq-mailbox.h
@@ -20,6 +20,16 @@
#define CMDQ_WFE_WAIT BIT(15)
#define CMDQ_WFE_WAIT_VALUE 0x1
+/*
+ * WFE arg_b
+ * bit 0-11: wait value
+ * bit 15: 1 - wait, 0 - no wait
+ * bit 16-27: update value
+ * bit 31: 1 - update, 0 - no update
+ */
+#define CMDQ_WFE_OPTION (CMDQ_WFE_UPDATE | CMDQ_WFE_WAIT | \
+ CMDQ_WFE_WAIT_VALUE)
+
/** cmdq event maximum */
#define CMDQ_MAX_EVENT 0x3ff
@@ -45,6 +55,7 @@
enum cmdq_code {
CMDQ_CODE_MASK = 0x02,
CMDQ_CODE_WRITE = 0x04,
+ CMDQ_CODE_POLL = 0x08,
CMDQ_CODE_JUMP = 0x10,
CMDQ_CODE_WFE = 0x20,
CMDQ_CODE_EOC = 0x40,
diff --git a/include/linux/math64.h b/include/linux/math64.h
index 65bef21cdddb..11a267413e8e 100644
--- a/include/linux/math64.h
+++ b/include/linux/math64.h
@@ -3,6 +3,7 @@
#define _LINUX_MATH64_H
#include <linux/types.h>
+#include <vdso/math64.h>
#include <asm/div64.h>
#if BITS_PER_LONG == 64
@@ -142,25 +143,6 @@ static inline s64 div_s64(s64 dividend, s32 divisor)
u32 iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder);
-static __always_inline u32
-__iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder)
-{
- u32 ret = 0;
-
- while (dividend >= divisor) {
- /* The following asm() prevents the compiler from
- optimising this loop into a modulo operation. */
- asm("" : "+rm"(dividend));
-
- dividend -= divisor;
- ret++;
- }
-
- *remainder = dividend;
-
- return ret;
-}
-
#ifndef mul_u32_u32
/*
* Many a GCC version messes this up and generates a 64x64 mult :-(
diff --git a/include/linux/memblock.h b/include/linux/memblock.h
index f491690d54c6..079d17d96410 100644
--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -113,6 +113,9 @@ int memblock_add(phys_addr_t base, phys_addr_t size);
int memblock_remove(phys_addr_t base, phys_addr_t size);
int memblock_free(phys_addr_t base, phys_addr_t size);
int memblock_reserve(phys_addr_t base, phys_addr_t size);
+#ifdef CONFIG_HAVE_MEMBLOCK_PHYS_MAP
+int memblock_physmem_add(phys_addr_t base, phys_addr_t size);
+#endif
void memblock_trim_memory(phys_addr_t align);
bool memblock_overlaps_region(struct memblock_type *type,
phys_addr_t base, phys_addr_t size);
@@ -127,10 +130,6 @@ void reset_node_managed_pages(pg_data_t *pgdat);
void reset_all_zones_managed_pages(void);
/* Low level functions */
-int memblock_add_range(struct memblock_type *type,
- phys_addr_t base, phys_addr_t size,
- int nid, enum memblock_flags flags);
-
void __next_mem_range(u64 *idx, int nid, enum memblock_flags flags,
struct memblock_type *type_a,
struct memblock_type *type_b, phys_addr_t *out_start,
@@ -358,6 +357,9 @@ static inline phys_addr_t memblock_phys_alloc(phys_addr_t size,
MEMBLOCK_ALLOC_ACCESSIBLE);
}
+void *memblock_alloc_exact_nid_raw(phys_addr_t size, phys_addr_t align,
+ phys_addr_t min_addr, phys_addr_t max_addr,
+ int nid);
void *memblock_alloc_try_nid_raw(phys_addr_t size, phys_addr_t align,
phys_addr_t min_addr, phys_addr_t max_addr,
int nid);
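
Note: a hypothetical early-boot caller of the new memblock_alloc_exact_nid_raw(); unlike the try_nid variant it is expected not to fall back to other nodes, and the raw variants return memory that the caller must initialize itself.

/* Hypothetical sketch: per-node buffer that must live on 'nid'. */
static void * __init demo_node_buf_alloc(int nid)
{
	return memblock_alloc_exact_nid_raw(SZ_1M, SMP_CACHE_BYTES,
					    MEMBLOCK_LOW_LIMIT,
					    MEMBLOCK_ALLOC_ACCESSIBLE, nid);
}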
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index ae703ea3ef48..e9ba01336d4e 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -58,7 +58,6 @@ enum mem_cgroup_protection {
struct mem_cgroup_reclaim_cookie {
pg_data_t *pgdat;
- int priority;
unsigned int generation;
};
@@ -81,7 +80,6 @@ struct mem_cgroup_id {
enum mem_cgroup_events_target {
MEM_CGROUP_TARGET_THRESH,
MEM_CGROUP_TARGET_SOFTLIMIT,
- MEM_CGROUP_TARGET_NUMAINFO,
MEM_CGROUP_NTARGETS,
};
@@ -112,7 +110,7 @@ struct memcg_shrinker_map {
};
/*
- * per-zone information in memory controller.
+ * per-node information in memory controller.
*/
struct mem_cgroup_per_node {
struct lruvec lruvec;
@@ -126,7 +124,7 @@ struct mem_cgroup_per_node {
unsigned long lru_zone_size[MAX_NR_ZONES][NR_LRU_LISTS];
- struct mem_cgroup_reclaim_iter iter[DEF_PRIORITY + 1];
+ struct mem_cgroup_reclaim_iter iter;
struct memcg_shrinker_map __rcu *shrinker_map;
@@ -134,9 +132,6 @@ struct mem_cgroup_per_node {
unsigned long usage_in_excess;/* Set to the value by which */
/* the soft limit is exceeded*/
bool on_tree;
- bool congested; /* memcg has many dirty pages */
- /* backed by a congested BDI */
-
struct mem_cgroup *memcg; /* Back pointer, we cannot */
/* use container_of */
};
@@ -313,13 +308,6 @@ struct mem_cgroup {
struct list_head kmem_caches;
#endif
- int last_scanned_node;
-#if MAX_NUMNODES > 1
- nodemask_t scan_nodes;
- atomic_t numainfo_events;
- atomic_t numainfo_updating;
-#endif
-
#ifdef CONFIG_CGROUP_WRITEBACK
struct list_head cgwb_list;
struct wb_domain cgwb_domain;
@@ -394,25 +382,27 @@ mem_cgroup_nodeinfo(struct mem_cgroup *memcg, int nid)
}
/**
- * mem_cgroup_lruvec - get the lru list vector for a node or a memcg zone
- * @node: node of the wanted lruvec
+ * mem_cgroup_lruvec - get the lru list vector for a memcg & node
* @memcg: memcg of the wanted lruvec
*
- * Returns the lru list vector holding pages for a given @node or a given
- * @memcg and @zone. This can be the node lruvec, if the memory controller
- * is disabled.
+ * Returns the lru list vector holding pages for a given @memcg &
+ * @node combination. This can be the node lruvec, if the memory
+ * controller is disabled.
*/
-static inline struct lruvec *mem_cgroup_lruvec(struct pglist_data *pgdat,
- struct mem_cgroup *memcg)
+static inline struct lruvec *mem_cgroup_lruvec(struct mem_cgroup *memcg,
+ struct pglist_data *pgdat)
{
struct mem_cgroup_per_node *mz;
struct lruvec *lruvec;
if (mem_cgroup_disabled()) {
- lruvec = node_lruvec(pgdat);
+ lruvec = &pgdat->__lruvec;
goto out;
}
+ if (!memcg)
+ memcg = root_mem_cgroup;
+
mz = mem_cgroup_nodeinfo(memcg, pgdat->node_id);
lruvec = &mz->lruvec;
out:
@@ -705,6 +695,7 @@ static inline unsigned long lruvec_page_state_local(struct lruvec *lruvec,
void __mod_lruvec_state(struct lruvec *lruvec, enum node_stat_item idx,
int val);
void __mod_lruvec_slab_state(void *p, enum node_stat_item idx, int val);
+void mod_memcg_obj_state(void *p, int idx, int val);
static inline void mod_lruvec_state(struct lruvec *lruvec,
enum node_stat_item idx, int val)
@@ -728,7 +719,7 @@ static inline void __mod_lruvec_page_state(struct page *page,
return;
}
- lruvec = mem_cgroup_lruvec(pgdat, page->mem_cgroup);
+ lruvec = mem_cgroup_lruvec(page->mem_cgroup, pgdat);
__mod_lruvec_state(lruvec, idx, val);
}
@@ -899,16 +890,21 @@ static inline void mem_cgroup_migrate(struct page *old, struct page *new)
{
}
-static inline struct lruvec *mem_cgroup_lruvec(struct pglist_data *pgdat,
- struct mem_cgroup *memcg)
+static inline struct lruvec *mem_cgroup_lruvec(struct mem_cgroup *memcg,
+ struct pglist_data *pgdat)
{
- return node_lruvec(pgdat);
+ return &pgdat->__lruvec;
}
static inline struct lruvec *mem_cgroup_page_lruvec(struct page *page,
struct pglist_data *pgdat)
{
- return &pgdat->lruvec;
+ return &pgdat->__lruvec;
+}
+
+static inline struct mem_cgroup *parent_mem_cgroup(struct mem_cgroup *memcg)
+{
+ return NULL;
}
static inline bool mm_match_cgroup(struct mm_struct *mm,
@@ -1128,6 +1124,10 @@ static inline void __mod_lruvec_slab_state(void *p, enum node_stat_item idx,
__mod_node_page_state(page_pgdat(page), idx, val);
}
+static inline void mod_memcg_obj_state(void *p, int idx, int val)
+{
+}
+
static inline
unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
gfp_t gfp_mask,
@@ -1432,6 +1432,8 @@ static inline int memcg_cache_id(struct mem_cgroup *memcg)
return memcg ? memcg->kmemcg_id : -1;
}
+struct mem_cgroup *mem_cgroup_from_obj(void *p);
+
#else
static inline int memcg_kmem_charge(struct page *page, gfp_t gfp, int order)
@@ -1473,6 +1475,11 @@ static inline void memcg_put_cache_ids(void)
{
}
+static inline struct mem_cgroup *mem_cgroup_from_obj(void *p)
+{
+ return NULL;
+}
+
#endif /* CONFIG_MEMCG_KMEM */
#endif /* _LINUX_MEMCONTROL_H */
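
Note: a small hypothetical sketch of the updated mem_cgroup_lruvec() calling convention (memcg first, node second; a NULL memcg now maps to the root cgroup). The helper name is made up.

/* Hypothetical helper, illustrating argument order only */
static unsigned long demo_node_inactive_file(struct mem_cgroup *memcg, int nid)
{
	struct lruvec *lruvec = mem_cgroup_lruvec(memcg, NODE_DATA(nid));

	return lruvec_page_state(lruvec, NR_INACTIVE_FILE);
}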
diff --git a/include/linux/memory.h b/include/linux/memory.h
index 4c75dae8dd29..0b8d791b6669 100644
--- a/include/linux/memory.h
+++ b/include/linux/memory.h
@@ -29,8 +29,6 @@ struct memory_block {
int section_count; /* serialized by mem_sysfs_mutex */
int online_type; /* for passing data to online routine */
int phys_device; /* to which fru does this belong? */
- void *hw; /* optional pointer to fw/hw data */
- int (*phys_callback)(struct memory_block *);
struct device dev;
int nid; /* NID for this memory block */
};
@@ -55,19 +53,6 @@ struct memory_notify {
int status_change_nid;
};
-/*
- * During pageblock isolation, count the number of pages within the
- * range [start_pfn, start_pfn + nr_pages) which are owned by code
- * in the notifier chain.
- */
-#define MEM_ISOLATE_COUNT (1<<0)
-
-struct memory_isolate_notify {
- unsigned long start_pfn; /* Start of range to check */
- unsigned int nr_pages; /* # pages in range to check */
- unsigned int pages_found; /* # pages owned found by callbacks */
-};
-
struct notifier_block;
struct mem_section;
@@ -94,27 +79,13 @@ static inline int memory_notify(unsigned long val, void *v)
{
return 0;
}
-static inline int register_memory_isolate_notifier(struct notifier_block *nb)
-{
- return 0;
-}
-static inline void unregister_memory_isolate_notifier(struct notifier_block *nb)
-{
-}
-static inline int memory_isolate_notify(unsigned long val, void *v)
-{
- return 0;
-}
#else
extern int register_memory_notifier(struct notifier_block *nb);
extern void unregister_memory_notifier(struct notifier_block *nb);
-extern int register_memory_isolate_notifier(struct notifier_block *nb);
-extern void unregister_memory_isolate_notifier(struct notifier_block *nb);
int create_memory_block_devices(unsigned long start, unsigned long size);
void remove_memory_block_devices(unsigned long start, unsigned long size);
extern void memory_dev_init(void);
extern int memory_notify(unsigned long val, void *v);
-extern int memory_isolate_notify(unsigned long val, void *v);
extern struct memory_block *find_memory_block(struct mem_section *);
typedef int (*walk_memory_blocks_func_t)(struct memory_block *, void *);
extern int walk_memory_blocks(unsigned long start, unsigned long size,
diff --git a/include/linux/memory_hotplug.h b/include/linux/memory_hotplug.h
index f46ea71b4ffd..f4d59155f3d4 100644
--- a/include/linux/memory_hotplug.h
+++ b/include/linux/memory_hotplug.h
@@ -94,21 +94,19 @@ extern int zone_grow_free_lists(struct zone *zone, unsigned long new_nr_pages);
extern int zone_grow_waitqueues(struct zone *zone, unsigned long nr_pages);
extern int add_one_highpage(struct page *page, int pfn, int bad_ppro);
/* VM interface that may be used by firmware interface */
-extern int online_pages(unsigned long, unsigned long, int);
-extern int test_pages_in_a_zone(unsigned long start_pfn, unsigned long end_pfn,
- unsigned long *valid_start, unsigned long *valid_end);
+extern int online_pages(unsigned long pfn, unsigned long nr_pages,
+ int online_type, int nid);
+extern struct zone *test_pages_in_a_zone(unsigned long start_pfn,
+ unsigned long end_pfn);
extern unsigned long __offline_isolated_pages(unsigned long start_pfn,
unsigned long end_pfn);
typedef void (*online_page_callback_t)(struct page *page, unsigned int order);
+extern void generic_online_page(struct page *page, unsigned int order);
extern int set_online_page_callback(online_page_callback_t callback);
extern int restore_online_page_callback(online_page_callback_t callback);
-extern void __online_page_set_limits(struct page *page);
-extern void __online_page_increment_counters(struct page *page);
-extern void __online_page_free(struct page *page);
-
extern int try_online_node(int nid);
extern int arch_add_memory(int nid, u64 start, u64 size,
@@ -125,8 +123,8 @@ static inline bool movable_node_is_enabled(void)
extern void arch_remove_memory(int nid, u64 start, u64 size,
struct vmem_altmap *altmap);
-extern void __remove_pages(struct zone *zone, unsigned long start_pfn,
- unsigned long nr_pages, struct vmem_altmap *altmap);
+extern void __remove_pages(unsigned long start_pfn, unsigned long nr_pages,
+ struct vmem_altmap *altmap);
/* reasonably generic interface to expand the physical pages */
extern int __add_pages(int nid, unsigned long start_pfn, unsigned long nr_pages,
@@ -229,9 +227,6 @@ void put_online_mems(void);
void mem_hotplug_begin(void);
void mem_hotplug_done(void);
-extern void set_zone_contiguous(struct zone *zone);
-extern void clear_zone_contiguous(struct zone *zone);
-
#else /* ! CONFIG_MEMORY_HOTPLUG */
#define pfn_to_online_page(pfn) \
({ \
@@ -339,12 +334,18 @@ static inline int remove_memory(int nid, u64 start, u64 size)
static inline void __remove_memory(int nid, u64 start, u64 size) {}
#endif /* CONFIG_MEMORY_HOTREMOVE */
+extern void set_zone_contiguous(struct zone *zone);
+extern void clear_zone_contiguous(struct zone *zone);
+
extern void __ref free_area_init_core_hotplug(int nid);
extern int __add_memory(int nid, u64 start, u64 size);
extern int add_memory(int nid, u64 start, u64 size);
extern int add_memory_resource(int nid, struct resource *resource);
extern void move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn,
unsigned long nr_pages, struct vmem_altmap *altmap);
+extern void remove_pfn_range_from_zone(struct zone *zone,
+ unsigned long start_pfn,
+ unsigned long nr_pages);
extern bool is_memblock_offlined(struct memory_block *mem);
extern int sparse_add_section(int nid, unsigned long pfn,
unsigned long nr_pages, struct vmem_altmap *altmap);
diff --git a/include/linux/memregion.h b/include/linux/memregion.h
new file mode 100644
index 000000000000..e11595256cac
--- /dev/null
+++ b/include/linux/memregion.h
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _MEMREGION_H_
+#define _MEMREGION_H_
+#include <linux/types.h>
+#include <linux/errno.h>
+
+struct memregion_info {
+ int target_node;
+};
+
+#ifdef CONFIG_MEMREGION
+int memregion_alloc(gfp_t gfp);
+void memregion_free(int id);
+#else
+static inline int memregion_alloc(gfp_t gfp)
+{
+ return -ENOMEM;
+}
+static inline void memregion_free(int id)
+{
+}
+#endif
+#endif /* _MEMREGION_H_ */
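
Note: a hypothetical user of the new memregion id allocator; with CONFIG_MEMREGION disabled the stub simply reports -ENOMEM. Names are illustrative.

/* Hypothetical probe-path sketch */
static int demo_region_probe(void)
{
	int id = memregion_alloc(GFP_KERNEL);

	if (id < 0)
		return id;
	/* ... use id, e.g. as a device instance number ... */
	memregion_free(id);
	return 0;
}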
diff --git a/include/linux/mfd/abx500/ab8500-gpadc.h b/include/linux/mfd/abx500/ab8500-gpadc.h
deleted file mode 100644
index 836c944abe2e..000000000000
--- a/include/linux/mfd/abx500/ab8500-gpadc.h
+++ /dev/null
@@ -1,75 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (C) 2010 ST-Ericsson SA
- *
- * Author: Arun R Murthy <arun.murthy@stericsson.com>
- * Author: Daniel Willerud <daniel.willerud@stericsson.com>
- * Author: M'boumba Cedric Madianga <cedric.madianga@stericsson.com>
- */
-
-#ifndef _AB8500_GPADC_H
-#define _AB8500_GPADC_H
-
-/* GPADC source: From datasheet(ADCSwSel[4:0] in GPADCCtrl2
- * and ADCHwSel[4:0] in GPADCCtrl3 ) */
-#define BAT_CTRL 0x01
-#define BTEMP_BALL 0x02
-#define MAIN_CHARGER_V 0x03
-#define ACC_DETECT1 0x04
-#define ACC_DETECT2 0x05
-#define ADC_AUX1 0x06
-#define ADC_AUX2 0x07
-#define MAIN_BAT_V 0x08
-#define VBUS_V 0x09
-#define MAIN_CHARGER_C 0x0A
-#define USB_CHARGER_C 0x0B
-#define BK_BAT_V 0x0C
-#define DIE_TEMP 0x0D
-#define USB_ID 0x0E
-#define XTAL_TEMP 0x12
-#define VBAT_TRUE_MEAS 0x13
-#define BAT_CTRL_AND_IBAT 0x1C
-#define VBAT_MEAS_AND_IBAT 0x1D
-#define VBAT_TRUE_MEAS_AND_IBAT 0x1E
-#define BAT_TEMP_AND_IBAT 0x1F
-
-/* Virtual channel used only for ibat convertion to ampere
- * Battery current conversion (ibat) cannot be requested as a single conversion
- * but it is always in combination with other input requests
- */
-#define IBAT_VIRTUAL_CHANNEL 0xFF
-
-#define SAMPLE_1 1
-#define SAMPLE_4 4
-#define SAMPLE_8 8
-#define SAMPLE_16 16
-#define RISING_EDGE 0
-#define FALLING_EDGE 1
-
-/* Arbitrary ADC conversion type constants */
-#define ADC_SW 0
-#define ADC_HW 1
-
-struct ab8500_gpadc;
-
-struct ab8500_gpadc *ab8500_gpadc_get(char *name);
-int ab8500_gpadc_sw_hw_convert(struct ab8500_gpadc *gpadc, u8 channel,
- u8 avg_sample, u8 trig_edge, u8 trig_timer, u8 conv_type);
-static inline int ab8500_gpadc_convert(struct ab8500_gpadc *gpadc, u8 channel)
-{
- return ab8500_gpadc_sw_hw_convert(gpadc, channel,
- SAMPLE_16, 0, 0, ADC_SW);
-}
-
-int ab8500_gpadc_read_raw(struct ab8500_gpadc *gpadc, u8 channel,
- u8 avg_sample, u8 trig_edge, u8 trig_timer, u8 conv_type);
-int ab8500_gpadc_double_read_raw(struct ab8500_gpadc *gpadc, u8 channel,
- u8 avg_sample, u8 trig_edge, u8 trig_timer, u8 conv_type,
- int *ibat);
-int ab8500_gpadc_ad_to_voltage(struct ab8500_gpadc *gpadc,
- u8 channel, int ad_value);
-void ab8540_gpadc_get_otp(struct ab8500_gpadc *gpadc,
- u16 *vmain_l, u16 *vmain_h, u16 *btemp_l, u16 *btemp_h,
- u16 *vbat_l, u16 *vbat_h, u16 *ibat_l, u16 *ibat_h);
-
-#endif /* _AB8500_GPADC_H */
diff --git a/include/linux/mfd/arizona/registers.h b/include/linux/mfd/arizona/registers.h
index bb1a2530ae27..49e24d1de8d4 100644
--- a/include/linux/mfd/arizona/registers.h
+++ b/include/linux/mfd/arizona/registers.h
@@ -1186,13 +1186,6 @@
#define ARIZONA_DSP4_SCRATCH_1 0x1441
#define ARIZONA_DSP4_SCRATCH_2 0x1442
#define ARIZONA_DSP4_SCRATCH_3 0x1443
-#define ARIZONA_FRF_COEFF_1 0x1700
-#define ARIZONA_FRF_COEFF_2 0x1701
-#define ARIZONA_FRF_COEFF_3 0x1702
-#define ARIZONA_FRF_COEFF_4 0x1703
-#define ARIZONA_V2_DAC_COMP_1 0x1704
-#define ARIZONA_V2_DAC_COMP_2 0x1705
-
/*
* Field Definitions.
diff --git a/include/linux/mfd/core.h b/include/linux/mfd/core.h
index b43fc5773ad7..d01d1299e49d 100644
--- a/include/linux/mfd/core.h
+++ b/include/linux/mfd/core.h
@@ -12,6 +12,35 @@
#include <linux/platform_device.h>
+#define MFD_RES_SIZE(arr) (sizeof(arr) / sizeof(struct resource))
+
+#define MFD_CELL_ALL(_name, _res, _pdata, _pdsize, _id, _compat, _match)\
+ { \
+ .name = (_name), \
+ .resources = (_res), \
+ .num_resources = MFD_RES_SIZE((_res)), \
+ .platform_data = (_pdata), \
+ .pdata_size = (_pdsize), \
+ .of_compatible = (_compat), \
+ .acpi_match = (_match), \
+ .id = (_id), \
+ }
+
+#define OF_MFD_CELL(_name, _res, _pdata, _pdsize, _id, _compat)	\
+ MFD_CELL_ALL(_name, _res, _pdata, _pdsize, _id, _compat, NULL) \
+
+#define ACPI_MFD_CELL(_name, _res, _pdata, _pdsize, _id, _match) \
+ MFD_CELL_ALL(_name, _res, _pdata, _pdsize, _id, NULL, _match) \
+
+#define MFD_CELL_BASIC(_name, _res, _pdata, _pdsize, _id) \
+ MFD_CELL_ALL(_name, _res, _pdata, _pdsize, _id, NULL, NULL) \
+
+#define MFD_CELL_RES(_name, _res) \
+ MFD_CELL_ALL(_name, _res, NULL, 0, 0, NULL, NULL) \
+
+#define MFD_CELL_NAME(_name) \
+ MFD_CELL_ALL(_name, NULL, NULL, 0, 0, NULL, NULL) \
+
struct irq_domain;
struct property_entry;
@@ -30,8 +59,6 @@ struct mfd_cell {
const char *name;
int id;
- /* refcounting for multiple drivers to use a single cell */
- atomic_t *usage_count;
int (*enable)(struct platform_device *dev);
int (*disable)(struct platform_device *dev);
@@ -87,24 +114,6 @@ extern int mfd_cell_enable(struct platform_device *pdev);
extern int mfd_cell_disable(struct platform_device *pdev);
/*
- * "Clone" multiple platform devices for a single cell. This is to be used
- * for devices that have multiple users of a cell. For example, if an mfd
- * driver wants the cell "foo" to be used by a GPIO driver, an MTD driver,
- * and a platform driver, the following bit of code would be used after first
- * calling mfd_add_devices():
- *
- * const char *fclones[] = { "foo-gpio", "foo-mtd" };
- * err = mfd_clone_cells("foo", fclones, ARRAY_SIZE(fclones));
- *
- * Each driver (MTD, GPIO, and platform driver) would then register
- * platform_drivers for "foo-mtd", "foo-gpio", and "foo", respectively.
- * The cell's .enable/.disable hooks should be used to deal with hardware
- * resource contention.
- */
-extern int mfd_clone_cell(const char *cell, const char **clones,
- size_t n_clones);
-
-/*
* Given a platform device that's been created by mfd_add_devices(), fetch
* the mfd_cell that created it.
*/
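
A minimal usage sketch of the new cell helpers introduced above; the "foo" cell names, resource and compatible string are hypothetical, not from this patch:

static const struct resource foo_rtc_resources[] = {
	DEFINE_RES_IRQ(3),			/* hypothetical IRQ line */
};

static const struct mfd_cell foo_cells[] = {
	/* cell with resources and an OF compatible */
	OF_MFD_CELL("foo-rtc", foo_rtc_resources, NULL, 0, 0, "acme,foo-rtc"),
	/* name-only cell */
	MFD_CELL_NAME("foo-wdt"),
};

The array is then registered with mfd_add_devices() exactly as before; only the cell declarations change.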
diff --git a/include/linux/mfd/cros_ec.h b/include/linux/mfd/cros_ec.h
deleted file mode 100644
index 61c2875c2a40..000000000000
--- a/include/linux/mfd/cros_ec.h
+++ /dev/null
@@ -1,35 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * ChromeOS EC multi-function device
- *
- * Copyright (C) 2012 Google, Inc
- */
-
-#ifndef __LINUX_MFD_CROS_EC_H
-#define __LINUX_MFD_CROS_EC_H
-
-#include <linux/device.h>
-
-/**
- * struct cros_ec_dev - ChromeOS EC device entry point.
- * @class_dev: Device structure used in sysfs.
- * @ec_dev: cros_ec_device structure to talk to the physical device.
- * @dev: Pointer to the platform device.
- * @debug_info: cros_ec_debugfs structure for debugging information.
- * @has_kb_wake_angle: True if at least 2 accelerometers are connected to the EC.
- * @cmd_offset: Offset to apply for each command.
- * @features: Features supported by the EC.
- */
-struct cros_ec_dev {
- struct device class_dev;
- struct cros_ec_device *ec_dev;
- struct device *dev;
- struct cros_ec_debugfs *debug_info;
- bool has_kb_wake_angle;
- u16 cmd_offset;
- u32 features[2];
-};
-
-#define to_cros_ec_dev(dev) container_of(dev, struct cros_ec_dev, class_dev)
-
-#endif /* __LINUX_MFD_CROS_EC_H */
diff --git a/include/linux/mfd/db8500-prcmu.h b/include/linux/mfd/db8500-prcmu.h
index 813710aa2cfd..4b63d3ecdcff 100644
--- a/include/linux/mfd/db8500-prcmu.h
+++ b/include/linux/mfd/db8500-prcmu.h
@@ -489,7 +489,7 @@ struct prcmu_auto_pm_config {
#ifdef CONFIG_MFD_DB8500_PRCMU
-void db8500_prcmu_early_init(u32 phy_base, u32 size);
+void db8500_prcmu_early_init(void);
int prcmu_set_rc_a2p(enum romcode_write);
enum romcode_read prcmu_get_rc_p2a(void);
enum ap_pwrst prcmu_get_xp70_current_state(void);
@@ -525,9 +525,6 @@ u8 db8500_prcmu_get_power_state_result(void);
void db8500_prcmu_enable_wakeups(u32 wakeups);
int db8500_prcmu_set_epod(u16 epod_id, u8 epod_state);
int db8500_prcmu_request_clock(u8 clock, bool enable);
-int db8500_prcmu_set_display_clocks(void);
-int db8500_prcmu_disable_dsipll(void);
-int db8500_prcmu_enable_dsipll(void);
void db8500_prcmu_config_abb_event_readout(u32 abb_events);
void db8500_prcmu_get_abb_event_buffer(void __iomem **buf);
int db8500_prcmu_config_esram0_deep_sleep(u8 state);
@@ -546,7 +543,7 @@ void db8500_prcmu_write_masked(unsigned int reg, u32 mask, u32 value);
#else /* !CONFIG_MFD_DB8500_PRCMU */
-static inline void db8500_prcmu_early_init(u32 phy_base, u32 size) {}
+static inline void db8500_prcmu_early_init(void) {}
static inline int prcmu_set_rc_a2p(enum romcode_write code)
{
@@ -682,21 +679,6 @@ static inline int db8500_prcmu_request_clock(u8 clock, bool enable)
return 0;
}
-static inline int db8500_prcmu_set_display_clocks(void)
-{
- return 0;
-}
-
-static inline int db8500_prcmu_disable_dsipll(void)
-{
- return 0;
-}
-
-static inline int db8500_prcmu_enable_dsipll(void)
-{
- return 0;
-}
-
static inline int db8500_prcmu_config_esram0_deep_sleep(u8 state)
{
return 0;
diff --git a/include/linux/mfd/dbx500-prcmu.h b/include/linux/mfd/dbx500-prcmu.h
index 238401a50d0b..e6ee2ec35de9 100644
--- a/include/linux/mfd/dbx500-prcmu.h
+++ b/include/linux/mfd/dbx500-prcmu.h
@@ -190,6 +190,7 @@ enum ddr_pwrst {
#define PRCMU_FW_PROJECT_U8500_MBL2 12 /* Customer specific */
#define PRCMU_FW_PROJECT_U8520 13
#define PRCMU_FW_PROJECT_U8420 14
+#define PRCMU_FW_PROJECT_U8420_SYSCLK 17
#define PRCMU_FW_PROJECT_A9420 20
/* [32..63] 9540 and derivatives */
#define PRCMU_FW_PROJECT_U9540 32
@@ -211,9 +212,9 @@ struct prcmu_fw_version {
#if defined(CONFIG_UX500_SOC_DB8500)
-static inline void prcmu_early_init(u32 phy_base, u32 size)
+static inline void prcmu_early_init(void)
{
- return db8500_prcmu_early_init(phy_base, size);
+ return db8500_prcmu_early_init();
}
static inline int prcmu_set_power_state(u8 state, bool keep_ulp_clk,
@@ -320,21 +321,6 @@ static inline bool prcmu_is_ac_wake_requested(void)
return db8500_prcmu_is_ac_wake_requested();
}
-static inline int prcmu_set_display_clocks(void)
-{
- return db8500_prcmu_set_display_clocks();
-}
-
-static inline int prcmu_disable_dsipll(void)
-{
- return db8500_prcmu_disable_dsipll();
-}
-
-static inline int prcmu_enable_dsipll(void)
-{
- return db8500_prcmu_enable_dsipll();
-}
-
static inline int prcmu_config_esram0_deep_sleep(u8 state)
{
return db8500_prcmu_config_esram0_deep_sleep(state);
@@ -401,7 +387,7 @@ static inline int prcmu_config_a9wdog(u8 num, bool sleep_auto_off)
}
#else
-static inline void prcmu_early_init(u32 phy_base, u32 size) {}
+static inline void prcmu_early_init(void) {}
static inline int prcmu_set_power_state(u8 state, bool keep_ulp_clk,
bool keep_ap_pll)
@@ -510,21 +496,6 @@ static inline bool prcmu_is_ac_wake_requested(void)
return false;
}
-static inline int prcmu_set_display_clocks(void)
-{
- return 0;
-}
-
-static inline int prcmu_disable_dsipll(void)
-{
- return 0;
-}
-
-static inline int prcmu_enable_dsipll(void)
-{
- return 0;
-}
-
static inline int prcmu_config_esram0_deep_sleep(u8 state)
{
return 0;
diff --git a/include/linux/mfd/madera/core.h b/include/linux/mfd/madera/core.h
index 7ffa696cce7c..ad2c138105d4 100644
--- a/include/linux/mfd/madera/core.h
+++ b/include/linux/mfd/madera/core.h
@@ -8,6 +8,7 @@
#ifndef MADERA_CORE_H
#define MADERA_CORE_H
+#include <linux/clk.h>
#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/mfd/madera/pdata.h>
@@ -29,6 +30,13 @@ enum madera_type {
CS42L92 = 9,
};
+enum {
+ MADERA_MCLK1,
+ MADERA_MCLK2,
+ MADERA_MCLK3,
+ MADERA_NUM_MCLK
+};
+
#define MADERA_MAX_CORE_SUPPLIES 2
#define MADERA_MAX_GPIOS 40
@@ -155,6 +163,7 @@ struct snd_soc_dapm_context;
* @irq_dev: the irqchip child driver device
* @irq_data: pointer to irqchip data for the child irqchip driver
* @irq: host irq number from SPI or I2C configuration
+ * @mclk: Structure holding clock supplies
* @out_clamp: indicates output clamp state for each analogue output
* @out_shorted: indicates short circuit state for each analogue output
* @hp_ena: bitflags of enable state for the headphone outputs
@@ -184,6 +193,8 @@ struct madera {
struct regmap_irq_chip_data *irq_data;
int irq;
+ struct clk_bulk_data mclk[MADERA_NUM_MCLK];
+
unsigned int num_micbias;
unsigned int num_childbias[MADERA_MAX_MICBIAS];
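
A sketch of how a Madera child driver could consume the new bulk-clock table; how the core driver actually requests and fills the mclk entries is assumed here, it is not shown in this hunk:

static int madera_child_enable_mclk2(struct madera *madera)
{
	/* MCLK2 may be absent; its clk pointer is then expected to be NULL */
	if (!madera->mclk[MADERA_MCLK2].clk)
		return 0;

	return clk_prepare_enable(madera->mclk[MADERA_MCLK2].clk);
}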
diff --git a/include/linux/mfd/max77620.h b/include/linux/mfd/max77620.h
index 12ba157cb83f..f552ef5b1100 100644
--- a/include/linux/mfd/max77620.h
+++ b/include/linux/mfd/max77620.h
@@ -329,7 +329,6 @@ struct max77620_chip {
struct regmap *rmap;
int chip_irq;
- int irq_base;
/* chip id */
enum max77620_chip_id chip_id;
diff --git a/include/linux/mfd/mt6397/rtc.h b/include/linux/mfd/mt6397/rtc.h
new file mode 100644
index 000000000000..7dfb63b81373
--- /dev/null
+++ b/include/linux/mfd/mt6397/rtc.h
@@ -0,0 +1,79 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2014-2019 MediaTek Inc.
+ *
+ * Author: Tianping.Fang <tianping.fang@mediatek.com>
+ * Sean Wang <sean.wang@mediatek.com>
+ */
+
+#ifndef _LINUX_MFD_MT6397_RTC_H_
+#define _LINUX_MFD_MT6397_RTC_H_
+
+#include <linux/jiffies.h>
+#include <linux/mutex.h>
+#include <linux/regmap.h>
+#include <linux/rtc.h>
+
+#define RTC_BBPU 0x0000
+#define RTC_BBPU_CBUSY BIT(6)
+#define RTC_BBPU_KEY (0x43 << 8)
+
+#define RTC_WRTGR 0x003c
+
+#define RTC_IRQ_STA 0x0002
+#define RTC_IRQ_STA_AL BIT(0)
+#define RTC_IRQ_STA_LP BIT(3)
+
+#define RTC_IRQ_EN 0x0004
+#define RTC_IRQ_EN_AL BIT(0)
+#define RTC_IRQ_EN_ONESHOT BIT(2)
+#define RTC_IRQ_EN_LP BIT(3)
+#define RTC_IRQ_EN_ONESHOT_AL (RTC_IRQ_EN_ONESHOT | RTC_IRQ_EN_AL)
+
+#define RTC_AL_MASK 0x0008
+#define RTC_AL_MASK_DOW BIT(4)
+
+#define RTC_TC_SEC 0x000a
+/* Min, Hour, Dom... register offset to RTC_TC_SEC */
+#define RTC_OFFSET_SEC 0
+#define RTC_OFFSET_MIN 1
+#define RTC_OFFSET_HOUR 2
+#define RTC_OFFSET_DOM 3
+#define RTC_OFFSET_DOW 4
+#define RTC_OFFSET_MTH 5
+#define RTC_OFFSET_YEAR 6
+#define RTC_OFFSET_COUNT 7
+
+#define RTC_AL_SEC 0x0018
+
+#define RTC_AL_SEC_MASK 0x003f
+#define RTC_AL_MIN_MASK 0x003f
+#define RTC_AL_HOU_MASK 0x001f
+#define RTC_AL_DOM_MASK 0x001f
+#define RTC_AL_DOW_MASK 0x0007
+#define RTC_AL_MTH_MASK 0x000f
+#define RTC_AL_YEA_MASK 0x007f
+
+#define RTC_PDN2 0x002e
+#define RTC_PDN2_PWRON_ALARM BIT(4)
+
+#define RTC_MIN_YEAR 1968
+#define RTC_BASE_YEAR 1900
+#define RTC_NUM_YEARS 128
+#define RTC_MIN_YEAR_OFFSET (RTC_MIN_YEAR - RTC_BASE_YEAR)
+
+#define MTK_RTC_POLL_DELAY_US 10
+#define MTK_RTC_POLL_TIMEOUT (jiffies_to_usecs(HZ))
+
+struct mt6397_rtc {
+ struct device *dev;
+ struct rtc_device *rtc_dev;
+
+ /* Protect register access from multiple tasks */
+ struct mutex lock;
+ struct regmap *regmap;
+ int irq;
+ u32 addr_base;
+};
+
+#endif /* _LINUX_MFD_MT6397_RTC_H_ */
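
A minimal sketch of how the exported register layout might be used by the RTC driver, taking the documented mutex around regmap access (the helper name is illustrative only):

static int mt6397_rtc_read_sec(struct mt6397_rtc *rtc, unsigned int *sec)
{
	int ret;

	mutex_lock(&rtc->lock);
	ret = regmap_read(rtc->regmap, rtc->addr_base + RTC_TC_SEC, sec);
	mutex_unlock(&rtc->lock);

	return ret;
}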
diff --git a/include/linux/mfd/rk808.h b/include/linux/mfd/rk808.h
index 7cfd2b0504df..a59bf323f713 100644
--- a/include/linux/mfd/rk808.h
+++ b/include/linux/mfd/rk808.h
@@ -610,7 +610,7 @@ enum {
RK808_ID = 0x0000,
RK809_ID = 0x8090,
RK817_ID = 0x8170,
- RK818_ID = 0x8181,
+ RK818_ID = 0x8180,
};
struct rk808 {
diff --git a/include/linux/mfd/rohm-bd70528.h b/include/linux/mfd/rohm-bd70528.h
index 1013e60c5b25..a57af878fd0c 100644
--- a/include/linux/mfd/rohm-bd70528.h
+++ b/include/linux/mfd/rohm-bd70528.h
@@ -7,6 +7,7 @@
#include <linux/bits.h>
#include <linux/device.h>
#include <linux/mfd/rohm-generic.h>
+#include <linux/mfd/rohm-shared.h>
#include <linux/regmap.h>
enum {
@@ -89,10 +90,6 @@ struct bd70528_data {
#define BD70528_REG_GPIO3_OUT 0x52
#define BD70528_REG_GPIO4_OUT 0x54
-/* clk control */
-
-#define BD70528_REG_CLK_OUT 0x2c
-
/* RTC */
#define BD70528_REG_RTC_COUNT_H 0x2d
@@ -309,21 +306,8 @@ enum {
#define BD70528_GPIO_IN_STATE_BASE 1
-#define BD70528_CLK_OUT_EN_MASK 0x1
-
/* RTC masks to mask out reserved bits */
-#define BD70528_MASK_RTC_SEC 0x7f
-#define BD70528_MASK_RTC_MINUTE 0x7f
-#define BD70528_MASK_RTC_HOUR_24H 0x80
-#define BD70528_MASK_RTC_HOUR_PM 0x20
-#define BD70528_MASK_RTC_HOUR 0x1f
-#define BD70528_MASK_RTC_DAY 0x3f
-#define BD70528_MASK_RTC_WEEK 0x07
-#define BD70528_MASK_RTC_MONTH 0x1f
-#define BD70528_MASK_RTC_YEAR 0xff
-#define BD70528_MASK_RTC_COUNT_L 0x7f
-
#define BD70528_MASK_ELAPSED_TIMER_EN 0x1
/* Mask second, min and hour fields
* HW would support ALM irq for over 24h
@@ -332,7 +316,6 @@ enum {
* wake-up we limit ALM to 24H and only
* unmask sec, min and hour
*/
-#define BD70528_MASK_ALM_EN 0x7
#define BD70528_MASK_WAKE_EN 0x1
/* WDT masks */
diff --git a/include/linux/mfd/rohm-bd71828.h b/include/linux/mfd/rohm-bd71828.h
new file mode 100644
index 000000000000..017a4c01cb31
--- /dev/null
+++ b/include/linux/mfd/rohm-bd71828.h
@@ -0,0 +1,423 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* Copyright (C) 2019 ROHM Semiconductors */
+
+#ifndef __LINUX_MFD_BD71828_H__
+#define __LINUX_MFD_BD71828_H__
+
+#include <linux/mfd/rohm-generic.h>
+#include <linux/mfd/rohm-shared.h>
+
+/* Regulator IDs */
+enum {
+ BD71828_BUCK1,
+ BD71828_BUCK2,
+ BD71828_BUCK3,
+ BD71828_BUCK4,
+ BD71828_BUCK5,
+ BD71828_BUCK6,
+ BD71828_BUCK7,
+ BD71828_LDO1,
+ BD71828_LDO2,
+ BD71828_LDO3,
+ BD71828_LDO4,
+ BD71828_LDO5,
+ BD71828_LDO6,
+ BD71828_LDO_SNVS,
+ BD71828_REGULATOR_AMOUNT,
+};
+
+#define BD71828_BUCK1267_VOLTS 0xEF
+#define BD71828_BUCK3_VOLTS 0x10
+#define BD71828_BUCK4_VOLTS 0x20
+#define BD71828_BUCK5_VOLTS 0x10
+#define BD71828_LDO_VOLTS 0x32
+/* LDO6 is a fixed 1.8V supply */
+#define BD71828_LDO_6_VOLTAGE 1800000
+
+/* Registers and masks */
+
+/* MODE control */
+#define BD71828_REG_PS_CTRL_1 0x04
+#define BD71828_REG_PS_CTRL_2 0x05
+#define BD71828_REG_PS_CTRL_3 0x06
+
+//#define BD71828_REG_SWRESET 0x06
+#define BD71828_MASK_RUN_LVL_CTRL 0x30
+
+/* Regulator control masks */
+
+#define BD71828_MASK_RAMP_DELAY 0x6
+
+#define BD71828_MASK_RUN_EN 0x08
+#define BD71828_MASK_SUSP_EN 0x04
+#define BD71828_MASK_IDLE_EN 0x02
+#define BD71828_MASK_LPSR_EN 0x01
+
+#define BD71828_MASK_RUN0_EN 0x01
+#define BD71828_MASK_RUN1_EN 0x02
+#define BD71828_MASK_RUN2_EN 0x04
+#define BD71828_MASK_RUN3_EN 0x08
+
+#define BD71828_MASK_DVS_BUCK1_CTRL 0x10
+#define BD71828_DVS_BUCK1_CTRL_I2C 0
+#define BD71828_DVS_BUCK1_USE_RUNLVL 0x10
+
+#define BD71828_MASK_DVS_BUCK2_CTRL 0x20
+#define BD71828_DVS_BUCK2_CTRL_I2C 0
+#define BD71828_DVS_BUCK2_USE_RUNLVL 0x20
+
+#define BD71828_MASK_DVS_BUCK6_CTRL 0x40
+#define BD71828_DVS_BUCK6_CTRL_I2C 0
+#define BD71828_DVS_BUCK6_USE_RUNLVL 0x40
+
+#define BD71828_MASK_DVS_BUCK7_CTRL 0x80
+#define BD71828_DVS_BUCK7_CTRL_I2C 0
+#define BD71828_DVS_BUCK7_USE_RUNLVL 0x80
+
+#define BD71828_MASK_BUCK1267_VOLT 0xff
+#define BD71828_MASK_BUCK3_VOLT 0x1f
+#define BD71828_MASK_BUCK4_VOLT 0x3f
+#define BD71828_MASK_BUCK5_VOLT 0x1f
+#define BD71828_MASK_LDO_VOLT 0x3f
+
+/* Regulator control regs */
+#define BD71828_REG_BUCK1_EN 0x08
+#define BD71828_REG_BUCK1_CTRL 0x09
+#define BD71828_REG_BUCK1_MODE 0x0a
+#define BD71828_REG_BUCK1_IDLE_VOLT 0x0b
+#define BD71828_REG_BUCK1_SUSP_VOLT 0x0c
+#define BD71828_REG_BUCK1_VOLT 0x0d
+
+#define BD71828_REG_BUCK2_EN 0x12
+#define BD71828_REG_BUCK2_CTRL 0x13
+#define BD71828_REG_BUCK2_MODE 0x14
+#define BD71828_REG_BUCK2_IDLE_VOLT 0x15
+#define BD71828_REG_BUCK2_SUSP_VOLT 0x16
+#define BD71828_REG_BUCK2_VOLT 0x17
+
+#define BD71828_REG_BUCK3_EN 0x1c
+#define BD71828_REG_BUCK3_MODE 0x1d
+#define BD71828_REG_BUCK3_VOLT 0x1e
+
+#define BD71828_REG_BUCK4_EN 0x1f
+#define BD71828_REG_BUCK4_MODE 0x20
+#define BD71828_REG_BUCK4_VOLT 0x21
+
+#define BD71828_REG_BUCK5_EN 0x22
+#define BD71828_REG_BUCK5_MODE 0x23
+#define BD71828_REG_BUCK5_VOLT 0x24
+
+#define BD71828_REG_BUCK6_EN 0x25
+#define BD71828_REG_BUCK6_CTRL 0x26
+#define BD71828_REG_BUCK6_MODE 0x27
+#define BD71828_REG_BUCK6_IDLE_VOLT 0x28
+#define BD71828_REG_BUCK6_SUSP_VOLT 0x29
+#define BD71828_REG_BUCK6_VOLT 0x2a
+
+#define BD71828_REG_BUCK7_EN 0x2f
+#define BD71828_REG_BUCK7_CTRL 0x30
+#define BD71828_REG_BUCK7_MODE 0x31
+#define BD71828_REG_BUCK7_IDLE_VOLT 0x32
+#define BD71828_REG_BUCK7_SUSP_VOLT 0x33
+#define BD71828_REG_BUCK7_VOLT 0x34
+
+#define BD71828_REG_LDO1_EN 0x39
+#define BD71828_REG_LDO1_VOLT 0x3a
+#define BD71828_REG_LDO2_EN 0x3b
+#define BD71828_REG_LDO2_VOLT 0x3c
+#define BD71828_REG_LDO3_EN 0x3d
+#define BD71828_REG_LDO3_VOLT 0x3e
+#define BD71828_REG_LDO4_EN 0x3f
+#define BD71828_REG_LDO4_VOLT 0x40
+#define BD71828_REG_LDO5_EN 0x41
+#define BD71828_REG_LDO5_VOLT 0x43
+#define BD71828_REG_LDO5_VOLT_OPT 0x42
+#define BD71828_REG_LDO6_EN 0x44
+//#define BD71828_REG_LDO6_VOLT 0x4
+#define BD71828_REG_LDO7_EN 0x45
+#define BD71828_REG_LDO7_VOLT 0x46
+
+/* GPIO */
+
+#define BD71828_GPIO_DRIVE_MASK 0x2
+#define BD71828_GPIO_OPEN_DRAIN 0x0
+#define BD71828_GPIO_PUSH_PULL 0x2
+#define BD71828_GPIO_OUT_HI 0x1
+#define BD71828_GPIO_OUT_LO 0x0
+#define BD71828_GPIO_OUT_MASK 0x1
+
+#define BD71828_REG_GPIO_CTRL1 0x47
+#define BD71828_REG_GPIO_CTRL2 0x48
+#define BD71828_REG_GPIO_CTRL3 0x49
+#define BD71828_REG_IO_STAT 0xed
+
+/* RTC */
+#define BD71828_REG_RTC_SEC 0x4c
+#define BD71828_REG_RTC_MINUTE 0x4d
+#define BD71828_REG_RTC_HOUR 0x4e
+#define BD71828_REG_RTC_WEEK 0x4f
+#define BD71828_REG_RTC_DAY 0x50
+#define BD71828_REG_RTC_MONTH 0x51
+#define BD71828_REG_RTC_YEAR 0x52
+
+#define BD71828_REG_RTC_ALM0_SEC 0x53
+#define BD71828_REG_RTC_ALM_START BD71828_REG_RTC_ALM0_SEC
+#define BD71828_REG_RTC_ALM0_MINUTE 0x54
+#define BD71828_REG_RTC_ALM0_HOUR 0x55
+#define BD71828_REG_RTC_ALM0_WEEK 0x56
+#define BD71828_REG_RTC_ALM0_DAY 0x57
+#define BD71828_REG_RTC_ALM0_MONTH 0x58
+#define BD71828_REG_RTC_ALM0_YEAR 0x59
+#define BD71828_REG_RTC_ALM0_MASK 0x61
+
+#define BD71828_REG_RTC_ALM1_SEC 0x5a
+#define BD71828_REG_RTC_ALM1_MINUTE 0x5b
+#define BD71828_REG_RTC_ALM1_HOUR 0x5c
+#define BD71828_REG_RTC_ALM1_WEEK 0x5d
+#define BD71828_REG_RTC_ALM1_DAY 0x5e
+#define BD71828_REG_RTC_ALM1_MONTH 0x5f
+#define BD71828_REG_RTC_ALM1_YEAR 0x60
+#define BD71828_REG_RTC_ALM1_MASK 0x62
+
+#define BD71828_REG_RTC_ALM2 0x63
+#define BD71828_REG_RTC_START BD71828_REG_RTC_SEC
+
+/* Charger/Battery */
+#define BD71828_REG_CHG_STATE 0x65
+#define BD71828_REG_CHG_FULL 0xd2
+
+/* LEDs */
+#define BD71828_REG_LED_CTRL 0x4A
+#define BD71828_MASK_LED_AMBER 0x80
+#define BD71828_MASK_LED_GREEN 0x40
+#define BD71828_LED_ON 0xff
+#define BD71828_LED_OFF 0x0
+
+/* IRQ registers */
+#define BD71828_REG_INT_MASK_BUCK 0xd3
+#define BD71828_REG_INT_MASK_DCIN1 0xd4
+#define BD71828_REG_INT_MASK_DCIN2 0xd5
+#define BD71828_REG_INT_MASK_VSYS 0xd6
+#define BD71828_REG_INT_MASK_CHG 0xd7
+#define BD71828_REG_INT_MASK_BAT 0xd8
+#define BD71828_REG_INT_MASK_BAT_MON1 0xd9
+#define BD71828_REG_INT_MASK_BAT_MON2 0xda
+#define BD71828_REG_INT_MASK_BAT_MON3 0xdb
+#define BD71828_REG_INT_MASK_BAT_MON4 0xdc
+#define BD71828_REG_INT_MASK_TEMP 0xdd
+#define BD71828_REG_INT_MASK_RTC 0xde
+
+#define BD71828_REG_INT_MAIN 0xdf
+#define BD71828_REG_INT_BUCK 0xe0
+#define BD71828_REG_INT_DCIN1 0xe1
+#define BD71828_REG_INT_DCIN2 0xe2
+#define BD71828_REG_INT_VSYS 0xe3
+#define BD71828_REG_INT_CHG 0xe4
+#define BD71828_REG_INT_BAT 0xe5
+#define BD71828_REG_INT_BAT_MON1 0xe6
+#define BD71828_REG_INT_BAT_MON2 0xe7
+#define BD71828_REG_INT_BAT_MON3 0xe8
+#define BD71828_REG_INT_BAT_MON4 0xe9
+#define BD71828_REG_INT_TEMP 0xea
+#define BD71828_REG_INT_RTC 0xeb
+#define BD71828_REG_INT_UPDATE 0xec
+
+#define BD71828_MAX_REGISTER BD71828_REG_IO_STAT
+
+/* Masks for main IRQ register bits */
+enum {
+ BD71828_INT_BUCK,
+#define BD71828_INT_BUCK_MASK BIT(BD71828_INT_BUCK)
+ BD71828_INT_DCIN,
+#define BD71828_INT_DCIN_MASK BIT(BD71828_INT_DCIN)
+ BD71828_INT_VSYS,
+#define BD71828_INT_VSYS_MASK BIT(BD71828_INT_VSYS)
+ BD71828_INT_CHG,
+#define BD71828_INT_CHG_MASK BIT(BD71828_INT_CHG)
+ BD71828_INT_BAT,
+#define BD71828_INT_BAT_MASK BIT(BD71828_INT_BAT)
+ BD71828_INT_BAT_MON,
+#define BD71828_INT_BAT_MON_MASK BIT(BD71828_INT_BAT_MON)
+ BD71828_INT_TEMP,
+#define BD71828_INT_TEMP_MASK BIT(BD71828_INT_TEMP)
+ BD71828_INT_RTC,
+#define BD71828_INT_RTC_MASK BIT(BD71828_INT_RTC)
+};
+
+/* Interrupts */
+enum {
+ /* BUCK reg interrupts */
+ BD71828_INT_BUCK1_OCP,
+ BD71828_INT_BUCK2_OCP,
+ BD71828_INT_BUCK3_OCP,
+ BD71828_INT_BUCK4_OCP,
+ BD71828_INT_BUCK5_OCP,
+ BD71828_INT_BUCK6_OCP,
+ BD71828_INT_BUCK7_OCP,
+ BD71828_INT_PGFAULT,
+ /* DCIN1 interrupts */
+ BD71828_INT_DCIN_DET,
+ BD71828_INT_DCIN_RMV,
+ BD71828_INT_CLPS_OUT,
+ BD71828_INT_CLPS_IN,
+ /* DCIN2 interrupts */
+ BD71828_INT_DCIN_MON_RES,
+ BD71828_INT_DCIN_MON_DET,
+ BD71828_INT_LONGPUSH,
+ BD71828_INT_MIDPUSH,
+ BD71828_INT_SHORTPUSH,
+ BD71828_INT_PUSH,
+ BD71828_INT_WDOG,
+ BD71828_INT_SWRESET,
+ /* Vsys */
+ BD71828_INT_VSYS_UV_RES,
+ BD71828_INT_VSYS_UV_DET,
+ BD71828_INT_VSYS_LOW_RES,
+ BD71828_INT_VSYS_LOW_DET,
+ BD71828_INT_VSYS_HALL_IN,
+ BD71828_INT_VSYS_HALL_TOGGLE,
+ BD71828_INT_VSYS_MON_RES,
+ BD71828_INT_VSYS_MON_DET,
+ /* Charger */
+ BD71828_INT_CHG_DCIN_ILIM,
+ BD71828_INT_CHG_TOPOFF_TO_DONE,
+ BD71828_INT_CHG_WDG_TEMP,
+ BD71828_INT_CHG_WDG_TIME,
+ BD71828_INT_CHG_RECHARGE_RES,
+ BD71828_INT_CHG_RECHARGE_DET,
+ BD71828_INT_CHG_RANGED_TEMP_TRANSITION,
+ BD71828_INT_CHG_STATE_TRANSITION,
+ /* Battery */
+ BD71828_INT_BAT_TEMP_NORMAL,
+ BD71828_INT_BAT_TEMP_ERANGE,
+ BD71828_INT_BAT_TEMP_WARN,
+ BD71828_INT_BAT_REMOVED,
+ BD71828_INT_BAT_DETECTED,
+ BD71828_INT_THERM_REMOVED,
+ BD71828_INT_THERM_DETECTED,
+ /* Battery Mon 1 */
+ BD71828_INT_BAT_DEAD,
+ BD71828_INT_BAT_SHORTC_RES,
+ BD71828_INT_BAT_SHORTC_DET,
+ BD71828_INT_BAT_LOW_VOLT_RES,
+ BD71828_INT_BAT_LOW_VOLT_DET,
+ BD71828_INT_BAT_OVER_VOLT_RES,
+ BD71828_INT_BAT_OVER_VOLT_DET,
+ /* Battery Mon 2 */
+ BD71828_INT_BAT_MON_RES,
+ BD71828_INT_BAT_MON_DET,
+ /* Battery Mon 3 (Coulomb counter) */
+ BD71828_INT_BAT_CC_MON1,
+ BD71828_INT_BAT_CC_MON2,
+ BD71828_INT_BAT_CC_MON3,
+ /* Battery Mon 4 */
+ BD71828_INT_BAT_OVER_CURR_1_RES,
+ BD71828_INT_BAT_OVER_CURR_1_DET,
+ BD71828_INT_BAT_OVER_CURR_2_RES,
+ BD71828_INT_BAT_OVER_CURR_2_DET,
+ BD71828_INT_BAT_OVER_CURR_3_RES,
+ BD71828_INT_BAT_OVER_CURR_3_DET,
+ /* Temperature */
+ BD71828_INT_TEMP_BAT_LOW_RES,
+ BD71828_INT_TEMP_BAT_LOW_DET,
+ BD71828_INT_TEMP_BAT_HI_RES,
+ BD71828_INT_TEMP_BAT_HI_DET,
+ BD71828_INT_TEMP_CHIP_OVER_125_RES,
+ BD71828_INT_TEMP_CHIP_OVER_125_DET,
+ BD71828_INT_TEMP_CHIP_OVER_VF_DET,
+ BD71828_INT_TEMP_CHIP_OVER_VF_RES,
+ /* RTC Alarm */
+ BD71828_INT_RTC0,
+ BD71828_INT_RTC1,
+ BD71828_INT_RTC2,
+};
+
+#define BD71828_INT_BUCK1_OCP_MASK 0x1
+#define BD71828_INT_BUCK2_OCP_MASK 0x2
+#define BD71828_INT_BUCK3_OCP_MASK 0x4
+#define BD71828_INT_BUCK4_OCP_MASK 0x8
+#define BD71828_INT_BUCK5_OCP_MASK 0x10
+#define BD71828_INT_BUCK6_OCP_MASK 0x20
+#define BD71828_INT_BUCK7_OCP_MASK 0x40
+#define BD71828_INT_PGFAULT_MASK 0x80
+
+#define BD71828_INT_DCIN_DET_MASK 0x1
+#define BD71828_INT_DCIN_RMV_MASK 0x2
+#define BD71828_INT_CLPS_OUT_MASK 0x4
+#define BD71828_INT_CLPS_IN_MASK 0x8
+ /* DCIN2 interrupts */
+#define BD71828_INT_DCIN_MON_RES_MASK 0x1
+#define BD71828_INT_DCIN_MON_DET_MASK 0x2
+#define BD71828_INT_LONGPUSH_MASK 0x4
+#define BD71828_INT_MIDPUSH_MASK 0x8
+#define BD71828_INT_SHORTPUSH_MASK 0x10
+#define BD71828_INT_PUSH_MASK 0x20
+#define BD71828_INT_WDOG_MASK 0x40
+#define BD71828_INT_SWRESET_MASK 0x80
+ /* Vsys */
+#define BD71828_INT_VSYS_UV_RES_MASK 0x1
+#define BD71828_INT_VSYS_UV_DET_MASK 0x2
+#define BD71828_INT_VSYS_LOW_RES_MASK 0x4
+#define BD71828_INT_VSYS_LOW_DET_MASK 0x8
+#define BD71828_INT_VSYS_HALL_IN_MASK 0x10
+#define BD71828_INT_VSYS_HALL_TOGGLE_MASK 0x20
+#define BD71828_INT_VSYS_MON_RES_MASK 0x40
+#define BD71828_INT_VSYS_MON_DET_MASK 0x80
+ /* Charger */
+#define BD71828_INT_CHG_DCIN_ILIM_MASK 0x1
+#define BD71828_INT_CHG_TOPOFF_TO_DONE_MASK 0x2
+#define BD71828_INT_CHG_WDG_TEMP_MASK 0x4
+#define BD71828_INT_CHG_WDG_TIME_MASK 0x8
+#define BD71828_INT_CHG_RECHARGE_RES_MASK 0x10
+#define BD71828_INT_CHG_RECHARGE_DET_MASK 0x20
+#define BD71828_INT_CHG_RANGED_TEMP_TRANSITION_MASK 0x40
+#define BD71828_INT_CHG_STATE_TRANSITION_MASK 0x80
+ /* Battery */
+#define BD71828_INT_BAT_TEMP_NORMAL_MASK 0x1
+#define BD71828_INT_BAT_TEMP_ERANGE_MASK 0x2
+#define BD71828_INT_BAT_TEMP_WARN_MASK 0x4
+#define BD71828_INT_BAT_REMOVED_MASK 0x10
+#define BD71828_INT_BAT_DETECTED_MASK 0x20
+#define BD71828_INT_THERM_REMOVED_MASK 0x40
+#define BD71828_INT_THERM_DETECTED_MASK 0x80
+ /* Battery Mon 1 */
+#define BD71828_INT_BAT_DEAD_MASK 0x2
+#define BD71828_INT_BAT_SHORTC_RES_MASK 0x4
+#define BD71828_INT_BAT_SHORTC_DET_MASK 0x8
+#define BD71828_INT_BAT_LOW_VOLT_RES_MASK 0x10
+#define BD71828_INT_BAT_LOW_VOLT_DET_MASK 0x20
+#define BD71828_INT_BAT_OVER_VOLT_RES_MASK 0x40
+#define BD71828_INT_BAT_OVER_VOLT_DET_MASK 0x80
+ /* Battery Mon 2 */
+#define BD71828_INT_BAT_MON_RES_MASK 0x1
+#define BD71828_INT_BAT_MON_DET_MASK 0x2
+ /* Battery Mon 3 (Coulomb counter) */
+#define BD71828_INT_BAT_CC_MON1_MASK 0x1
+#define BD71828_INT_BAT_CC_MON2_MASK 0x2
+#define BD71828_INT_BAT_CC_MON3_MASK 0x4
+ /* Battery Mon 4 */
+#define BD71828_INT_BAT_OVER_CURR_1_RES_MASK 0x1
+#define BD71828_INT_BAT_OVER_CURR_1_DET_MASK 0x2
+#define BD71828_INT_BAT_OVER_CURR_2_RES_MASK 0x4
+#define BD71828_INT_BAT_OVER_CURR_2_DET_MASK 0x8
+#define BD71828_INT_BAT_OVER_CURR_3_RES_MASK 0x10
+#define BD71828_INT_BAT_OVER_CURR_3_DET_MASK 0x20
+ /* Temperature */
+#define BD71828_INT_TEMP_BAT_LOW_RES_MASK 0x1
+#define BD71828_INT_TEMP_BAT_LOW_DET_MASK 0x2
+#define BD71828_INT_TEMP_BAT_HI_RES_MASK 0x4
+#define BD71828_INT_TEMP_BAT_HI_DET_MASK 0x8
+#define BD71828_INT_TEMP_CHIP_OVER_125_RES_MASK 0x10
+#define BD71828_INT_TEMP_CHIP_OVER_125_DET_MASK 0x20
+#define BD71828_INT_TEMP_CHIP_OVER_VF_RES_MASK 0x40
+#define BD71828_INT_TEMP_CHIP_OVER_VF_DET_MASK 0x80
+ /* RTC Alarm */
+#define BD71828_INT_RTC0_MASK 0x1
+#define BD71828_INT_RTC1_MASK 0x2
+#define BD71828_INT_RTC2_MASK 0x4
+
+#define BD71828_OUT_TYPE_MASK 0x2
+#define BD71828_OUT_TYPE_OPEN_DRAIN 0x0
+#define BD71828_OUT_TYPE_CMOS 0x2
+
+#endif /* __LINUX_MFD_BD71828_H__ */
diff --git a/include/linux/mfd/rohm-bd718x7.h b/include/linux/mfd/rohm-bd718x7.h
index 7f2dbde402a1..bee2474a8f9f 100644
--- a/include/linux/mfd/rohm-bd718x7.h
+++ b/include/linux/mfd/rohm-bd718x7.h
@@ -191,12 +191,6 @@ enum {
#define IRQ_ON_REQ 0x02
#define IRQ_STBY_REQ 0x01
-/* BD718XX_REG_OUT32K bits */
-#define BD718XX_OUT32K_EN 0x01
-
-/* BD7183XX gated clock rate */
-#define BD718XX_CLK_RATE 32768
-
/* ROHM BD718XX irqs */
enum {
BD718XX_INT_STBY_REQ,
diff --git a/include/linux/mfd/rohm-generic.h b/include/linux/mfd/rohm-generic.h
index bff15ac26f2c..4283b5b33e04 100644
--- a/include/linux/mfd/rohm-generic.h
+++ b/include/linux/mfd/rohm-generic.h
@@ -4,17 +4,83 @@
#ifndef __LINUX_MFD_ROHM_H__
#define __LINUX_MFD_ROHM_H__
-enum {
+#include <linux/regmap.h>
+#include <linux/regulator/driver.h>
+
+enum rohm_chip_type {
ROHM_CHIP_TYPE_BD71837 = 0,
ROHM_CHIP_TYPE_BD71847,
ROHM_CHIP_TYPE_BD70528,
+ ROHM_CHIP_TYPE_BD71828,
ROHM_CHIP_TYPE_AMOUNT
};
struct rohm_regmap_dev {
- unsigned int chip_type;
struct device *dev;
struct regmap *regmap;
};
+enum {
+ ROHM_DVS_LEVEL_UNKNOWN,
+ ROHM_DVS_LEVEL_RUN,
+ ROHM_DVS_LEVEL_IDLE,
+ ROHM_DVS_LEVEL_SUSPEND,
+ ROHM_DVS_LEVEL_LPSR,
+ ROHM_DVS_LEVEL_MAX = ROHM_DVS_LEVEL_LPSR,
+};
+
+/**
+ * struct rohm_dvs_config - dynamic voltage scaling register descriptions
+ *
+ * @level_map: bitmap representing supported run-levels for this
+ * regulator
+ * @run_reg: register address for regulator config at 'run' state
+ * @run_mask: value mask for regulator voltages at 'run' state
+ * @run_on_mask: enable mask for regulator at 'run' state
+ * @idle_reg: register address for regulator config at 'idle' state
+ * @idle_mask: value mask for regulator voltages at 'idle' state
+ * @idle_on_mask: enable mask for regulator at 'idle' state
+ * @suspend_reg: register address for regulator config at 'suspend' state
+ * @suspend_mask: value mask for regulator voltages at 'suspend' state
+ * @suspend_on_mask: enable mask for regulator at 'suspend' state
+ * @lpsr_reg: register address for regulator config at 'lpsr' state
+ * @lpsr_mask: value mask for regulator voltages at 'lpsr' state
+ * @lpsr_on_mask: enable mask for regulator at 'lpsr' state
+ *
+ * Description of ROHM PMICs' voltage configuration registers for different
+ * system states. This is used to correctly configure the PMIC at startup
+ * based on values read from DT.
+ */
+struct rohm_dvs_config {
+ uint64_t level_map;
+ unsigned int run_reg;
+ unsigned int run_mask;
+ unsigned int run_on_mask;
+ unsigned int idle_reg;
+ unsigned int idle_mask;
+ unsigned int idle_on_mask;
+ unsigned int suspend_reg;
+ unsigned int suspend_mask;
+ unsigned int suspend_on_mask;
+ unsigned int lpsr_reg;
+ unsigned int lpsr_mask;
+ unsigned int lpsr_on_mask;
+};
+
+#if IS_ENABLED(CONFIG_REGULATOR_ROHM)
+int rohm_regulator_set_dvs_levels(const struct rohm_dvs_config *dvs,
+ struct device_node *np,
+ const struct regulator_desc *desc,
+ struct regmap *regmap);
+
+#else
+static inline int rohm_regulator_set_dvs_levels(const struct rohm_dvs_config *dvs,
+ struct device_node *np,
+ const struct regulator_desc *desc,
+ struct regmap *regmap)
+{
+ return 0;
+}
+#endif
+
#endif
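
A sketch of how a regulator driver might feed rohm_regulator_set_dvs_levels() from its of_parse_cb. The BD71828 register/mask names come from the new header above, while the level_map encoding (one bit per DVS level) is an assumption:

static const struct rohm_dvs_config buck1_dvs = {
	.level_map	= BIT(ROHM_DVS_LEVEL_RUN) | BIT(ROHM_DVS_LEVEL_IDLE),
	.run_reg	= BD71828_REG_BUCK1_VOLT,
	.run_mask	= BD71828_MASK_BUCK1267_VOLT,
	.run_on_mask	= BD71828_MASK_RUN_EN,
	.idle_reg	= BD71828_REG_BUCK1_IDLE_VOLT,
	.idle_mask	= BD71828_MASK_BUCK1267_VOLT,
	.idle_on_mask	= BD71828_MASK_IDLE_EN,
};

static int buck1_of_parse_cb(struct device_node *np,
			     const struct regulator_desc *desc,
			     struct regulator_config *cfg)
{
	return rohm_regulator_set_dvs_levels(&buck1_dvs, np, desc, cfg->regmap);
}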
diff --git a/include/linux/mfd/rohm-shared.h b/include/linux/mfd/rohm-shared.h
new file mode 100644
index 000000000000..53dd7f638bfd
--- /dev/null
+++ b/include/linux/mfd/rohm-shared.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* Copyright (C) 2020 ROHM Semiconductors */
+
+
+#ifndef __LINUX_MFD_ROHM_SHARED_H__
+#define __LINUX_MFD_ROHM_SHARED_H__
+
+/* RTC definitions shared between BD70528 and BD71828 */
+
+#define BD70528_MASK_RTC_SEC 0x7f
+#define BD70528_MASK_RTC_MINUTE 0x7f
+#define BD70528_MASK_RTC_HOUR_24H 0x80
+#define BD70528_MASK_RTC_HOUR_PM 0x20
+#define BD70528_MASK_RTC_HOUR 0x3f
+#define BD70528_MASK_RTC_DAY 0x3f
+#define BD70528_MASK_RTC_WEEK 0x07
+#define BD70528_MASK_RTC_MONTH 0x1f
+#define BD70528_MASK_RTC_YEAR 0xff
+#define BD70528_MASK_ALM_EN 0x7
+
+#endif /* __LINUX_MFD_ROHM_SHARED_H__ */
diff --git a/include/linux/mfd/stm32-timers.h b/include/linux/mfd/stm32-timers.h
index 067d14655c28..f8db83aedb2b 100644
--- a/include/linux/mfd/stm32-timers.h
+++ b/include/linux/mfd/stm32-timers.h
@@ -70,14 +70,11 @@
#define TIM_CCER_CC4E BIT(12) /* Capt/Comp 4 out Ena */
#define TIM_CCER_CC4P BIT(13) /* Capt/Comp 4 Polarity */
#define TIM_CCER_CCXE (BIT(0) | BIT(4) | BIT(8) | BIT(12))
-#define TIM_BDTR_BKE BIT(12) /* Break input enable */
-#define TIM_BDTR_BKP BIT(13) /* Break input polarity */
+#define TIM_BDTR_BKE(x) BIT(12 + (x) * 12) /* Break input enable */
+#define TIM_BDTR_BKP(x) BIT(13 + (x) * 12) /* Break input polarity */
#define TIM_BDTR_AOE BIT(14) /* Automatic Output Enable */
#define TIM_BDTR_MOE BIT(15) /* Main Output Enable */
-#define TIM_BDTR_BKF (BIT(16) | BIT(17) | BIT(18) | BIT(19))
-#define TIM_BDTR_BK2F (BIT(20) | BIT(21) | BIT(22) | BIT(23))
-#define TIM_BDTR_BK2E BIT(24) /* Break 2 input enable */
-#define TIM_BDTR_BK2P BIT(25) /* Break 2 input polarity */
+#define TIM_BDTR_BKF(x) (0xf << (16 + (x) * 4))
#define TIM_DCR_DBA GENMASK(4, 0) /* DMA base addr */
#define TIM_DCR_DBL GENMASK(12, 8) /* DMA burst len */
@@ -87,8 +84,7 @@
#define TIM_CR2_MMS2_SHIFT 20
#define TIM_SMCR_TS_SHIFT 4
#define TIM_BDTR_BKF_MASK 0xF
-#define TIM_BDTR_BKF_SHIFT 16
-#define TIM_BDTR_BK2F_SHIFT 20
+#define TIM_BDTR_BKF_SHIFT(x) (16 + (x) * 4)
enum stm32_timers_dmas {
STM32_TIMERS_DMA_CH1,
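
For reference, how the new parameterized break-input macros expand; the function below is purely an illustration of the bit positions for break inputs 0 and 1:

static u32 stm32_bdtr_example(void)
{
	u32 bdtr = TIM_BDTR_MOE;

	bdtr |= TIM_BDTR_BKE(0);		/* BIT(12): break 1 enable */
	bdtr |= TIM_BDTR_BKE(1);		/* BIT(24): break 2 enable */
	bdtr |= 2 << TIM_BDTR_BKF_SHIFT(1);	/* break 2 filter value */

	return bdtr;
}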
diff --git a/include/linux/mfd/syscon.h b/include/linux/mfd/syscon.h
index 112dc66262cc..7f20e9b502a5 100644
--- a/include/linux/mfd/syscon.h
+++ b/include/linux/mfd/syscon.h
@@ -23,6 +23,11 @@ extern struct regmap *syscon_regmap_lookup_by_compatible(const char *s);
extern struct regmap *syscon_regmap_lookup_by_phandle(
struct device_node *np,
const char *property);
+extern struct regmap *syscon_regmap_lookup_by_phandle_args(
+ struct device_node *np,
+ const char *property,
+ int arg_count,
+ unsigned int *out_args);
#else
static inline struct regmap *device_node_to_regmap(struct device_node *np)
{
@@ -45,6 +50,15 @@ static inline struct regmap *syscon_regmap_lookup_by_phandle(
{
return ERR_PTR(-ENOTSUPP);
}
+
+static inline struct regmap *syscon_regmap_lookup_by_phandle_args(
+ struct device_node *np,
+ const char *property,
+ int arg_count,
+ unsigned int *out_args)
+{
+ return ERR_PTR(-ENOTSUPP);
+}
#endif
#endif /* __LINUX_MFD_SYSCON_H__ */
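
A sketch of the new lookup-with-arguments call from a consumer driver; the "acme,syscfg" property name and the register usage are hypothetical:

static int foo_syscfg_set_bit(struct device *dev)
{
	unsigned int offset;
	struct regmap *map;

	/* expects a property such as: acme,syscfg = <&syscfg 0x40>; */
	map = syscon_regmap_lookup_by_phandle_args(dev->of_node, "acme,syscfg",
						   1, &offset);
	if (IS_ERR(map))
		return PTR_ERR(map);

	return regmap_update_bits(map, offset, BIT(0), BIT(0));
}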
diff --git a/include/linux/mfd/syscon/atmel-matrix.h b/include/linux/mfd/syscon/atmel-matrix.h
index f61cd127a852..20c25665216a 100644
--- a/include/linux/mfd/syscon/atmel-matrix.h
+++ b/include/linux/mfd/syscon/atmel-matrix.h
@@ -106,7 +106,6 @@
#define AT91_MATRIX_DDR_IOSR BIT(18)
#define AT91_MATRIX_NFD0_SELECT BIT(24)
#define AT91_MATRIX_DDR_MP_EN BIT(25)
-#define AT91_MATRIX_EBI_NUM_CS 8
#define AT91_MATRIX_USBPUCR_PUON BIT(30)
diff --git a/include/linux/mfd/tmio.h b/include/linux/mfd/tmio.h
index 739b7bf37eaa..8ba042430d8e 100644
--- a/include/linux/mfd/tmio.h
+++ b/include/linux/mfd/tmio.h
@@ -79,9 +79,6 @@
/* Some controllers have a CBSY bit */
#define TMIO_MMC_HAVE_CBSY BIT(11)
-/* Some controllers that support HS400 use 4 taps while others use 8. */
-#define TMIO_MMC_HAVE_4TAP_HS400 BIT(13)
-
int tmio_core_mmc_enable(void __iomem *cnf, int shift, unsigned long base);
int tmio_core_mmc_resume(void __iomem *cnf, int shift, unsigned long base);
void tmio_core_mmc_pwr(void __iomem *cnf, int shift, int state);
diff --git a/include/linux/mfd/twl.h b/include/linux/mfd/twl.h
index 44aff52a5002..089e8942223a 100644
--- a/include/linux/mfd/twl.h
+++ b/include/linux/mfd/twl.h
@@ -181,14 +181,18 @@ static inline int twl_i2c_read_u8(u8 mod_no, u8 *val, u8 reg) {
}
static inline int twl_i2c_write_u16(u8 mod_no, u16 val, u8 reg) {
- val = cpu_to_le16(val);
- return twl_i2c_write(mod_no, (u8*) &val, reg, 2);
+ __le16 value;
+
+ value = cpu_to_le16(val);
+ return twl_i2c_write(mod_no, (u8 *) &value, reg, 2);
}
static inline int twl_i2c_read_u16(u8 mod_no, u16 *val, u8 reg) {
int ret;
- ret = twl_i2c_read(mod_no, (u8*) val, reg, 2);
- *val = le16_to_cpu(*val);
+ __le16 value;
+
+ ret = twl_i2c_read(mod_no, (u8 *) &value, reg, 2);
+ *val = le16_to_cpu(value);
return ret;
}
diff --git a/include/linux/mfd/wcd934x/registers.h b/include/linux/mfd/wcd934x/registers.h
new file mode 100644
index 000000000000..bb8d2e276668
--- /dev/null
+++ b/include/linux/mfd/wcd934x/registers.h
@@ -0,0 +1,531 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _WCD934X_REGISTERS_H
+#define _WCD934X_REGISTERS_H
+
+#define WCD934X_CODEC_RPM_CLK_GATE 0x0002
+#define WCD934X_CODEC_RPM_CLK_GATE_MASK GENMASK(1, 0)
+#define WCD934X_CODEC_RPM_CLK_MCLK_CFG 0x0003
+#define WCD934X_CODEC_RPM_CLK_MCLK_CFG_9P6MHZ BIT(0)
+#define WCD934X_CODEC_RPM_CLK_MCLK_CFG_12P288MHZ BIT(1)
+#define WCD934X_CODEC_RPM_CLK_MCLK_CFG_MCLK_MASK GENMASK(1, 0)
+#define WCD934X_CODEC_RPM_RST_CTL 0x0009
+#define WCD934X_CODEC_RPM_PWR_CDC_DIG_HM_CTL 0x0011
+#define WCD934X_CHIP_TIER_CTRL_CHIP_ID_BYTE0 0x0021
+#define WCD934X_CHIP_TIER_CTRL_CHIP_ID_BYTE2 0x0023
+#define WCD934X_CHIP_TIER_CTRL_EFUSE_CTL 0x0025
+#define WCD934X_EFUSE_SENSE_STATE_MASK GENMASK(4, 1)
+#define WCD934X_EFUSE_SENSE_STATE_DEF 0x10
+#define WCD934X_EFUSE_SENSE_EN_MASK BIT(0)
+#define WCD934X_EFUSE_SENSE_ENABLE BIT(0)
+#define WCD934X_CHIP_TIER_CTRL_EFUSE_VAL_OUT14 0x0037
+#define WCD934X_CHIP_TIER_CTRL_EFUSE_VAL_OUT15 0x0038
+#define WCD934X_CHIP_TIER_CTRL_EFUSE_STATUS 0x0039
+#define WCD934X_DATA_HUB_SB_TX10_INP_CFG 0x006b
+#define WCD934X_DATA_HUB_SB_TX11_INP_CFG 0x006c
+#define WCD934X_DATA_HUB_SB_TX13_INP_CFG 0x006e
+#define WCD934X_CPE_FLL_CONFIG_CTL_2 0x0111
+#define WCD934X_CPE_SS_CPARMAD_BUFRDY_INT_PERIOD 0x0213
+#define WCD934X_CPE_SS_SVA_CFG 0x0214
+#define WCD934X_CPE_SS_DMIC0_CTL 0x0218
+#define WCD934X_CPE_SS_DMIC1_CTL 0x0219
+#define WCD934X_DMIC_RATE_MASK GENMASK(3, 1)
+#define WCD934X_CPE_SS_DMIC2_CTL 0x021a
+#define WCD934X_CPE_SS_DMIC_CFG 0x021b
+#define WCD934X_CPE_SS_CPAR_CFG 0x021c
+#define WCD934X_INTR_PIN1_MASK0 0x0409
+#define WCD934X_INTR_PIN1_STATUS0 0x0411
+#define WCD934X_INTR_PIN1_CLEAR0 0x0419
+#define WCD934X_INTR_PIN2_CLEAR3 0x0434
+#define WCD934X_INTR_LEVEL0 0x0461
+/* INTR_REG 0 */
+#define WCD934X_IRQ_SLIMBUS 0
+#define WCD934X_IRQ_MISC 1
+#define WCD934X_IRQ_HPH_PA_OCPL_FAULT 2
+#define WCD934X_IRQ_HPH_PA_OCPR_FAULT 3
+#define WCD934X_IRQ_EAR_PA_OCP_FAULT 4
+#define WCD934X_IRQ_HPH_PA_CNPL_COMPLETE 5
+#define WCD934X_IRQ_HPH_PA_CNPR_COMPLETE 6
+#define WCD934X_IRQ_EAR_PA_CNP_COMPLETE 7
+/* INTR_REG 1 */
+#define WCD934X_IRQ_MBHC_SW_DET 8
+#define WCD934X_IRQ_MBHC_ELECT_INS_REM_DET 9
+#define WCD934X_IRQ_MBHC_BUTTON_PRESS_DET 10
+#define WCD934X_IRQ_MBHC_BUTTON_RELEASE_DET 11
+#define WCD934X_IRQ_MBHC_ELECT_INS_REM_LEG_DET 12
+#define WCD934X_IRQ_RESERVED_0 13
+#define WCD934X_IRQ_RESERVED_1 14
+#define WCD934X_IRQ_RESERVED_2 15
+/* INTR_REG 2 */
+#define WCD934X_IRQ_LINE_PA1_CNP_COMPLETE 16
+#define WCD934X_IRQ_LINE_PA2_CNP_COMPLETE 17
+#define WCD934X_IRQ_SLNQ_ANALOG_ERROR 18
+#define WCD934X_IRQ_RESERVED_3 19
+#define WCD934X_IRQ_SOUNDWIRE 20
+#define WCD934X_IRQ_VDD_DIG_RAMP_COMPLETE 21
+#define WCD934X_IRQ_RCO_ERROR 22
+#define WCD934X_IRQ_CPE_ERROR 23
+/* INTR_REG 3 */
+#define WCD934X_IRQ_MAD_AUDIO 24
+#define WCD934X_IRQ_MAD_BEACON 25
+#define WCD934X_IRQ_MAD_ULTRASOUND 26
+#define WCD934X_IRQ_VBAT_ATTACK 27
+#define WCD934X_IRQ_VBAT_RESTORE 28
+#define WCD934X_IRQ_CPE1_INTR 29
+#define WCD934X_IRQ_RESERVED_4 30
+#define WCD934X_IRQ_SLNQ_DIGITAL 31
+#define WCD934X_NUM_IRQS 32
+#define WCD934X_ANA_BIAS 0x0601
+#define WCD934X_ANA_BIAS_EN_MASK BIT(7)
+#define WCD934X_ANA_BIAS_EN BIT(7)
+#define WCD934X_ANA_PRECHRG_EN_MASK BIT(6)
+#define WCD934X_ANA_PRECHRG_EN BIT(6)
+#define WCD934X_ANA_PRECHRG_MODE_MASK BIT(5)
+#define WCD934X_ANA_PRECHRG_MODE_AUTO BIT(5)
+#define WCD934X_ANA_RCO 0x0603
+#define WCD934X_ANA_RCO_BG_EN_MASK BIT(7)
+#define WCD934X_ANA_RCO_BG_ENABLE BIT(7)
+#define WCD934X_ANA_BUCK_CTL 0x0606
+#define WCD934X_ANA_BUCK_HI_ACCU_PRE_ENX_MASK GENMASK(1, 0)
+#define WCD934X_ANA_BUCK_PRE_EN2_MASK BIT(0)
+#define WCD934X_ANA_BUCK_PRE_EN2_ENABLE BIT(0)
+#define WCD934X_ANA_BUCK_PRE_EN1_MASK BIT(1)
+#define WCD934X_ANA_BUCK_PRE_EN1_ENABLE BIT(1)
+#define WCD934X_ANA_BUCK_HI_ACCU_EN_MASK BIT(2)
+#define WCD934X_ANA_BUCK_HI_ACCU_ENABLE BIT(2)
+#define WCD934X_ANA_RX_SUPPLIES 0x0608
+#define WCD934X_ANA_HPH 0x0609
+#define WCD934X_ANA_EAR 0x060a
+#define WCD934X_ANA_LO_1_2 0x060b
+#define WCD934X_ANA_AMIC1 0x060e
+#define WCD934X_ANA_AMIC2 0x060f
+#define WCD934X_ANA_AMIC3 0x0610
+#define WCD934X_ANA_AMIC4 0x0611
+#define WCD934X_ANA_MBHC_MECH 0x0614
+#define WCD934X_ANA_MBHC_ELECT 0x0615
+#define WCD934X_ANA_MBHC_ZDET 0x0616
+#define WCD934X_ANA_MBHC_RESULT_1 0x0617
+#define WCD934X_ANA_MBHC_RESULT_2 0x0618
+#define WCD934X_ANA_MBHC_RESULT_3 0x0619
+#define WCD934X_ANA_MICB1 0x0622
+#define WCD934X_MICB_VAL_MASK GENMASK(5, 0)
+#define WCD934X_ANA_MICB_EN_MASK GENMASK(7, 6)
+#define WCD934X_ANA_MICB_PULL_UP 0x80
+#define WCD934X_ANA_MICB_ENABLE 0x40
+#define WCD934X_ANA_MICB_DISABLE 0x0
+#define WCD934X_ANA_MICB2 0x0623
+#define WCD934X_ANA_MICB3 0x0625
+#define WCD934X_ANA_MICB4 0x0626
+#define WCD934X_BIAS_VBG_FINE_ADJ 0x0629
+#define WCD934X_MICB1_TEST_CTL_1 0x066b
+#define WCD934X_MICB1_TEST_CTL_2 0x066c
+#define WCD934X_MICB2_TEST_CTL_1 0x066e
+#define WCD934X_MICB3_TEST_CTL_1 0x0671
+#define WCD934X_MICB4_TEST_CTL_1 0x0674
+#define WCD934X_CLASSH_MODE_1 0x0697
+#define WCD934X_CLASSH_MODE_2 0x0698
+#define WCD934X_CLASSH_MODE_3 0x0699
+#define WCD934X_CLASSH_CTRL_VCL_1 0x069a
+#define WCD934X_CLASSH_CTRL_VCL_2 0x069b
+#define WCD934X_CLASSH_CTRL_CCL_1 0x069c
+#define WCD934X_CLASSH_CTRL_CCL_2 0x069d
+#define WCD934X_CLASSH_CTRL_CCL_3 0x069e
+#define WCD934X_CLASSH_CTRL_CCL_4 0x069f
+#define WCD934X_CLASSH_CTRL_CCL_5 0x06a0
+#define WCD934X_CLASSH_BUCK_TMUX_A_D 0x06a1
+#define WCD934X_CLASSH_BUCK_SW_DRV_CNTL 0x06a2
+#define WCD934X_RX_OCP_CTL 0x06b6
+#define WCD934X_RX_OCP_COUNT 0x06b7
+#define WCD934X_HPH_CNP_EN 0x06cb
+#define WCD934X_HPH_CNP_WG_CTL 0x06cc
+#define WCD934X_HPH_GM3_BOOST_EN_MASK BIT(7)
+#define WCD934X_HPH_GM3_BOOST_ENABLE BIT(7)
+#define WCD934X_HPH_OCP_CTL 0x06ce
+#define WCD934X_HPH_L_EN 0x06d3
+#define WCD934X_HPH_GAIN_SRC_SEL_MASK BIT(5)
+#define WCD934X_HPH_GAIN_SRC_SEL_COMPANDER 0
+#define WCD934X_HPH_GAIN_SRC_SEL_REGISTER BIT(5)
+#define WCD934X_HPH_L_TEST 0x06d4
+#define WCD934X_HPH_R_EN 0x06d6
+#define WCD934X_HPH_R_TEST 0x06d7
+#define WCD934X_HPH_OCP_DET_MASK BIT(0)
+#define WCD934X_HPH_OCP_DET_ENABLE BIT(0)
+#define WCD934X_HPH_OCP_DET_DISABLE 0
+#define WCD934X_DIFF_LO_LO2_COMPANDER 0x06ea
+#define WCD934X_DIFF_LO_LO1_COMPANDER 0x06eb
+#define WCD934X_CLK_SYS_MCLK_PRG 0x0711
+#define WCD934X_EXT_CLK_BUF_EN_MASK BIT(7)
+#define WCD934X_EXT_CLK_BUF_EN BIT(7)
+#define WCD934X_EXT_CLK_DIV_RATIO_MASK GENMASK(5, 4)
+#define WCD934X_EXT_CLK_DIV_BY_2 0x10
+#define WCD934X_MCLK_SRC_MASK BIT(1)
+#define WCD934X_MCLK_SRC_EXT_CLK 0
+#define WCD934X_MCLK_EN_MASK BIT(0)
+#define WCD934X_MCLK_EN BIT(0)
+#define WCD934X_CLK_SYS_MCLK2_PRG1 0x0712
+#define WCD934X_CLK_SYS_MCLK2_PRG2 0x0713
+#define WCD934X_SIDO_NEW_VOUT_A_STARTUP 0x071b
+#define WCD934X_SIDO_NEW_VOUT_D_STARTUP 0x071c
+#define WCD934X_SIDO_NEW_VOUT_D_FREQ1 0x071d
+#define WCD934X_SIDO_NEW_VOUT_D_FREQ2 0x071e
+#define WCD934X_SIDO_RIPPLE_FREQ_EN_MASK BIT(0)
+#define WCD934X_SIDO_RIPPLE_FREQ_ENABLE BIT(0)
+#define WCD934X_MBHC_NEW_CTL_2 0x0721
+#define WCD934X_TX_NEW_AMIC_4_5_SEL 0x0727
+#define WCD934X_HPH_NEW_INT_RDAC_HD2_CTL_L 0x0733
+#define WCD934X_HPH_NEW_INT_RDAC_OVERRIDE_CTL 0x0735
+#define WCD934X_HPH_NEW_INT_RDAC_HD2_CTL_R 0x0736
+#define WCD934X_HPH_NEW_INT_HPH_TIMER1 0x073a
+#define WCD934X_HPH_AUTOCHOP_TIMER_EN_MASK BIT(1)
+#define WCD934X_HPH_AUTOCHOP_TIMER_ENABLE BIT(1)
+#define WCD934X_CDC_TX0_TX_PATH_CTL 0x0a31
+#define WCD934X_CDC_TX_PATH_CTL_PCM_RATE_MASK GENMASK(3, 0)
+#define WCD934X_CDC_TX_PATH_CTL(dec) (0xa31 + dec * 0x10)
+#define WCD934X_CDC_TX0_TX_PATH_CFG0 0x0a32
+#define WCD934X_CDC_TX0_TX_PATH_CFG1 0x0a33
+#define WCD934X_CDC_TX0_TX_VOL_CTL 0x0a34
+#define WCD934X_CDC_TX0_TX_PATH_192_CTL 0x0a35
+#define WCD934X_CDC_TX0_TX_PATH_192_CFG 0x0a36
+#define WCD934X_CDC_TX0_TX_PATH_SEC2 0x0a39
+#define WCD934X_HPH_CUTOFF_FREQ_CHANGE_REQ_MASK BIT(1)
+#define WCD934X_HPH_CUTOFF_FREQ_CHANGE_REQ BIT(1)
+#define WCD934X_CDC_TX1_TX_PATH_CTL 0x0a41
+#define WCD934X_CDC_TX1_TX_PATH_CFG0 0x0a42
+#define WCD934X_CDC_TX1_TX_PATH_CFG1 0x0a43
+#define WCD934X_CDC_TX1_TX_VOL_CTL 0x0a44
+#define WCD934X_CDC_TX2_TX_PATH_CTL 0x0a51
+#define WCD934X_CDC_TX2_TX_PATH_CFG0 0x0a52
+#define WCD934X_CDC_TX2_TX_PATH_CFG1 0x0a53
+#define WCD934X_CDC_TX2_TX_VOL_CTL 0x0a54
+#define WCD934X_CDC_TX3_TX_PATH_CTL 0x0a61
+#define WCD934X_CDC_TX3_TX_PATH_CFG0 0x0a62
+#define WCD934X_CDC_TX3_TX_PATH_CFG1 0x0a63
+#define WCD934X_CDC_TX3_TX_VOL_CTL 0x0a64
+#define WCD934X_CDC_TX3_TX_PATH_192_CTL 0x0a65
+#define WCD934X_CDC_TX3_TX_PATH_192_CFG 0x0a66
+#define WCD934X_CDC_TX4_TX_PATH_CTL 0x0a71
+#define WCD934X_CDC_TX4_TX_PATH_CFG0 0x0a72
+#define WCD934X_CDC_TX4_TX_PATH_CFG1 0x0a73
+#define WCD934X_CDC_TX4_TX_VOL_CTL 0x0a74
+#define WCD934X_CDC_TX4_TX_PATH_192_CTL 0x0a75
+#define WCD934X_CDC_TX4_TX_PATH_192_CFG 0x0a76
+#define WCD934X_CDC_TX5_TX_PATH_CTL 0x0a81
+#define WCD934X_CDC_TX5_TX_PATH_CFG0 0x0a82
+#define WCD934X_CDC_TX5_TX_PATH_CFG1 0x0a83
+#define WCD934X_CDC_TX5_TX_VOL_CTL 0x0a84
+#define WCD934X_CDC_TX5_TX_PATH_192_CTL 0x0a85
+#define WCD934X_CDC_TX5_TX_PATH_192_CFG 0x0a86
+#define WCD934X_CDC_TX6_TX_PATH_CTL 0x0a91
+#define WCD934X_CDC_TX6_TX_PATH_CFG0 0x0a92
+#define WCD934X_CDC_TX6_TX_PATH_CFG1 0x0a93
+#define WCD934X_CDC_TX6_TX_VOL_CTL 0x0a94
+#define WCD934X_CDC_TX6_TX_PATH_192_CTL 0x0a95
+#define WCD934X_CDC_TX6_TX_PATH_192_CFG 0x0a96
+#define WCD934X_CDC_TX7_TX_PATH_CTL 0x0aa1
+#define WCD934X_CDC_TX7_TX_PATH_CFG0 0x0aa2
+#define WCD934X_CDC_TX7_TX_PATH_CFG1 0x0aa3
+#define WCD934X_CDC_TX7_TX_VOL_CTL 0x0aa4
+#define WCD934X_CDC_TX7_TX_PATH_192_CTL 0x0aa5
+#define WCD934X_CDC_TX7_TX_PATH_192_CFG 0x0aa6
+#define WCD934X_CDC_TX8_TX_PATH_CTL 0x0ab1
+#define WCD934X_CDC_TX8_TX_PATH_CFG0 0x0ab2
+#define WCD934X_CDC_TX8_TX_PATH_CFG1 0x0ab3
+#define WCD934X_CDC_TX8_TX_VOL_CTL 0x0ab4
+#define WCD934X_CDC_TX8_TX_PATH_192_CTL 0x0ab5
+#define WCD934X_CDC_TX8_TX_PATH_192_CFG 0x0ab6
+#define WCD934X_CDC_TX9_SPKR_PROT_PATH_CFG0 0x0ac3
+#define WCD934X_CDC_TX10_SPKR_PROT_PATH_CFG0 0x0ac7
+#define WCD934X_CDC_TX11_SPKR_PROT_PATH_CFG0 0x0acb
+#define WCD934X_CDC_TX12_SPKR_PROT_PATH_CFG0 0x0acf
+#define WCD934X_CDC_COMPANDER1_CTL0 0x0b01
+#define WCD934X_COMP_CLK_EN_MASK BIT(0)
+#define WCD934X_COMP_CLK_ENABLE BIT(0)
+#define WCD934X_COMP_SOFT_RST_MASK BIT(1)
+#define WCD934X_COMP_SOFT_RST_ENABLE BIT(1)
+#define WCD934X_COMP_HALT_MASK BIT(2)
+#define WCD934X_COMP_HALT BIT(2)
+#define WCD934X_COMP_SOFT_RST_DISABLE 0
+#define WCD934X_CDC_COMPANDER1_CTL7 0x0b08
+#define WCD934X_HPH_LOW_PWR_MODE_EN_MASK BIT(5)
+#define WCD934X_CDC_COMPANDER2_CTL7 0x0b10
+#define WCD934X_CDC_COMPANDER7_CTL3 0x0b34
+#define WCD934X_CDC_COMPANDER7_CTL7 0x0b38
+#define WCD934X_CDC_COMPANDER8_CTL3 0x0b3c
+#define WCD934X_CDC_COMPANDER8_CTL7 0x0b40
+#define WCD934X_CDC_RX0_RX_PATH_CTL 0x0b41
+#define WCD934X_CDC_RX_PGA_MUTE_EN_MASK BIT(4)
+#define WCD934X_CDC_RX_PGA_MUTE_ENABLE BIT(4)
+#define WCD934X_CDC_RX_PGA_MUTE_DISABLE 0
+#define WCD934X_RX_CLK_EN_MASK BIT(5)
+#define WCD934X_RX_CLK_ENABLE BIT(5)
+#define WCD934X_RX_RESET_MASK BIT(6)
+#define WCD934X_RX_RESET_ENABLE BIT(6)
+#define WCD934X_RX_RESET_DISABLE 0
+#define WCD934X_RX_PCM_RATE_MASK GENMASK(3, 0)
+#define WCD934X_RX_PCM_RATE_F_48K 0x04
+#define WCD934X_CDC_RX_PATH_CTL(rx) (0xb41 + rx * 0x14)
+#define WCD934X_CDC_MIX_PCM_RATE_MASK GENMASK(3, 0)
+#define WCD934X_CDC_RX0_RX_PATH_CFG0 0x0b42
+#define WCD934X_RX_DLY_ZN_EN_MASK BIT(3)
+#define WCD934X_RX_DLY_ZN_ENABLE BIT(3)
+#define WCD934X_RX_DLY_ZN_DISABLE 0
+#define WCD934X_CDC_RX0_RX_PATH_CFG1 0x0b43
+#define WCD934X_CDC_RX0_RX_PATH_CFG2 0x0b44
+#define WCD934X_CDC_RX0_RX_VOL_CTL 0x0b45
+#define WCD934X_CDC_RX0_RX_PATH_MIX_CTL 0x0b46
+#define WCD934X_CDC_RX_MIX_CLK_EN_MASK BIT(5)
+#define WCD934X_CDC_RX_MIX_CLK_ENABLE BIT(5)
+#define WCD934X_CDC_RX_PATH_MIX_CTL(rx) (0xb46 + rx * 0x14)
+#define WCD934X_CDC_RX0_RX_PATH_MIX_CFG 0x0b47
+#define WCD934X_CDC_RX0_RX_VOL_MIX_CTL 0x0b48
+#define WCD934X_CDC_RX0_RX_PATH_SEC0 0x0b49
+#define WCD934X_CDC_RX0_RX_PATH_DSMDEM_CTL 0x0b53
+#define WCD934X_CDC_RX1_RX_PATH_CTL 0x0b55
+#define WCD934X_RX_PATH_PGA_MUTE_EN_MASK BIT(4)
+#define WCD934X_RX_PATH_PGA_MUTE_ENABLE BIT(4)
+#define WCD934X_CDC_RX_PATH_PGA_MUTE_DISABLE 0
+#define WCD934X_CDC_RX_PATH_CLK_EN_MASK BIT(5)
+#define WCD934X_CDC_RX_PATH_CLK_ENABLE BIT(5)
+#define WCD934X_CDC_RX_PATH_CLK_DISABLE 0
+#define WCD934X_CDC_RX1_RX_PATH_CFG0 0x0b56
+#define WCD934X_HPH_CMP_EN_MASK BIT(1)
+#define WCD934X_HPH_CMP_ENABLE BIT(1)
+#define WCD934X_HPH_CMP_DISABLE 0
+#define WCD934X_CDC_RX1_RX_PATH_CFG2 0x0b58
+#define WCD934X_CDC_RX1_RX_VOL_CTL 0x0b59
+#define WCD934X_CDC_RX1_RX_PATH_MIX_CTL 0x0b5a
+#define WCD934X_CDC_RX1_RX_PATH_MIX_CFG 0x0b5b
+#define WCD934X_CDC_RX1_RX_VOL_MIX_CTL 0x0b5c
+#define WCD934X_CDC_RX1_RX_PATH_SEC0 0x0b5d
+#define WCD934X_CDC_RX1_RX_PATH_SEC3 0x0b60
+#define WCD934X_CDC_RX_PATH_SEC_HD2_ALPHA_MASK GENMASK(5, 2)
+#define WCD934X_CDC_RX_PATH_SEC_HD2_ALPHA_0P3125 0x14
+#define WCD934X_CDC_RX_PATH_SEC_HD2_ALPHA_0P0000 0
+#define WCD934X_CDC_RX1_RX_PATH_DSMDEM_CTL 0x0b67
+#define WCD934X_CDC_RX2_RX_PATH_CTL 0x0b69
+#define WCD934X_CDC_RX2_RX_PATH_CFG0 0x0b6a
+#define WCD934X_CDC_RX_PATH_CFG_HD2_EN_MASK BIT(2)
+#define WCD934X_CDC_RX_PATH_CFG_HD2_ENABLE BIT(2)
+#define WCD934X_CDC_RX_PATH_CFG_HD2_DISABLE 0
+#define WCD934X_CDC_RX2_RX_PATH_CFG2 0x0b6c
+#define WCD934X_CDC_RX2_RX_VOL_CTL 0x0b6d
+#define WCD934X_CDC_RX2_RX_PATH_MIX_CTL 0x0b6e
+#define WCD934X_CDC_RX2_RX_PATH_MIX_CFG 0x0b6f
+#define WCD934X_CDC_RX2_RX_VOL_MIX_CTL 0x0b70
+#define WCD934X_CDC_RX2_RX_PATH_SEC0 0x0b71
+#define WCD934X_CDC_RX2_RX_PATH_SEC3 0x0b74
+#define WCD934X_CDC_RX2_RX_PATH_DSMDEM_CTL 0x0b7b
+#define WCD934X_CDC_RX3_RX_PATH_CTL 0x0b7d
+#define WCD934X_CDC_RX3_RX_PATH_CFG0 0x0b6e
+#define WCD934X_CDC_RX3_RX_PATH_CFG2 0x0b80
+#define WCD934X_CDC_RX3_RX_VOL_CTL 0x0b81
+#define WCD934X_CDC_RX3_RX_PATH_MIX_CTL 0x0b82
+#define WCD934X_CDC_RX3_RX_PATH_MIX_CFG 0x0b83
+#define WCD934X_CDC_RX3_RX_VOL_MIX_CTL 0x0b84
+#define WCD934X_CDC_RX3_RX_PATH_SEC0 0x0b85
+#define WCD934X_CDC_RX3_RX_PATH_DSMDEM_CTL 0x0b8f
+#define WCD934X_CDC_RX4_RX_PATH_CTL 0x0b91
+#define WCD934X_CDC_RX4_RX_PATH_CFG0 0x0b92
+#define WCD934X_CDC_RX4_RX_PATH_CFG2 0x0b94
+#define WCD934X_CDC_RX4_RX_VOL_CTL 0x0b95
+#define WCD934X_CDC_RX4_RX_PATH_MIX_CTL 0x0b96
+#define WCD934X_CDC_RX4_RX_PATH_MIX_CFG 0x0b97
+#define WCD934X_CDC_RX4_RX_VOL_MIX_CTL 0x0b98
+#define WCD934X_CDC_RX4_RX_PATH_SEC0 0x0b99
+#define WCD934X_CDC_RX4_RX_PATH_DSMDEM_CTL 0x0ba3
+#define WCD934X_CDC_RX7_RX_PATH_CTL 0x0bcd
+#define WCD934X_CDC_RX7_RX_PATH_CFG0 0x0bce
+#define WCD934X_CDC_RX7_RX_PATH_CFG1 0x0bcf
+#define WCD934X_CDC_RX7_RX_PATH_CFG2 0x0bd0
+#define WCD934X_CDC_RX7_RX_VOL_CTL 0x0bd1
+#define WCD934X_CDC_RX7_RX_PATH_MIX_CTL 0x0bd2
+#define WCD934X_CDC_RX7_RX_PATH_MIX_CFG 0x0bd3
+#define WCD934X_CDC_RX7_RX_VOL_MIX_CTL 0x0bd4
+#define WCD934X_CDC_RX7_RX_PATH_SEC1 0x0bd6
+#define WCD934X_CDC_RX7_RX_PATH_MIX_SEC0 0x0bdd
+#define WCD934X_CDC_RX7_RX_PATH_DSMDEM_CTL 0x0bdf
+#define WCD934X_CDC_RX8_RX_PATH_CTL 0x0be1
+#define WCD934X_CDC_RX8_RX_PATH_CFG0 0x0be2
+#define WCD934X_CDC_RX8_RX_PATH_CFG1 0x0be3
+#define WCD934X_RX_SMART_BOOST_EN_MASK BIT(0)
+#define WCD934X_RX_SMART_BOOST_ENABLE BIT(0)
+#define WCD934X_RX_SMART_BOOST_DISABLE 0
+#define WCD934X_CDC_RX8_RX_PATH_CFG2 0x0be4
+#define WCD934X_CDC_RX8_RX_VOL_CTL 0x0be5
+#define WCD934X_CDC_RX8_RX_PATH_MIX_CTL 0x0be6
+#define WCD934X_CDC_RX8_RX_PATH_MIX_CFG 0x0be7
+#define WCD934X_CDC_RX8_RX_VOL_MIX_CTL 0x0be8
+#define WCD934X_CDC_RX8_RX_PATH_SEC1 0x0bea
+#define WCD934X_CDC_RX8_RX_PATH_MIX_SEC0 0x0bf1
+#define WCD934X_CDC_RX8_RX_PATH_DSMDEM_CTL 0x0bf3
+#define WCD934X_CDC_CLSH_DECAY_CTRL 0x0c03
+#define WCD934X_CDC_CLSH_K2_MSB 0x0c0a
+#define WCD934X_CDC_CLSH_K2_LSB 0x0c0b
+#define WCD934X_CDC_CLSH_TEST0 0x0c0f
+#define WCD934X_CDC_BOOST0_BOOST_PATH_CTL 0x0c19
+#define WCD934X_BOOST_PATH_CLK_EN_MASK BIT(4)
+#define WCD934X_BOOST_PATH_CLK_ENABLE BIT(4)
+#define WCD934X_BOOST_PATH_CLK_DISABLE 0
+#define WCD934X_CDC_BOOST0_BOOST_CTL 0x0c1a
+#define WCD934X_CDC_BOOST0_BOOST_CFG1 0x0c1b
+#define WCD934X_CDC_BOOST0_BOOST_CFG2 0x0c1c
+#define WCD934X_CDC_BOOST1_BOOST_PATH_CTL 0x0c21
+#define WCD934X_CDC_BOOST1_BOOST_CTL 0x0c22
+#define WCD934X_CDC_BOOST1_BOOST_CFG1 0x0c23
+#define WCD934X_CDC_BOOST1_BOOST_CFG2 0x0c24
+#define WCD934X_SWR_AHB_BRIDGE_RD_DATA_0 0x0c91
+#define WCD934X_SWR_AHB_BRIDGE_RD_DATA_1 0x0c92
+#define WCD934X_SWR_AHB_BRIDGE_RD_DATA_2 0x0c93
+#define WCD934X_SWR_AHB_BRIDGE_RD_DATA_3 0x0c94
+#define WCD934X_SWR_AHB_BRIDGE_ACCESS_STATUS 0x0c96
+#define WCD934X_CDC_SIDETONE_SRC0_ST_SRC_PATH_CTL 0x0cb5
+#define WCD934X_CDC_SIDETONE_SRC1_ST_SRC_PATH_CTL 0x0cb9
+#define WCD934X_CDC_RX_INP_MUX_RX_INT0_CFG0 0x0d01
+#define WCD934X_CDC_RX_INP_MUX_RX_INT_CFG0(i) (0xd01 + i * 0x2)
+#define WCD934X_CDC_RX_INP_MUX_RX_INT_SEL_MASK GENMASK(3, 0)
+#define WCD934X_CDC_RX_INP_MUX_RX_INT0_CFG1 0x0d02
+#define WCD934X_CDC_RX_INP_MUX_RX_INT_CFG1(i) (0xd02 + i * 0x2)
+#define WCD934X_CDC_RX_INP_MUX_RX_INT1_CFG0 0x0d03
+#define WCD934X_CDC_RX_INP_MUX_RX_INT1_CFG1 0x0d04
+#define WCD934X_CDC_RX_INP_MUX_RX_INT2_CFG0 0x0d05
+#define WCD934X_CDC_RX_INP_MUX_RX_INT2_CFG1 0x0d06
+#define WCD934X_CDC_RX_INP_MUX_RX_INT3_CFG0 0x0d07
+#define WCD934X_CDC_RX_INP_MUX_RX_INT3_CFG1 0x0d08
+#define WCD934X_CDC_RX_INP_MUX_RX_INT4_CFG0 0x0d09
+#define WCD934X_CDC_RX_INP_MUX_RX_INT4_CFG1 0x0d0a
+#define WCD934X_CDC_RX_INP_MUX_RX_INT7_CFG0 0x0d0f
+#define WCD934X_CDC_RX_INP_MUX_RX_INT7_CFG1 0x0d10
+#define WCD934X_CDC_RX_INP_MUX_RX_INT8_CFG0 0x0d11
+#define WCD934X_CDC_RX_INP_MUX_RX_INT8_CFG1 0x0d12
+#define WCD934X_CDC_RX_INP_MUX_RX_MIX_CFG0 0x0d13
+#define WCD934X_CDC_RX_INP_MUX_RX_MIX_CFG1 0x0d14
+#define WCD934X_CDC_RX_INP_MUX_RX_MIX_CFG2 0x0d15
+#define WCD934X_CDC_RX_INP_MUX_RX_MIX_CFG3 0x0d16
+#define WCD934X_CDC_RX_INP_MUX_RX_MIX_CFG4 0x0d17
+#define WCD934X_CDC_RX_INP_MUX_SIDETONE_SRC_CFG0 0x0d18
+#define WCD934X_CDC_RX_INP_MUX_SIDETONE_SRC_CFG1 0x0d19
+#define WCD934X_CDC_TX_INP_MUX_ADC_MUX0_CFG0 0x0d1d
+#define WCD934X_CDC_TX_INP_MUX_ADC_MUX0_CFG1 0x0d1e
+#define WCD934X_CDC_TX_INP_MUX_ADC_MUX1_CFG0 0x0d1f
+#define WCD934X_CDC_TX_INP_MUX_ADC_MUX1_CFG1 0x0d20
+#define WCD934X_CDC_TX_INP_MUX_ADC_MUX2_CFG0 0x0d21
+#define WCD934X_CDC_TX_INP_MUX_ADC_MUX2_CFG1 0x0d22
+#define WCD934X_CDC_TX_INP_MUX_ADC_MUX3_CFG0 0x0d23
+#define WCD934X_CDC_TX_INP_MUX_ADC_MUX3_CFG1 0x0d25
+#define WCD934X_CDC_TX_INP_MUX_ADC_MUX4_CFG0 0x0d26
+#define WCD934X_CDC_TX_INP_MUX_ADC_MUX5_CFG0 0x0d27
+#define WCD934X_CDC_TX_INP_MUX_ADC_MUX6_CFG0 0x0d28
+#define WCD934X_CDC_TX_INP_MUX_ADC_MUX7_CFG0 0x0d29
+#define WCD934X_CDC_TX_INP_MUX_ADC_MUX8_CFG0 0x0d2a
+#define WCD934X_CDC_TX_INP_MUX_ADC_MUX10_CFG0 0x0d2b
+#define WCD934X_CDC_TX_INP_MUX_ADC_MUX11_CFG0 0x0d2c
+#define WCD934X_CDC_TX_INP_MUX_ADC_MUX12_CFG0 0x0d2d
+#define WCD934X_CDC_TX_INP_MUX_ADC_MUX13_CFG0 0x0d2e
+#define WCD934X_CDC_SIDETONE_IIR_INP_MUX_IIR0_MIX_CFG0 0x0d31
+#define WCD934X_CDC_SIDETONE_IIR_INP_MUX_IIR0_MIX_CFG1 0x0d32
+#define WCD934X_CDC_SIDETONE_IIR_INP_MUX_IIR0_MIX_CFG2 0x0d33
+#define WCD934X_CDC_SIDETONE_IIR_INP_MUX_IIR0_MIX_CFG3 0x0d34
+#define WCD934X_CDC_SIDETONE_IIR_INP_MUX_IIR1_MIX_CFG0 0x0d35
+#define WCD934X_CDC_SIDETONE_IIR_INP_MUX_IIR1_MIX_CFG1 0x0d36
+#define WCD934X_CDC_SIDETONE_IIR_INP_MUX_IIR1_MIX_CFG2 0x0d37
+#define WCD934X_CDC_SIDETONE_IIR_INP_MUX_IIR1_MIX_CFG3 0x0d38
+#define WCD934X_CDC_IF_ROUTER_TX_MUX_CFG0 0x0d3a
+#define WCD934X_CDC_IF_ROUTER_TX_MUX_CFG1 0x0d3b
+#define WCD934X_CDC_IF_ROUTER_TX_MUX_CFG2 0x0d3c
+#define WCD934X_CDC_IF_ROUTER_TX_MUX_CFG3 0x0d3d
+#define WCD934X_CDC_CLK_RST_CTRL_MCLK_CONTROL 0x0d41
+#define WCD934X_CDC_MCLK_EN_MASK BIT(0)
+#define WCD934X_CDC_MCLK_EN_ENABLE BIT(0)
+#define WCD934X_CDC_CLK_RST_CTRL_FS_CNT_CONTROL 0x0d42
+#define WCD934X_CDC_FS_MCLK_CNT_EN_MASK BIT(0)
+#define WCD934X_CDC_FS_MCLK_CNT_ENABLE BIT(0)
+#define WCD934X_CDC_CLK_RST_CTRL_SWR_CONTROL 0x0d43
+#define WCD934X_CDC_SWR_CLK_EN_MASK BIT(0)
+#define WCD934X_CDC_SWR_CLK_ENABLE BIT(0)
+#define WCD934X_CDC_CLK_RST_CTRL_DSD_CONTROL 0x0d44
+#define WCD934X_CDC_CLK_RST_CTRL_ASRC_SHARE_CONTROL 0x0d45
+#define WCD934X_CDC_CLK_RST_CTRL_GFM_CONTROL 0x0d46
+#define WCD934X_CDC_SIDETONE_IIR0_IIR_PATH_CTL 0x0d55
+#define WCD934X_CDC_SIDETONE_IIR0_IIR_GAIN_B1_CTL 0x0d56
+#define WCD934X_CDC_SIDETONE_IIR0_IIR_GAIN_B2_CTL 0x0d57
+#define WCD934X_CDC_SIDETONE_IIR0_IIR_GAIN_B3_CTL 0x0d58
+#define WCD934X_CDC_SIDETONE_IIR0_IIR_GAIN_B4_CTL 0x0d59
+#define WCD934X_CDC_SIDETONE_IIR0_IIR_GAIN_B5_CTL 0x0d5a
+#define WCD934X_CDC_SIDETONE_IIR0_IIR_GAIN_B6_CTL 0x0d5b
+#define WCD934X_CDC_SIDETONE_IIR0_IIR_GAIN_B7_CTL 0x0d5c
+#define WCD934X_CDC_SIDETONE_IIR0_IIR_GAIN_B8_CTL 0x0d5d
+#define WCD934X_CDC_SIDETONE_IIR0_IIR_CTL 0x0d5e
+#define WCD934X_CDC_SIDETONE_IIR0_IIR_GAIN_TIMER_CTL 0x0d5f
+#define WCD934X_CDC_SIDETONE_IIR0_IIR_COEF_B1_CTL 0x0d60
+#define WCD934X_CDC_SIDETONE_IIR0_IIR_COEF_B2_CTL 0x0d61
+#define WCD934X_CDC_SIDETONE_IIR1_IIR_PATH_CTL 0x0d65
+#define WCD934X_CDC_SIDETONE_IIR1_IIR_GAIN_B1_CTL 0x0d66
+#define WCD934X_CDC_SIDETONE_IIR1_IIR_GAIN_B2_CTL 0x0d67
+#define WCD934X_CDC_SIDETONE_IIR1_IIR_GAIN_B3_CTL 0x0d68
+#define WCD934X_CDC_SIDETONE_IIR1_IIR_GAIN_B4_CTL 0x0d69
+#define WCD934X_CDC_SIDETONE_IIR1_IIR_GAIN_B5_CTL 0x0d6a
+#define WCD934X_CDC_SIDETONE_IIR1_IIR_GAIN_B6_CTL 0x0d6b
+#define WCD934X_CDC_SIDETONE_IIR1_IIR_GAIN_B7_CTL 0x0d6c
+#define WCD934X_CDC_SIDETONE_IIR1_IIR_GAIN_B8_CTL 0x0d6d
+#define WCD934X_CDC_SIDETONE_IIR1_IIR_CTL 0x0d6e
+#define WCD934X_CDC_SIDETONE_IIR1_IIR_GAIN_TIMER_CTL 0x0d6f
+#define WCD934X_CDC_SIDETONE_IIR1_IIR_COEF_B1_CTL 0x0d70
+#define WCD934X_CDC_SIDETONE_IIR1_IIR_COEF_B2_CTL 0x0d71
+#define WCD934X_CDC_TOP_TOP_CFG1 0x0d82
+#define WCD934X_CDC_TOP_TOP_CFG7 0x0d88
+#define WCD934X_CDC_TOP_HPHL_COMP_LUT 0x0d8b
+#define WCD934X_CDC_TOP_HPHR_COMP_LUT 0x0d90
+#define WCD934X_HPH_LUT_BYPASS_MASK BIT(7)
+#define WCD934X_HPH_LUT_BYPASS_ENABLE BIT(7)
+#define WCD934X_HPH_LUT_BYPASS_DISABLE 0
+#define WCD934X_CODEC_CPR_WR_DATA_0 0x5001
+#define WCD934X_CODEC_CPR_WR_ADDR_0 0x5005
+#define WCD934X_CODEC_CPR_SVS_CX_VDD 0x5022
+#define WCD934X_CODEC_CPR_SVS2_CX_VDD 0x5023
+#define WCD934X_CODEC_CPR_SVS2_MIN_CX_VDD 0x5027
+#define WCD934X_TLMM_DMIC1_CLK_PINCFG 0x8015
+#define WCD934X_TLMM_DMIC1_DATA_PINCFG 0x8016
+#define WCD934X_TLMM_DMIC2_CLK_PINCFG 0x8017
+#define WCD934X_TLMM_DMIC2_DATA_PINCFG 0x8018
+#define WCD934X_TLMM_DMIC3_CLK_PINCFG 0x8019
+#define WCD934X_TLMM_DMIC3_DATA_PINCFG 0x801a
+#define WCD934X_TEST_DEBUG_PAD_DRVCTL_0 0x803b
+#define WCD934X_TEST_DEBUG_NPL_DLY_TEST_1 0x803e
+
+#define WCD934X_MAX_REGISTER 0xffff
+#define WCD934X_SEL_REGISTER 0x800
+#define WCD934X_SEL_MASK 0xff
+#define WCD934X_SEL_SHIFT 0x0
+#define WCD934X_WINDOW_START 0x800
+#define WCD934X_WINDOW_LENGTH 0x100
+
+/* SLIMBUS Slave Registers */
+#define WCD934X_SLIM_PGD_PORT_INT_EN0 0x30
+#define WCD934X_SLIM_PGD_PORT_INT_STATUS_RX_0 0x34
+#define WCD934X_SLIM_PGD_PORT_INT_STATUS_RX_1 0x35
+#define WCD934X_SLIM_PGD_PORT_INT_STATUS_TX_0 0x36
+#define WCD934X_SLIM_PGD_PORT_INT_STATUS_TX_1 0x37
+#define WCD934X_SLIM_PGD_PORT_INT_CLR_RX_0 0x38
+#define WCD934X_SLIM_PGD_PORT_INT_CLR_RX_1 0x39
+#define WCD934X_SLIM_PGD_PORT_INT_CLR_TX_0 0x3A
+#define WCD934X_SLIM_PGD_PORT_INT_CLR_TX_1 0x3B
+#define WCD934X_SLIM_PGD_PORT_INT_RX_SOURCE0 0x60
+#define WCD934X_SLIM_PGD_PORT_INT_TX_SOURCE0 0x70
+#define WCD934X_SLIM_PGD_RX_PORT_CFG(p) (0x30 + p)
+#define WCD934X_SLIM_PGD_PORT_CFG(p) (0x40 + p)
+#define WCD934X_SLIM_PGD_TX_PORT_CFG(p) (0x50 + p)
+#define WCD934X_SLIM_PGD_PORT_INT_SRC(p) (0x60 + p)
+#define WCD934X_SLIM_PGD_PORT_INT_STATUS(p) (0x80 + p)
+#define WCD934X_SLIM_PGD_TX_PORT_MULTI_CHNL_0(p) (0x100 + 4 * p)
+/* ports range from 10-16 */
+#define WCD934X_SLIM_PGD_TX_PORT_MULTI_CHNL_1(p) (0x101 + 4 * p)
+#define WCD934X_SLIM_PGD_RX_PORT_MULTI_CHNL_0(p) (0x140 + 4 * p)
+
+#define SLIM_MANF_ID_QCOM 0x217
+#define SLIM_PROD_CODE_WCD9340 0x250
+#define SLIM_DEV_IDX_WCD9340 0x1
+#define SLIM_DEV_INSTANCE_ID_WCD9340 0
+
+#endif
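
The *_MASK/*_ENABLE pairs above are intended to be applied through the chip's regmap. A minimal sketch (not part of this patch) of enabling the codec MCLK with the standard regmap_update_bits() helper, assuming the regmap comes from the wcd934x driver data introduced below:

	/* Hedged example: set the MCLK enable bit via regmap. */
	static int wcd934x_enable_mclk(struct regmap *regmap)
	{
		return regmap_update_bits(regmap,
					  WCD934X_CDC_CLK_RST_CTRL_MCLK_CONTROL,
					  WCD934X_CDC_MCLK_EN_MASK,
					  WCD934X_CDC_MCLK_EN_ENABLE);
	}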
diff --git a/include/linux/mfd/wcd934x/wcd934x.h b/include/linux/mfd/wcd934x/wcd934x.h
new file mode 100644
index 000000000000..f3c65a035150
--- /dev/null
+++ b/include/linux/mfd/wcd934x/wcd934x.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef __WCD934X_H__
+#define __WCD934X_H__
+#include <linux/clk.h>
+#include <linux/regulator/consumer.h>
+#include <linux/regmap.h>
+#include <linux/slimbus.h>
+
+#define WCD934X_MAX_SUPPLY 5
+
+/**
+ * struct wcd934x_ddata - wcd934x driver data
+ *
+ * @supplies: wcd934x regulator supplies
+ * @irq_data: wcd934x irq_chip data
+ * @regmap: wcd934x regmap pointer
+ * @extclk: External clock
+ * @dev: device instance of wcd934x slim device
+ * @irq: irq for wcd934x.
+ */
+struct wcd934x_ddata {
+ struct regulator_bulk_data supplies[WCD934X_MAX_SUPPLY];
+ struct regmap_irq_chip_data *irq_data;
+ struct regmap *regmap;
+ struct clk *extclk;
+ struct device *dev;
+ int irq;
+};
+
+#endif /* __WCD934X_H__ */
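
A minimal sketch of how an MFD child (for example the codec sub-driver) could pick up this shared state; wcd934x_codec_probe() is an illustrative name and assumes the parent driver stored its wcd934x_ddata with dev_set_drvdata():

	static int wcd934x_codec_probe(struct platform_device *pdev)
	{
		struct wcd934x_ddata *ddata = dev_get_drvdata(pdev->dev.parent);

		if (!ddata || !ddata->regmap)
			return -EINVAL;

		/* Register access from here on goes through ddata->regmap. */
		return 0;
	}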
diff --git a/include/linux/mii.h b/include/linux/mii.h
index 4ce8901a1af6..18c6208f56fc 100644
--- a/include/linux/mii.h
+++ b/include/linux/mii.h
@@ -373,6 +373,56 @@ static inline u32 mii_lpa_to_ethtool_lpa_x(u32 lpa)
}
/**
+ * mii_lpa_mod_linkmode_lpa_sgmii
+ * @lp_advertising: pointer to destination link mode.
+ * @lpa: value of the MII_LPA register
+ *
+ * A small helper function that translates MII_LPA bits to
+ * linkmode advertisement settings for SGMII.
+ * Leaves other bits unchanged.
+ */
+static inline void
+mii_lpa_mod_linkmode_lpa_sgmii(unsigned long *lp_advertising, u32 lpa)
+{
+ u32 speed_duplex = lpa & LPA_SGMII_DPX_SPD_MASK;
+
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, lp_advertising,
+ speed_duplex == LPA_SGMII_1000HALF);
+
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, lp_advertising,
+ speed_duplex == LPA_SGMII_1000FULL);
+
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, lp_advertising,
+ speed_duplex == LPA_SGMII_100HALF);
+
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, lp_advertising,
+ speed_duplex == LPA_SGMII_100FULL);
+
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, lp_advertising,
+ speed_duplex == LPA_SGMII_10HALF);
+
+ linkmode_mod_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, lp_advertising,
+ speed_duplex == LPA_SGMII_10FULL);
+}
+
+/**
+ * mii_lpa_to_linkmode_lpa_sgmii
+ * @lp_advertising: pointer to destination link mode.
+ * @lpa: value of the MII_LPA register
+ *
+ * A small helper function that translates MII_LPA bits to
+ * linkmode advertisement settings when in SGMII mode.
+ * Clears the old value of lp_advertising.
+ */
+static inline void mii_lpa_to_linkmode_lpa_sgmii(unsigned long *lp_advertising,
+ u32 lpa)
+{
+ linkmode_zero(lp_advertising);
+
+ mii_lpa_mod_linkmode_lpa_sgmii(lp_advertising, lpa);
+}
+
+/**
* mii_adv_mod_linkmode_adv_t
* @advertising:pointer to destination link mode.
* @adv: value of the MII_ADVERTISE register
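
A hedged usage sketch for the new SGMII helpers above (not from the patch): a phylib driver resolving the link partner's abilities after reading MII_LPA; phy_read() and phydev->lp_advertising are the usual <linux/phy.h> facilities:

	static int example_read_sgmii_lpa(struct phy_device *phydev)
	{
		int lpa = phy_read(phydev, MII_LPA);

		if (lpa < 0)
			return lpa;

		/* Overwrites lp_advertising with the SGMII-resolved modes. */
		mii_lpa_to_linkmode_lpa_sgmii(phydev->lp_advertising, lpa);
		return 0;
	}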
diff --git a/include/linux/mii_timestamper.h b/include/linux/mii_timestamper.h
new file mode 100644
index 000000000000..fa940bbaf8ae
--- /dev/null
+++ b/include/linux/mii_timestamper.h
@@ -0,0 +1,121 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Support for generic time stamping devices on MII buses.
+ * Copyright (C) 2018 Richard Cochran <richardcochran@gmail.com>
+ */
+#ifndef _LINUX_MII_TIMESTAMPER_H
+#define _LINUX_MII_TIMESTAMPER_H
+
+#include <linux/device.h>
+#include <linux/ethtool.h>
+#include <linux/skbuff.h>
+
+struct phy_device;
+
+/**
+ * struct mii_timestamper - Callback interface to MII time stamping devices.
+ *
+ * @rxtstamp: Requests a Rx timestamp for 'skb'. If the skb is accepted,
+ * the MII time stamping device promises to deliver it using
+ * netif_rx() as soon as a timestamp becomes available. One of
+ * the PTP_CLASS_ values is passed in 'type'. The function
+ * must return true if the skb is accepted for delivery.
+ *
+ * @txtstamp: Requests a Tx timestamp for 'skb'. The MII time stamping
+ * device promises to deliver it using skb_complete_tx_timestamp()
+ * as soon as a timestamp becomes available. One of the PTP_CLASS_
+ * values is passed in 'type'.
+ *
+ * @hwtstamp: Handles SIOCSHWTSTAMP ioctl for hardware time stamping.
+ *
+ * @link_state: Allows the device to respond to changes in the link
+ * state. The caller invokes this function while holding
+ * the phy_device mutex.
+ *
+ * @ts_info: Handles ethtool queries for hardware time stamping.
+ * @device: Remembers the device to which the instance belongs.
+ *
+ * Drivers for PHY time stamping devices should embed their
+ * mii_timestamper within a private structure, obtaining a reference
+ * to it using container_of().
+ *
+ * Drivers for non-PHY time stamping devices should return a pointer
+ * to a mii_timestamper from the probe_channel() callback of their
+ * mii_timestamping_ctrl interface.
+ */
+struct mii_timestamper {
+ bool (*rxtstamp)(struct mii_timestamper *mii_ts,
+ struct sk_buff *skb, int type);
+
+ void (*txtstamp)(struct mii_timestamper *mii_ts,
+ struct sk_buff *skb, int type);
+
+ int (*hwtstamp)(struct mii_timestamper *mii_ts,
+ struct ifreq *ifreq);
+
+ void (*link_state)(struct mii_timestamper *mii_ts,
+ struct phy_device *phydev);
+
+ int (*ts_info)(struct mii_timestamper *mii_ts,
+ struct ethtool_ts_info *ts_info);
+
+ struct device *device;
+};
+
+/**
+ * struct mii_timestamping_ctrl - MII time stamping controller interface.
+ *
+ * @probe_channel: Callback into the controller driver announcing the
+ * presence of the 'port' channel. The 'device' field is the
+ * one that was passed to register_mii_tstamp_controller().
+ * The driver must return either a pointer to a valid
+ * MII timestamper instance or an ERR_PTR() value.
+ *
+ * @release_channel: Releases an instance obtained via .probe_channel.
+ */
+struct mii_timestamping_ctrl {
+ struct mii_timestamper *(*probe_channel)(struct device *device,
+ unsigned int port);
+ void (*release_channel)(struct device *device,
+ struct mii_timestamper *mii_ts);
+};
+
+#ifdef CONFIG_NETWORK_PHY_TIMESTAMPING
+
+int register_mii_tstamp_controller(struct device *device,
+ struct mii_timestamping_ctrl *ctrl);
+
+void unregister_mii_tstamp_controller(struct device *device);
+
+struct mii_timestamper *register_mii_timestamper(struct device_node *node,
+ unsigned int port);
+
+void unregister_mii_timestamper(struct mii_timestamper *mii_ts);
+
+#else
+
+static inline
+int register_mii_tstamp_controller(struct device *device,
+ struct mii_timestamping_ctrl *ctrl)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void unregister_mii_tstamp_controller(struct device *device)
+{
+}
+
+static inline
+struct mii_timestamper *register_mii_timestamper(struct device_node *node,
+ unsigned int port)
+{
+ return NULL;
+}
+
+static inline void unregister_mii_timestamper(struct mii_timestamper *mii_ts)
+{
+}
+
+#endif
+
+#endif
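
To make the container_of() convention described in the header concrete, here is a hedged sketch of a time stamping driver embedding the new struct; struct foo_tstamp and foo_defer_tx() are illustrative names only:

	struct foo_tstamp {
		struct mii_timestamper mii_ts;
		/* device-specific state ... */
	};

	static void foo_txtstamp(struct mii_timestamper *mii_ts,
				 struct sk_buff *skb, int type)
	{
		struct foo_tstamp *foo = container_of(mii_ts, struct foo_tstamp,
						      mii_ts);

		/*
		 * Queue the skb until the hardware timestamp is ready; the
		 * driver must then complete it via skb_complete_tx_timestamp().
		 */
		foo_defer_tx(foo, skb, type);
	}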
diff --git a/include/linux/min_heap.h b/include/linux/min_heap.h
new file mode 100644
index 000000000000..44077837385f
--- /dev/null
+++ b/include/linux/min_heap.h
@@ -0,0 +1,134 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_MIN_HEAP_H
+#define _LINUX_MIN_HEAP_H
+
+#include <linux/bug.h>
+#include <linux/string.h>
+#include <linux/types.h>
+
+/**
+ * struct min_heap - Data structure to hold a min-heap.
+ * @data: Start of array holding the heap elements.
+ * @nr: Number of elements currently in the heap.
+ * @size: Maximum number of elements that can be held in current storage.
+ */
+struct min_heap {
+ void *data;
+ int nr;
+ int size;
+};
+
+/**
+ * struct min_heap_callbacks - Data/functions to customise the min_heap.
+ * @elem_size: The size of each element in bytes.
+ * @less: Partial order function for this heap.
+ * @swp: Swap elements function.
+ */
+struct min_heap_callbacks {
+ int elem_size;
+ bool (*less)(const void *lhs, const void *rhs);
+ void (*swp)(void *lhs, void *rhs);
+};
+
+/* Sift the element at pos down the heap. */
+static __always_inline
+void min_heapify(struct min_heap *heap, int pos,
+ const struct min_heap_callbacks *func)
+{
+ void *left, *right, *parent, *smallest;
+ void *data = heap->data;
+
+ for (;;) {
+ if (pos * 2 + 1 >= heap->nr)
+ break;
+
+ left = data + ((pos * 2 + 1) * func->elem_size);
+ parent = data + (pos * func->elem_size);
+ smallest = parent;
+ if (func->less(left, smallest))
+ smallest = left;
+
+ if (pos * 2 + 2 < heap->nr) {
+ right = data + ((pos * 2 + 2) * func->elem_size);
+ if (func->less(right, smallest))
+ smallest = right;
+ }
+ if (smallest == parent)
+ break;
+ func->swp(smallest, parent);
+ if (smallest == left)
+ pos = (pos * 2) + 1;
+ else
+ pos = (pos * 2) + 2;
+ }
+}
+
+/* Floyd's approach to heapification that is O(nr). */
+static __always_inline
+void min_heapify_all(struct min_heap *heap,
+ const struct min_heap_callbacks *func)
+{
+ int i;
+
+ for (i = heap->nr / 2; i >= 0; i--)
+ min_heapify(heap, i, func);
+}
+
+/* Remove minimum element from the heap, O(log2(nr)). */
+static __always_inline
+void min_heap_pop(struct min_heap *heap,
+ const struct min_heap_callbacks *func)
+{
+ void *data = heap->data;
+
+ if (WARN_ONCE(heap->nr <= 0, "Popping an empty heap"))
+ return;
+
+ /* Place last element at the root (position 0) and then sift down. */
+ heap->nr--;
+ memcpy(data, data + (heap->nr * func->elem_size), func->elem_size);
+ min_heapify(heap, 0, func);
+}
+
+/*
+ * Remove the minimum element and then push the given element. The
+ * implementation performs a single sift (O(log2(nr))) and is therefore
+ * more efficient than a pop followed by a push, which would need two sifts.
+ */
+static __always_inline
+void min_heap_pop_push(struct min_heap *heap,
+ const void *element,
+ const struct min_heap_callbacks *func)
+{
+ memcpy(heap->data, element, func->elem_size);
+ min_heapify(heap, 0, func);
+}
+
+/* Push an element on to the heap, O(log2(nr)). */
+static __always_inline
+void min_heap_push(struct min_heap *heap, const void *element,
+ const struct min_heap_callbacks *func)
+{
+ void *data = heap->data;
+ void *child, *parent;
+ int pos;
+
+ if (WARN_ONCE(heap->nr >= heap->size, "Pushing on a full heap"))
+ return;
+
+ /* Place at the end of data. */
+ pos = heap->nr;
+ memcpy(data + (pos * func->elem_size), element, func->elem_size);
+ heap->nr++;
+
+ /* Sift child at pos up. */
+ for (; pos > 0; pos = (pos - 1) / 2) {
+ child = data + (pos * func->elem_size);
+ parent = data + ((pos - 1) / 2) * func->elem_size;
+ if (func->less(parent, child))
+ break;
+ func->swp(parent, child);
+ }
+}
+
+#endif /* _LINUX_MIN_HEAP_H */
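
A hedged usage sketch for the API above (not part of the patch): an int heap kept in a fixed array, with the ordering and element size supplied through min_heap_callbacks:

	static bool int_less(const void *lhs, const void *rhs)
	{
		return *(const int *)lhs < *(const int *)rhs;
	}

	static void int_swap(void *lhs, void *rhs)
	{
		int tmp = *(int *)lhs;

		*(int *)lhs = *(int *)rhs;
		*(int *)rhs = tmp;
	}

	static const struct min_heap_callbacks int_heap_cb = {
		.elem_size = sizeof(int),
		.less = int_less,
		.swp = int_swap,
	};

	static int example(void)
	{
		int storage[8] = { 5, 2, 7 };
		struct min_heap heap = {
			.data = storage,
			.nr = 3,
			.size = ARRAY_SIZE(storage),
		};
		int zero = 0;

		min_heapify_all(&heap, &int_heap_cb);	/* O(nr) build */
		min_heap_push(&heap, &zero, &int_heap_cb);
		min_heap_pop(&heap, &int_heap_cb);	/* removes the 0 */
		return *(int *)heap.data;		/* smallest remaining: 2 */
	}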
diff --git a/include/linux/miscdevice.h b/include/linux/miscdevice.h
index 3247a3dc7934..becde6981a95 100644
--- a/include/linux/miscdevice.h
+++ b/include/linux/miscdevice.h
@@ -33,6 +33,7 @@
#define SGI_MMTIMER 153
#define STORE_QUEUE_MINOR 155 /* unused */
#define I2O_MINOR 166
+#define AGPGART_MINOR 175
#define HWRNG_MINOR 183
#define MICROCODE_MINOR 184
#define IRNET_MINOR 187
@@ -57,6 +58,7 @@
#define UHID_MINOR 239
#define USERIO_MINOR 240
#define VHOST_VSOCK_MINOR 241
+#define RFKILL_MINOR 242
#define MISC_DYNAMIC_MINOR 255
struct device;
diff --git a/include/linux/mlx4/cq.h b/include/linux/mlx4/cq.h
index 508e8cc5ee86..653d2a0aa44c 100644
--- a/include/linux/mlx4/cq.h
+++ b/include/linux/mlx4/cq.h
@@ -130,6 +130,11 @@ enum {
MLX4_CQE_STATUS_IPOK = 1 << 12,
};
+/* L4_CSUM is logically part of status, but has to be checked against badfcs_enc */
+enum {
+ MLX4_CQE_STATUS_L4_CSUM = 1 << 2,
+};
+
enum {
MLX4_CQE_LLC = 1,
MLX4_CQE_SNAP = 1 << 1,
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index 36e412c3d657..20372de0b587 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -47,7 +47,7 @@
#define DEFAULT_UAR_PAGE_SHIFT 12
#define MAX_MSIX_P_PORT 17
-#define MAX_MSIX 64
+#define MAX_MSIX 128
#define MIN_MSIX_P_PORT 5
#define MLX4_IS_LEGACY_EQ_MODE(dev_cap) ((dev_cap).num_comp_vectors < \
(dev_cap).num_ports * MIN_MSIX_P_PORT)
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index cc1c230f10ee..0e62c3db45e5 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -1105,6 +1105,7 @@ enum mlx5_cap_type {
MLX5_CAP_DEV_MEM,
MLX5_CAP_RESERVED_16,
MLX5_CAP_TLS,
+ MLX5_CAP_VDPA_EMULATION = 0x13,
MLX5_CAP_DEV_EVENT = 0x14,
/* NUM OF CAP Types */
MLX5_CAP_NUM
@@ -1120,6 +1121,9 @@ enum mlx5_pcam_feature_groups {
enum mlx5_mcam_reg_groups {
MLX5_MCAM_REGS_FIRST_128 = 0x0,
+ MLX5_MCAM_REGS_0x9080_0x90FF = 0x1,
+ MLX5_MCAM_REGS_0x9100_0x917F = 0x2,
+ MLX5_MCAM_REGS_NUM = 0x3,
};
enum mlx5_mcam_feature_groups {
@@ -1268,7 +1272,16 @@ enum mlx5_qcam_feature_groups {
MLX5_GET(pcam_reg, (mdev)->caps.pcam, port_access_reg_cap_mask.regs_5000_to_507f.reg)
#define MLX5_CAP_MCAM_REG(mdev, reg) \
- MLX5_GET(mcam_reg, (mdev)->caps.mcam, mng_access_reg_cap_mask.access_regs.reg)
+ MLX5_GET(mcam_reg, (mdev)->caps.mcam[MLX5_MCAM_REGS_FIRST_128], \
+ mng_access_reg_cap_mask.access_regs.reg)
+
+#define MLX5_CAP_MCAM_REG1(mdev, reg) \
+ MLX5_GET(mcam_reg, (mdev)->caps.mcam[MLX5_MCAM_REGS_0x9080_0x90FF], \
+ mng_access_reg_cap_mask.access_regs1.reg)
+
+#define MLX5_CAP_MCAM_REG2(mdev, reg) \
+ MLX5_GET(mcam_reg, (mdev)->caps.mcam[MLX5_MCAM_REGS_0x9100_0x917F], \
+ mng_access_reg_cap_mask.access_regs2.reg)
#define MLX5_CAP_MCAM_FEATURE(mdev, fld) \
MLX5_GET(mcam_reg, (mdev)->caps.mcam, mng_feature_cap_mask.enhanced_features.fld)
@@ -1297,6 +1310,14 @@ enum mlx5_qcam_feature_groups {
#define MLX5_CAP_DEV_EVENT(mdev, cap)\
MLX5_ADDR_OF(device_event_cap, (mdev)->caps.hca_cur[MLX5_CAP_DEV_EVENT], cap)
+#define MLX5_CAP_DEV_VDPA_EMULATION(mdev, cap)\
+ MLX5_GET(device_virtio_emulation_cap, \
+ (mdev)->caps.hca_cur[MLX5_CAP_VDPA_EMULATION], cap)
+
+#define MLX5_CAP64_DEV_VDPA_EMULATION(mdev, cap)\
+ MLX5_GET64(device_virtio_emulation_cap, \
+ (mdev)->caps.hca_cur[MLX5_CAP_VDPA_EMULATION], cap)
+
enum {
MLX5_CMD_STAT_OK = 0x0,
MLX5_CMD_STAT_INT_ERR = 0x1,
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 3e80f03a387f..277a51d3ec40 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -145,6 +145,8 @@ enum {
MLX5_REG_MCC = 0x9062,
MLX5_REG_MCDA = 0x9063,
MLX5_REG_MCAM = 0x907f,
+ MLX5_REG_MIRC = 0x9162,
+ MLX5_REG_RESOURCE_DUMP = 0xC000,
};
enum mlx5_qpts_trust_state {
@@ -461,6 +463,11 @@ struct mlx5_vf_context {
int enabled;
u64 port_guid;
u64 node_guid;
+ /* Valid bits are used to validate administrative GUIDs only;
+ * they are set after ndo_set_vf_guid().
+ */
+ u8 port_guid_valid:1;
+ u8 node_guid_valid:1;
enum port_state_policy policy;
};
@@ -556,8 +563,6 @@ struct mlx5_priv {
struct dentry *cmdif_debugfs;
/* end: qp staff */
- struct xarray mkey_table;
-
/* start: alloc staff */
/* protect buffer alocation according to numa node */
struct mutex alloc_mutex;
@@ -686,7 +691,7 @@ struct mlx5_core_dev {
u32 hca_cur[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)];
u32 hca_max[MLX5_CAP_NUM][MLX5_UN_SZ_DW(hca_cap_union)];
u32 pcam[MLX5_ST_SZ_DW(pcam_reg)];
- u32 mcam[MLX5_ST_SZ_DW(mcam_reg)];
+ u32 mcam[MLX5_MCAM_REGS_NUM][MLX5_ST_SZ_DW(mcam_reg)];
u32 fpga[MLX5_ST_SZ_DW(fpga_cap)];
u32 qcam[MLX5_ST_SZ_DW(qcam_reg)];
u8 embedded_cpu;
@@ -930,8 +935,6 @@ void mlx5_start_health_poll(struct mlx5_core_dev *dev);
void mlx5_stop_health_poll(struct mlx5_core_dev *dev, bool disable_health);
void mlx5_drain_health_wq(struct mlx5_core_dev *dev);
void mlx5_trigger_health_work(struct mlx5_core_dev *dev);
-int mlx5_buf_alloc_node(struct mlx5_core_dev *dev, int size,
- struct mlx5_frag_buf *buf, int node);
int mlx5_buf_alloc(struct mlx5_core_dev *dev,
int size, struct mlx5_frag_buf *buf);
void mlx5_buf_free(struct mlx5_core_dev *dev, struct mlx5_frag_buf *buf);
@@ -942,8 +945,6 @@ struct mlx5_cmd_mailbox *mlx5_alloc_cmd_mailbox_chain(struct mlx5_core_dev *dev,
gfp_t flags, int npages);
void mlx5_free_cmd_mailbox_chain(struct mlx5_core_dev *dev,
struct mlx5_cmd_mailbox *head);
-void mlx5_init_mkey_table(struct mlx5_core_dev *dev);
-void mlx5_cleanup_mkey_table(struct mlx5_core_dev *dev);
int mlx5_core_create_mkey_cb(struct mlx5_core_dev *dev,
struct mlx5_core_mkey *mkey,
struct mlx5_async_ctx *async_ctx, u32 *in,
@@ -1121,6 +1122,11 @@ static inline bool mlx5_core_is_pf(const struct mlx5_core_dev *dev)
return dev->coredev_type == MLX5_COREDEV_PF;
}
+static inline bool mlx5_core_is_vf(const struct mlx5_core_dev *dev)
+{
+ return dev->coredev_type == MLX5_COREDEV_VF;
+}
+
static inline bool mlx5_core_is_ecpf(struct mlx5_core_dev *dev)
{
return dev->caps.embedded_cpu;
@@ -1186,4 +1192,15 @@ enum {
MLX5_TRIGGERED_CMD_COMP = (u64)1 << 32,
};
+static inline bool mlx5_is_roce_enabled(struct mlx5_core_dev *dev)
+{
+ struct devlink *devlink = priv_to_devlink(dev);
+ union devlink_param_value val;
+
+ devlink_param_driverinit_value_get(devlink,
+ DEVLINK_PARAM_GENERIC_ID_ENABLE_ROCE,
+ &val);
+ return val.vbool;
+}
+
#endif /* MLX5_DRIVER_H */
diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h
index 724d276ea133..4cae16016b2b 100644
--- a/include/linux/mlx5/fs.h
+++ b/include/linux/mlx5/fs.h
@@ -48,6 +48,7 @@ enum {
MLX5_FLOW_TABLE_TUNNEL_EN_REFORMAT = BIT(0),
MLX5_FLOW_TABLE_TUNNEL_EN_DECAP = BIT(1),
MLX5_FLOW_TABLE_TERMINATION = BIT(2),
+ MLX5_FLOW_TABLE_UNMANAGED = BIT(3),
};
#define LEFTOVERS_RULE_NUM 2
@@ -80,7 +81,8 @@ enum mlx5_flow_namespace_type {
enum {
FDB_BYPASS_PATH,
- FDB_FAST_PATH,
+ FDB_TC_OFFLOAD,
+ FDB_FT_OFFLOAD,
FDB_SLOW_PATH,
};
@@ -144,19 +146,17 @@ mlx5_get_flow_vport_acl_namespace(struct mlx5_core_dev *dev,
enum mlx5_flow_namespace_type type,
int vport);
-struct mlx5_flow_table *
-mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns,
- int prio,
- int num_flow_table_entries,
- int max_num_groups,
- u32 level,
- u32 flags);
-
struct mlx5_flow_table_attr {
int prio;
int max_fte;
u32 level;
u32 flags;
+ struct mlx5_flow_table *next_ft;
+
+ struct {
+ int max_num_groups;
+ int num_reserved_entries;
+ } autogroup;
};
struct mlx5_flow_table *
@@ -164,6 +164,10 @@ mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
struct mlx5_flow_table_attr *ft_attr);
struct mlx5_flow_table *
+mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns,
+ struct mlx5_flow_table_attr *ft_attr);
+
+struct mlx5_flow_table *
mlx5_create_vport_flow_table(struct mlx5_flow_namespace *ns,
int prio,
int num_flow_table_entries,
@@ -193,6 +197,7 @@ struct mlx5_fs_vlan {
enum {
FLOW_ACT_NO_APPEND = BIT(0),
+ FLOW_ACT_IGNORE_FLOW_LEVEL = BIT(1),
};
struct mlx5_flow_act {
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index 0836fe232f97..bfdf41537cf1 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -87,6 +87,7 @@ enum {
enum {
MLX5_GENERAL_OBJ_TYPES_CAP_SW_ICM = (1ULL << MLX5_OBJ_TYPE_SW_ICM),
MLX5_GENERAL_OBJ_TYPES_CAP_GENEVE_TLV_OPT = (1ULL << 11),
+ MLX5_GENERAL_OBJ_TYPES_CAP_VIRTIO_NET_Q = (1ULL << 13),
};
enum {
@@ -374,8 +375,17 @@ struct mlx5_ifc_flow_table_fields_supported_bits {
u8 outer_esp_spi[0x1];
u8 reserved_at_58[0x2];
u8 bth_dst_qp[0x1];
+ u8 reserved_at_5b[0x5];
- u8 reserved_at_5b[0x25];
+ u8 reserved_at_60[0x18];
+ u8 metadata_reg_c_7[0x1];
+ u8 metadata_reg_c_6[0x1];
+ u8 metadata_reg_c_5[0x1];
+ u8 metadata_reg_c_4[0x1];
+ u8 metadata_reg_c_3[0x1];
+ u8 metadata_reg_c_2[0x1];
+ u8 metadata_reg_c_1[0x1];
+ u8 metadata_reg_c_0[0x1];
};
struct mlx5_ifc_flow_table_prop_layout_bits {
@@ -400,7 +410,8 @@ struct mlx5_ifc_flow_table_prop_layout_bits {
u8 reformat_l3_tunnel_to_l2[0x1];
u8 reformat_l2_to_l3_tunnel[0x1];
u8 reformat_and_modify_action[0x1];
- u8 reserved_at_15[0x2];
+ u8 ignore_flow_level[0x1];
+ u8 reserved_at_16[0x1];
u8 table_miss_action_domain[0x1];
u8 termination_table[0x1];
u8 reserved_at_19[0x7];
@@ -677,7 +688,10 @@ struct mlx5_ifc_flow_table_nic_cap_bits {
u8 nic_rx_multi_path_tirs[0x1];
u8 nic_rx_multi_path_tirs_fts[0x1];
u8 allow_sniffer_and_nic_rx_shared_tir[0x1];
- u8 reserved_at_3[0x1d];
+ u8 reserved_at_3[0x4];
+ u8 sw_owner_reformat_supported[0x1];
+ u8 reserved_at_8[0x18];
+
u8 encap_general_header[0x1];
u8 reserved_at_21[0xa];
u8 log_max_packet_reformat_context[0x5];
@@ -721,7 +735,9 @@ enum {
struct mlx5_ifc_flow_table_eswitch_cap_bits {
u8 fdb_to_vport_reg_c_id[0x8];
- u8 reserved_at_8[0xf];
+ u8 reserved_at_8[0xd];
+ u8 fdb_modify_header_fwd_to_table[0x1];
+ u8 reserved_at_16[0x1];
u8 flow_source[0x1];
u8 reserved_at_18[0x2];
u8 multi_fdb_encap[0x1];
@@ -822,7 +838,9 @@ struct mlx5_ifc_qos_cap_bits {
struct mlx5_ifc_debug_cap_bits {
u8 core_dump_general[0x1];
u8 core_dump_qp[0x1];
- u8 reserved_at_2[0x1e];
+ u8 reserved_at_2[0x7];
+ u8 resource_dump[0x1];
+ u8 reserved_at_a[0x16];
u8 reserved_at_20[0x2];
u8 stall_detect[0x1];
@@ -953,6 +971,19 @@ struct mlx5_ifc_device_event_cap_bits {
u8 user_unaffiliated_events[4][0x40];
};
+struct mlx5_ifc_device_virtio_emulation_cap_bits {
+ u8 reserved_at_0[0x20];
+
+ u8 reserved_at_20[0x13];
+ u8 log_doorbell_stride[0x5];
+ u8 reserved_at_38[0x3];
+ u8 log_doorbell_bar_size[0x5];
+
+ u8 doorbell_bar_offset[0x40];
+
+ u8 reserved_at_80[0x780];
+};
+
enum {
MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_1_BYTE = 0x0,
MLX5_ATOMIC_CAPS_ATOMIC_SIZE_QP_2_BYTES = 0x2,
@@ -1110,6 +1141,7 @@ enum {
};
enum {
+ MLX5_FLEX_PARSER_GENEVE_ENABLED = 1 << 3,
MLX5_FLEX_PARSER_VXLAN_GPE_ENABLED = 1 << 7,
MLX5_FLEX_PARSER_ICMP_V4_ENABLED = 1 << 8,
MLX5_FLEX_PARSER_ICMP_V6_ENABLED = 1 << 9,
@@ -1153,13 +1185,14 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 log_max_srq[0x5];
u8 reserved_at_b0[0x10];
- u8 reserved_at_c0[0x8];
+ u8 max_sgl_for_optimized_performance[0x8];
u8 log_max_cq_sz[0x8];
u8 reserved_at_d0[0xb];
u8 log_max_cq[0x5];
u8 log_max_eq_sz[0x8];
- u8 reserved_at_e8[0x2];
+ u8 relaxed_ordering_write[0x1];
+ u8 relaxed_ordering_read[0x1];
u8 log_max_mkey[0x6];
u8 reserved_at_f0[0x8];
u8 dump_fill_mkey[0x1];
@@ -1182,7 +1215,8 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 reserved_at_130[0xa];
u8 log_max_ra_res_dc[0x6];
- u8 reserved_at_140[0xa];
+ u8 reserved_at_140[0x9];
+ u8 roce_accl[0x1];
u8 log_max_ra_req_qp[0x6];
u8 reserved_at_150[0xa];
u8 log_max_ra_res_qp[0x6];
@@ -1417,14 +1451,15 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 reserved_at_440[0x20];
- u8 tls[0x1];
- u8 reserved_at_461[0x2];
+ u8 reserved_at_460[0x3];
u8 log_max_uctx[0x5];
u8 reserved_at_468[0x3];
u8 log_max_umem[0x5];
u8 max_num_eqs[0x10];
- u8 reserved_at_480[0x3];
+ u8 reserved_at_480[0x1];
+ u8 tls_tx[0x1];
+ u8 reserved_at_482[0x1];
u8 log_max_l2_table[0x5];
u8 reserved_at_488[0x8];
u8 log_uar_page_sz[0x10];
@@ -1752,6 +1787,132 @@ struct mlx5_ifc_resize_field_select_bits {
u8 resize_field_select[0x20];
};
+struct mlx5_ifc_resource_dump_bits {
+ u8 more_dump[0x1];
+ u8 inline_dump[0x1];
+ u8 reserved_at_2[0xa];
+ u8 seq_num[0x4];
+ u8 segment_type[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 vhca_id[0x10];
+
+ u8 index1[0x20];
+
+ u8 index2[0x20];
+
+ u8 num_of_obj1[0x10];
+ u8 num_of_obj2[0x10];
+
+ u8 reserved_at_a0[0x20];
+
+ u8 device_opaque[0x40];
+
+ u8 mkey[0x20];
+
+ u8 size[0x20];
+
+ u8 address[0x40];
+
+ u8 inline_data[52][0x20];
+};
+
+struct mlx5_ifc_resource_dump_menu_record_bits {
+ u8 reserved_at_0[0x4];
+ u8 num_of_obj2_supports_active[0x1];
+ u8 num_of_obj2_supports_all[0x1];
+ u8 must_have_num_of_obj2[0x1];
+ u8 support_num_of_obj2[0x1];
+ u8 num_of_obj1_supports_active[0x1];
+ u8 num_of_obj1_supports_all[0x1];
+ u8 must_have_num_of_obj1[0x1];
+ u8 support_num_of_obj1[0x1];
+ u8 must_have_index2[0x1];
+ u8 support_index2[0x1];
+ u8 must_have_index1[0x1];
+ u8 support_index1[0x1];
+ u8 segment_type[0x10];
+
+ u8 segment_name[4][0x20];
+
+ u8 index1_name[4][0x20];
+
+ u8 index2_name[4][0x20];
+};
+
+struct mlx5_ifc_resource_dump_segment_header_bits {
+ u8 length_dw[0x10];
+ u8 segment_type[0x10];
+};
+
+struct mlx5_ifc_resource_dump_command_segment_bits {
+ struct mlx5_ifc_resource_dump_segment_header_bits segment_header;
+
+ u8 segment_called[0x10];
+ u8 vhca_id[0x10];
+
+ u8 index1[0x20];
+
+ u8 index2[0x20];
+
+ u8 num_of_obj1[0x10];
+ u8 num_of_obj2[0x10];
+};
+
+struct mlx5_ifc_resource_dump_error_segment_bits {
+ struct mlx5_ifc_resource_dump_segment_header_bits segment_header;
+
+ u8 reserved_at_20[0x10];
+ u8 syndrome_id[0x10];
+
+ u8 reserved_at_40[0x40];
+
+ u8 error[8][0x20];
+};
+
+struct mlx5_ifc_resource_dump_info_segment_bits {
+ struct mlx5_ifc_resource_dump_segment_header_bits segment_header;
+
+ u8 reserved_at_20[0x18];
+ u8 dump_version[0x8];
+
+ u8 hw_version[0x20];
+
+ u8 fw_version[0x20];
+};
+
+struct mlx5_ifc_resource_dump_menu_segment_bits {
+ struct mlx5_ifc_resource_dump_segment_header_bits segment_header;
+
+ u8 reserved_at_20[0x10];
+ u8 num_of_records[0x10];
+
+ struct mlx5_ifc_resource_dump_menu_record_bits record[0];
+};
+
+struct mlx5_ifc_resource_dump_resource_segment_bits {
+ struct mlx5_ifc_resource_dump_segment_header_bits segment_header;
+
+ u8 reserved_at_20[0x20];
+
+ u8 index1[0x20];
+
+ u8 index2[0x20];
+
+ u8 payload[0][0x20];
+};
+
+struct mlx5_ifc_resource_dump_terminate_segment_bits {
+ struct mlx5_ifc_resource_dump_segment_header_bits segment_header;
+};
+
+struct mlx5_ifc_menu_resource_dump_response_bits {
+ struct mlx5_ifc_resource_dump_info_segment_bits info;
+ struct mlx5_ifc_resource_dump_command_segment_bits cmd;
+ struct mlx5_ifc_resource_dump_menu_segment_bits menu;
+ struct mlx5_ifc_resource_dump_terminate_segment_bits terminate;
+};
+
enum {
MLX5_MODIFY_FIELD_SELECT_MODIFY_FIELD_SELECT_CQ_PERIOD = 0x1,
MLX5_MODIFY_FIELD_SELECT_MODIFY_FIELD_SELECT_CQ_MAX_COUNT = 0x2,
@@ -2025,7 +2186,9 @@ struct mlx5_ifc_eth_per_prio_grp_data_layout_bits {
u8 rx_pause_transition_low[0x20];
- u8 reserved_at_3c0[0x40];
+ u8 rx_discards_high[0x20];
+
+ u8 rx_discards_low[0x20];
u8 device_stall_minor_watermark_cnt_high[0x20];
@@ -2750,6 +2913,7 @@ union mlx5_ifc_hca_cap_union_bits {
struct mlx5_ifc_fpga_cap_bits fpga_cap;
struct mlx5_ifc_tls_cap_bits tls_cap;
struct mlx5_ifc_device_mem_cap_bits device_mem_cap;
+ struct mlx5_ifc_device_virtio_emulation_cap_bits virtio_emulation_cap;
u8 reserved_at_0[0x8000];
};
@@ -3270,7 +3434,9 @@ struct mlx5_ifc_mkc_bits {
u8 translations_octword_size[0x20];
- u8 reserved_at_1c0[0x1b];
+ u8 reserved_at_1c0[0x19];
+ u8 relaxed_ordering_read[0x1];
+ u8 reserved_at_1d9[0x1];
u8 log_page_size[0x5];
u8 reserved_at_1e0[0x20];
@@ -3997,7 +4163,8 @@ struct mlx5_ifc_set_fte_in_bits {
u8 reserved_at_a0[0x8];
u8 table_id[0x18];
- u8 reserved_at_c0[0x18];
+ u8 ignore_flow_level[0x1];
+ u8 reserved_at_c1[0x17];
u8 modify_enable_mask[0x8];
u8 reserved_at_e0[0x20];
@@ -4730,7 +4897,19 @@ struct mlx5_ifc_query_q_counter_out_bits {
u8 req_cqe_flush_error[0x20];
- u8 reserved_at_620[0x1e0];
+ u8 reserved_at_620[0x20];
+
+ u8 roce_adp_retrans[0x20];
+
+ u8 roce_adp_retrans_to[0x20];
+
+ u8 roce_slow_restart[0x20];
+
+ u8 roce_slow_restart_cnps[0x20];
+
+ u8 roce_slow_restart_trans[0x20];
+
+ u8 reserved_at_6e0[0x120];
};
struct mlx5_ifc_query_q_counter_in_bits {
@@ -5465,15 +5644,32 @@ struct mlx5_ifc_add_action_in_bits {
u8 data[0x20];
};
+struct mlx5_ifc_copy_action_in_bits {
+ u8 action_type[0x4];
+ u8 src_field[0xc];
+ u8 reserved_at_10[0x3];
+ u8 src_offset[0x5];
+ u8 reserved_at_18[0x3];
+ u8 length[0x5];
+
+ u8 reserved_at_20[0x4];
+ u8 dst_field[0xc];
+ u8 reserved_at_30[0x3];
+ u8 dst_offset[0x5];
+ u8 reserved_at_38[0x8];
+};
+
union mlx5_ifc_set_action_in_add_action_in_auto_bits {
struct mlx5_ifc_set_action_in_bits set_action_in;
struct mlx5_ifc_add_action_in_bits add_action_in;
+ struct mlx5_ifc_copy_action_in_bits copy_action_in;
u8 reserved_at_0[0x40];
};
enum {
MLX5_ACTION_TYPE_SET = 0x1,
MLX5_ACTION_TYPE_ADD = 0x2,
+ MLX5_ACTION_TYPE_COPY = 0x3,
};
enum {
@@ -5509,6 +5705,8 @@ enum {
MLX5_ACTION_IN_FIELD_METADATA_REG_C_3 = 0x54,
MLX5_ACTION_IN_FIELD_METADATA_REG_C_4 = 0x55,
MLX5_ACTION_IN_FIELD_METADATA_REG_C_5 = 0x56,
+ MLX5_ACTION_IN_FIELD_METADATA_REG_C_6 = 0x57,
+ MLX5_ACTION_IN_FIELD_METADATA_REG_C_7 = 0x58,
MLX5_ACTION_IN_FIELD_OUT_TCP_SEQ_NUM = 0x59,
MLX5_ACTION_IN_FIELD_OUT_TCP_ACK_NUM = 0x5B,
};
@@ -8405,6 +8603,18 @@ struct mlx5_ifc_pplm_reg_bits {
u8 fec_override_admin_50g[0x4];
u8 fec_override_admin_25g[0x4];
u8 fec_override_admin_10g_40g[0x4];
+
+ u8 fec_override_cap_400g_8x[0x10];
+ u8 fec_override_cap_200g_4x[0x10];
+
+ u8 fec_override_cap_100g_2x[0x10];
+ u8 fec_override_cap_50g_1x[0x10];
+
+ u8 fec_override_admin_400g_8x[0x10];
+ u8 fec_override_admin_200g_4x[0x10];
+
+ u8 fec_override_admin_100g_2x[0x10];
+ u8 fec_override_admin_50g_1x[0x10];
};
struct mlx5_ifc_ppcnt_reg_bits {
@@ -8731,7 +8941,9 @@ struct mlx5_ifc_mpegc_reg_bits {
};
struct mlx5_ifc_pcam_enhanced_features_bits {
- u8 reserved_at_0[0x6d];
+ u8 reserved_at_0[0x68];
+ u8 fec_50G_per_lane_in_pplm[0x1];
+ u8 reserved_at_69[0x4];
u8 rx_icrc_encapsulated_counter[0x1];
u8 reserved_at_6e[0x4];
u8 ptys_extended_ethernet[0x1];
@@ -8816,6 +9028,28 @@ struct mlx5_ifc_mcam_access_reg_bits {
u8 regs_31_to_0[0x20];
};
+struct mlx5_ifc_mcam_access_reg_bits1 {
+ u8 regs_127_to_96[0x20];
+
+ u8 regs_95_to_64[0x20];
+
+ u8 regs_63_to_32[0x20];
+
+ u8 regs_31_to_0[0x20];
+};
+
+struct mlx5_ifc_mcam_access_reg_bits2 {
+ u8 regs_127_to_99[0x1d];
+ u8 mirc[0x1];
+ u8 regs_97_to_96[0x2];
+
+ u8 regs_95_to_64[0x20];
+
+ u8 regs_63_to_32[0x20];
+
+ u8 regs_31_to_0[0x20];
+};
+
struct mlx5_ifc_mcam_reg_bits {
u8 reserved_at_0[0x8];
u8 feature_group[0x8];
@@ -8826,6 +9060,8 @@ struct mlx5_ifc_mcam_reg_bits {
union {
struct mlx5_ifc_mcam_access_reg_bits access_regs;
+ struct mlx5_ifc_mcam_access_reg_bits1 access_regs1;
+ struct mlx5_ifc_mcam_access_reg_bits2 access_regs2;
u8 reserved_at_0[0x80];
} mng_access_reg_cap_mask;
@@ -9431,6 +9667,13 @@ struct mlx5_ifc_mcda_reg_bits {
u8 data[0][0x20];
};
+struct mlx5_ifc_mirc_reg_bits {
+ u8 reserved_at_0[0x18];
+ u8 status_code[0x8];
+
+ u8 reserved_at_20[0x20];
+};
+
union mlx5_ifc_ports_control_registers_document_bits {
struct mlx5_ifc_bufferx_reg_bits bufferx_reg;
struct mlx5_ifc_eth_2819_cntrs_grp_data_layout_bits eth_2819_cntrs_grp_data_layout;
@@ -9486,6 +9729,7 @@ union mlx5_ifc_ports_control_registers_document_bits {
struct mlx5_ifc_mcqi_reg_bits mcqi_reg;
struct mlx5_ifc_mcc_reg_bits mcc_reg;
struct mlx5_ifc_mcda_reg_bits mcda_reg;
+ struct mlx5_ifc_mirc_reg_bits mirc_reg;
u8 reserved_at_0[0x60e0];
};
diff --git a/include/linux/mm.h b/include/linux/mm.h
index a2adf95b3f9c..c54fb96cb1e6 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -70,11 +70,6 @@ static inline void totalram_pages_add(long count)
atomic_long_add(count, &_totalram_pages);
}
-static inline void totalram_pages_set(long val)
-{
- atomic_long_set(&_totalram_pages, val);
-}
-
extern void * high_memory;
extern int page_cluster;
@@ -564,21 +559,6 @@ int vma_is_stack_for_current(struct vm_area_struct *vma);
struct mmu_gather;
struct inode;
-#if !defined(CONFIG_ARCH_HAS_PTE_DEVMAP) || !defined(CONFIG_TRANSPARENT_HUGEPAGE)
-static inline int pmd_devmap(pmd_t pmd)
-{
- return 0;
-}
-static inline int pud_devmap(pud_t pud)
-{
- return 0;
-}
-static inline int pgd_devmap(pgd_t pgd)
-{
- return 0;
-}
-#endif
-
/*
* FIXME: take this include out, include page-flags.h in
* files which need it (119 of them)
@@ -640,24 +620,19 @@ unsigned long vmalloc_to_pfn(const void *addr);
* On nommu, vmalloc/vfree wrap through kmalloc/kfree directly, so there
* is no special casing required.
*/
-static inline bool is_vmalloc_addr(const void *x)
-{
-#ifdef CONFIG_MMU
- unsigned long addr = (unsigned long)x;
-
- return addr >= VMALLOC_START && addr < VMALLOC_END;
-#else
- return false;
-#endif
-}
#ifndef is_ioremap_addr
#define is_ioremap_addr(x) is_vmalloc_addr(x)
#endif
#ifdef CONFIG_MMU
+extern bool is_vmalloc_addr(const void *x);
extern int is_vmalloc_or_module_addr(const void *x);
#else
+static inline bool is_vmalloc_addr(const void *x)
+{
+ return false;
+}
static inline int is_vmalloc_or_module_addr(const void *x)
{
return 0;
@@ -936,10 +911,6 @@ vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf);
#define ZONEID_PGSHIFT (ZONEID_PGOFF * (ZONEID_SHIFT != 0))
-#if SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > BITS_PER_LONG - NR_PAGEFLAGS
-#error SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > BITS_PER_LONG - NR_PAGEFLAGS
-#endif
-
#define ZONES_MASK ((1UL << ZONES_WIDTH) - 1)
#define NODES_MASK ((1UL << NODES_WIDTH) - 1)
#define SECTIONS_MASK ((1UL << SECTIONS_WIDTH) - 1)
@@ -967,9 +938,10 @@ static inline bool is_zone_device_page(const struct page *page)
#endif
#ifdef CONFIG_DEV_PAGEMAP_OPS
-void __put_devmap_managed_page(struct page *page);
+void free_devmap_managed_page(struct page *page);
DECLARE_STATIC_KEY_FALSE(devmap_managed_key);
-static inline bool put_devmap_managed_page(struct page *page)
+
+static inline bool page_is_devmap_managed(struct page *page)
{
if (!static_branch_unlikely(&devmap_managed_key))
return false;
@@ -978,7 +950,6 @@ static inline bool put_devmap_managed_page(struct page *page)
switch (page->pgmap->type) {
case MEMORY_DEVICE_PRIVATE:
case MEMORY_DEVICE_FS_DAX:
- __put_devmap_managed_page(page);
return true;
default:
break;
@@ -986,11 +957,17 @@ static inline bool put_devmap_managed_page(struct page *page)
return false;
}
+void put_devmap_managed_page(struct page *page);
+
#else /* CONFIG_DEV_PAGEMAP_OPS */
-static inline bool put_devmap_managed_page(struct page *page)
+static inline bool page_is_devmap_managed(struct page *page)
{
return false;
}
+
+static inline void put_devmap_managed_page(struct page *page)
+{
+}
#endif /* CONFIG_DEV_PAGEMAP_OPS */
static inline bool is_device_private_page(const struct page *page)
@@ -1043,37 +1020,37 @@ static inline void put_page(struct page *page)
* need to inform the device driver through callback. See
* include/linux/memremap.h and HMM for details.
*/
- if (put_devmap_managed_page(page))
+ if (page_is_devmap_managed(page)) {
+ put_devmap_managed_page(page);
return;
+ }
if (put_page_testzero(page))
__put_page(page);
}
/**
- * put_user_page() - release a gup-pinned page
+ * unpin_user_page() - release a gup-pinned page
* @page: pointer to page to be released
*
- * Pages that were pinned via get_user_pages*() must be released via
- * either put_user_page(), or one of the put_user_pages*() routines
- * below. This is so that eventually, pages that are pinned via
- * get_user_pages*() can be separately tracked and uniquely handled. In
- * particular, interactions with RDMA and filesystems need special
- * handling.
+ * Pages that were pinned via pin_user_pages*() must be released via either
+ * unpin_user_page(), or one of the unpin_user_pages*() routines. This is so
+ * that eventually such pages can be separately tracked and uniquely handled. In
+ * particular, interactions with RDMA and filesystems need special handling.
*
- * put_user_page() and put_page() are not interchangeable, despite this early
- * implementation that makes them look the same. put_user_page() calls must
- * be perfectly matched up with get_user_page() calls.
+ * unpin_user_page() and put_page() are not interchangeable, despite this early
+ * implementation that makes them look the same. unpin_user_page() calls must
+ * be perfectly matched up with pin*() calls.
*/
-static inline void put_user_page(struct page *page)
+static inline void unpin_user_page(struct page *page)
{
put_page(page);
}
-void put_user_pages_dirty_lock(struct page **pages, unsigned long npages,
- bool make_dirty);
+void unpin_user_pages_dirty_lock(struct page **pages, unsigned long npages,
+ bool make_dirty);
-void put_user_pages(struct page **pages, unsigned long npages);
+void unpin_user_pages(struct page **pages, unsigned long npages);
#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
#define SECTION_IN_PAGE_FLAGS
@@ -1521,9 +1498,16 @@ long get_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm,
unsigned long start, unsigned long nr_pages,
unsigned int gup_flags, struct page **pages,
struct vm_area_struct **vmas, int *locked);
+long pin_user_pages_remote(struct task_struct *tsk, struct mm_struct *mm,
+ unsigned long start, unsigned long nr_pages,
+ unsigned int gup_flags, struct page **pages,
+ struct vm_area_struct **vmas, int *locked);
long get_user_pages(unsigned long start, unsigned long nr_pages,
unsigned int gup_flags, struct page **pages,
struct vm_area_struct **vmas);
+long pin_user_pages(unsigned long start, unsigned long nr_pages,
+ unsigned int gup_flags, struct page **pages,
+ struct vm_area_struct **vmas);
long get_user_pages_locked(unsigned long start, unsigned long nr_pages,
unsigned int gup_flags, struct page **pages, int *locked);
long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
@@ -1531,6 +1515,8 @@ long get_user_pages_unlocked(unsigned long start, unsigned long nr_pages,
int get_user_pages_fast(unsigned long start, int nr_pages,
unsigned int gup_flags, struct page **pages);
+int pin_user_pages_fast(unsigned long start, int nr_pages,
+ unsigned int gup_flags, struct page **pages);
int account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc);
int __account_locked_vm(struct mm_struct *mm, unsigned long pages, bool inc,
@@ -1643,19 +1629,27 @@ static inline unsigned long get_mm_counter(struct mm_struct *mm, int member)
return (unsigned long)val;
}
+void mm_trace_rss_stat(struct mm_struct *mm, int member, long count);
+
static inline void add_mm_counter(struct mm_struct *mm, int member, long value)
{
- atomic_long_add(value, &mm->rss_stat.count[member]);
+ long count = atomic_long_add_return(value, &mm->rss_stat.count[member]);
+
+ mm_trace_rss_stat(mm, member, count);
}
static inline void inc_mm_counter(struct mm_struct *mm, int member)
{
- atomic_long_inc(&mm->rss_stat.count[member]);
+ long count = atomic_long_inc_return(&mm->rss_stat.count[member]);
+
+ mm_trace_rss_stat(mm, member, count);
}
static inline void dec_mm_counter(struct mm_struct *mm, int member)
{
- atomic_long_dec(&mm->rss_stat.count[member]);
+ long count = atomic_long_dec_return(&mm->rss_stat.count[member]);
+
+ mm_trace_rss_stat(mm, member, count);
}
/* Optimized variant when page is already known not to be PageAnon */
@@ -1845,12 +1839,12 @@ static inline void mm_dec_nr_ptes(struct mm_struct *mm) {}
int __pte_alloc(struct mm_struct *mm, pmd_t *pmd);
int __pte_alloc_kernel(pmd_t *pmd);
+#if defined(CONFIG_MMU)
+
/*
- * The following ifdef needed to get the 4level-fixup.h header to work.
- * Remove it when 4level-fixup.h has been removed.
+ * The following ifdef is needed to get the 5level-fixup.h header to work.
+ * Remove it when 5level-fixup.h has been removed.
*/
-#if defined(CONFIG_MMU) && !defined(__ARCH_HAS_4LEVEL_HACK)
-
#ifndef __ARCH_HAS_5LEVEL_HACK
static inline p4d_t *p4d_alloc(struct mm_struct *mm, pgd_t *pgd,
unsigned long address)
@@ -1872,7 +1866,7 @@ static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long a
return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address))?
NULL: pmd_offset(pud, address);
}
-#endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */
+#endif /* CONFIG_MMU */
#if USE_SPLIT_PTE_PTLOCKS
#if ALLOC_SPLIT_PTLOCKS
@@ -2188,12 +2182,6 @@ extern int __meminit __early_pfn_to_nid(unsigned long pfn,
struct mminit_pfnnid_cache *state);
#endif
-#if !defined(CONFIG_FLAT_NODE_MEM_MAP)
-void zero_resv_unavail(void);
-#else
-static inline void zero_resv_unavail(void) {}
-#endif
-
extern void set_dma_reserve(unsigned long new_dma_reserve);
extern void memmap_init_zone(unsigned long, int, unsigned long, unsigned long,
enum memmap_context, struct vmem_altmap *);
@@ -2214,9 +2202,6 @@ void warn_alloc(gfp_t gfp_mask, nodemask_t *nodemask, const char *fmt, ...);
extern void setup_per_cpu_pageset(void);
-extern void zone_pcp_update(struct zone *zone);
-extern void zone_pcp_reset(struct zone *zone);
-
/* page_alloc.c */
extern int min_free_kbytes;
extern int watermark_boost_factor;
@@ -2338,6 +2323,7 @@ extern int __do_munmap(struct mm_struct *, unsigned long, size_t,
struct list_head *uf, bool downgrade);
extern int do_munmap(struct mm_struct *, unsigned long, size_t,
struct list_head *uf);
+extern int do_madvise(unsigned long start, size_t len_in, int behavior);
static inline unsigned long
do_mmap_pgoff(struct file *file, unsigned long addr,
@@ -2543,6 +2529,8 @@ vm_fault_t vmf_insert_pfn_prot(struct vm_area_struct *vma, unsigned long addr,
unsigned long pfn, pgprot_t pgprot);
vm_fault_t vmf_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
pfn_t pfn);
+vm_fault_t vmf_insert_mixed_prot(struct vm_area_struct *vma, unsigned long addr,
+ pfn_t pfn, pgprot_t pgprot);
vm_fault_t vmf_insert_mixed_mkwrite(struct vm_area_struct *vma,
unsigned long addr, pfn_t pfn);
int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len);
@@ -2589,13 +2577,15 @@ struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
#define FOLL_ANON 0x8000 /* don't do file mappings */
#define FOLL_LONGTERM 0x10000 /* mapping lifetime is indefinite: see below */
#define FOLL_SPLIT_PMD 0x20000 /* split huge pmd before returning */
+#define FOLL_PIN 0x40000 /* pages must be released via unpin_user_page */
/*
- * NOTE on FOLL_LONGTERM:
+ * FOLL_PIN and FOLL_LONGTERM may be used in various combinations with each
+ * other. Here is what they mean, and how to use them:
*
* FOLL_LONGTERM indicates that the page will be held for an indefinite time
- * period _often_ under userspace control. This is contrasted with
- * iov_iter_get_pages() where usages which are transient.
+ * period _often_ under userspace control. This is in contrast to
+ * iov_iter_get_pages(), whose usages are transient.
*
* FIXME: For pages which are part of a filesystem, mappings are subject to the
* lifetime enforced by the filesystem and we need guarantees that longterm
@@ -2610,11 +2600,39 @@ struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
* Currently only get_user_pages() and get_user_pages_fast() support this flag
* and calls to get_user_pages_[un]locked are specifically not allowed. This
* is due to an incompatibility with the FS DAX check and
- * FAULT_FLAG_ALLOW_RETRY
+ * FAULT_FLAG_ALLOW_RETRY.
*
- * In the CMA case: longterm pins in a CMA region would unnecessarily fragment
- * that region. And so CMA attempts to migrate the page before pinning when
+ * In the CMA case: long term pins in a CMA region would unnecessarily fragment
+ * that region. And so, CMA attempts to migrate the page before pinning, when
* FOLL_LONGTERM is specified.
+ *
+ * FOLL_PIN indicates that a special kind of tracking (not just page->_refcount,
+ * but an additional pin counting system) will be invoked. This is intended for
+ * anything that gets a page reference and then touches page data (for example,
+ * Direct IO). This lets the filesystem know that some non-file-system entity is
+ * potentially changing the pages' data. In contrast to FOLL_GET (whose pages
+ * are released via put_page()), FOLL_PIN pages must be released, ultimately, by
+ * a call to unpin_user_page().
+ *
+ * FOLL_PIN is similar to FOLL_GET: both of these pin pages. They use different
+ * and separate refcounting mechanisms, however, and that means that each has
+ * its own acquire and release mechanisms:
+ *
+ * FOLL_GET: get_user_pages*() to acquire, and put_page() to release.
+ *
+ * FOLL_PIN: pin_user_pages*() to acquire, and unpin_user_page*() to release.
+ *
+ * FOLL_PIN and FOLL_GET are mutually exclusive for a given function call.
+ * (The underlying pages may experience both FOLL_GET-based and FOLL_PIN-based
+ * calls applied to them, and that's perfectly OK. This is a constraint on the
+ * callers, not on the pages.)
+ *
+ * FOLL_PIN should be set internally by the pin_user_pages*() APIs, never
+ * directly by the caller. That's in order to help avoid mismatches when
+ * releasing pages: get_user_pages*() pages must be released via put_page(),
+ * while pin_user_pages*() pages must be released via unpin_user_page().
+ *
+ * Please see Documentation/vm/pin_user_pages.rst for more information.
*/
static inline int vm_fault_to_errno(vm_fault_t vm_fault, int foll_flags)
@@ -2631,7 +2649,9 @@ static inline int vm_fault_to_errno(vm_fault_t vm_fault, int foll_flags)
typedef int (*pte_fn_t)(pte_t *pte, unsigned long addr, void *data);
extern int apply_to_page_range(struct mm_struct *mm, unsigned long address,
unsigned long size, pte_fn_t fn, void *data);
-
+extern int apply_to_existing_page_range(struct mm_struct *mm,
+ unsigned long address, unsigned long size,
+ pte_fn_t fn, void *data);
#ifdef CONFIG_PAGE_POISONING
extern bool page_poisoning_enabled(void);
@@ -2666,14 +2686,26 @@ static inline bool want_init_on_free(void)
!page_poisoning_enabled();
}
-#ifdef CONFIG_DEBUG_PAGEALLOC_ENABLE_DEFAULT
-DECLARE_STATIC_KEY_TRUE(_debug_pagealloc_enabled);
+#ifdef CONFIG_DEBUG_PAGEALLOC
+extern void init_debug_pagealloc(void);
#else
-DECLARE_STATIC_KEY_FALSE(_debug_pagealloc_enabled);
+static inline void init_debug_pagealloc(void) {}
#endif
+extern bool _debug_pagealloc_enabled_early;
+DECLARE_STATIC_KEY_FALSE(_debug_pagealloc_enabled);
static inline bool debug_pagealloc_enabled(void)
{
+ return IS_ENABLED(CONFIG_DEBUG_PAGEALLOC) &&
+ _debug_pagealloc_enabled_early;
+}
+
+/*
+ * For use in fast paths after init_debug_pagealloc() has run, or where a
+ * false negative result is harmless if this is called too early.
+ */
+static inline bool debug_pagealloc_enabled_static(void)
+{
if (!IS_ENABLED(CONFIG_DEBUG_PAGEALLOC))
return false;
@@ -2683,6 +2715,10 @@ static inline bool debug_pagealloc_enabled(void)
#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_ARCH_HAS_SET_DIRECT_MAP)
extern void __kernel_map_pages(struct page *page, int numpages, int enable);
+/*
+ * When called in DEBUG_PAGEALLOC context, the call should most likely be
+ * guarded by debug_pagealloc_enabled() or debug_pagealloc_enabled_static()
+ */
static inline void
kernel_map_pages(struct page *page, int numpages, int enable)
{
@@ -2781,7 +2817,7 @@ extern int sysctl_memory_failure_early_kill;
extern int sysctl_memory_failure_recovery;
extern void shake_page(struct page *p, int access);
extern atomic_long_t num_poisoned_pages __read_mostly;
-extern int soft_offline_page(struct page *page, int flags);
+extern int soft_offline_page(unsigned long pfn, int flags);
/*
@@ -2873,5 +2909,17 @@ static inline int pages_identical(struct page *page1, struct page *page2)
return !memcmp_pages(page1, page2);
}
+#ifdef CONFIG_MAPPING_DIRTY_HELPERS
+unsigned long clean_record_shared_mapping_range(struct address_space *mapping,
+ pgoff_t first_index, pgoff_t nr,
+ pgoff_t bitmap_pgoff,
+ unsigned long *bitmap,
+ pgoff_t *start,
+ pgoff_t *end);
+
+unsigned long wp_shared_mapping_range(struct address_space *mapping,
+ pgoff_t first_index, pgoff_t nr);
+#endif
+
#endif /* __KERNEL__ */
#endif /* _LINUX_MM_H */
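
A hedged sketch of the FOLL_PIN pairing documented in the hunk above: pages acquired with pin_user_pages_fast() are released with unpin_user_pages_dirty_lock(), never with put_page(); the actual I/O on the pinned pages is omitted:

	static int example_pin_and_dirty(unsigned long uaddr,
					 struct page **pages, int nr)
	{
		int pinned = pin_user_pages_fast(uaddr, nr, FOLL_WRITE, pages);

		if (pinned <= 0)
			return pinned ? pinned : -EFAULT;

		/* ... hardware or kernel code writes into the page data ... */

		/* FOLL_PIN pages must go back via unpin_user_page*(). */
		unpin_user_pages_dirty_lock(pages, pinned, true);
		return 0;
	}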
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 270aa8fd2800..c28911c3afa8 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -312,7 +312,12 @@ struct vm_area_struct {
/* Second cache line starts here. */
struct mm_struct *vm_mm; /* The address space we belong to. */
- pgprot_t vm_page_prot; /* Access permissions of this VMA. */
+
+ /*
+ * Access permissions of this VMA.
+ * See vmf_insert_mixed_prot() for discussion.
+ */
+ pgprot_t vm_page_prot;
unsigned long vm_flags; /* Flags, see mm.h. */
/*
@@ -490,7 +495,7 @@ struct mm_struct {
/* store ref to file /proc/<pid>/exe symlink points to */
struct file __rcu *exe_file;
#ifdef CONFIG_MMU_NOTIFIER
- struct mmu_notifier_mm *mmu_notifier_mm;
+ struct mmu_notifier_subscriptions *notifier_subscriptions;
#endif
#if defined(CONFIG_TRANSPARENT_HUGEPAGE) && !USE_SPLIT_PMD_PTLOCKS
pgtable_t pmd_huge_pte; /* protected by page_table_lock */
diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h
index 9b6336ad3266..cf3780a6ccc4 100644
--- a/include/linux/mmc/card.h
+++ b/include/linux/mmc/card.h
@@ -226,7 +226,7 @@ struct mmc_queue_req;
* MMC Physical partitions
*/
struct mmc_part {
- unsigned int size; /* partition size (in bytes) */
+ u64 size; /* partition size (in bytes) */
unsigned int part_cfg; /* partition type */
char name[MAX_MMC_PART_NAME_LEN];
bool force_ro; /* to make boot parts RO by default */
@@ -291,6 +291,7 @@ struct mmc_card {
struct sd_switch_caps sw_caps; /* switch (CMD6) caps */
unsigned int sdio_funcs; /* number of SDIO functions */
+ atomic_t sdio_funcs_probed; /* number of probed SDIO funcs */
struct sdio_cccr cccr; /* common card info */
struct sdio_cis cis; /* common tuple info */
struct sdio_func *sdio_func[SDIO_MAX_FUNCS]; /* SDIO functions (devices) */
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index ba703384bea0..4c5eb3aa8e72 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -333,6 +333,7 @@ struct mmc_host {
MMC_CAP_UHS_SDR50 | MMC_CAP_UHS_SDR104 | \
MMC_CAP_UHS_DDR50)
#define MMC_CAP_SYNC_RUNTIME_PM (1 << 21) /* Synced runtime PM suspends. */
+#define MMC_CAP_NEED_RSP_BUSY (1 << 22) /* Commands with R1B can't use R1. */
#define MMC_CAP_DRIVER_TYPE_A (1 << 23) /* Host supports Driver Type A */
#define MMC_CAP_DRIVER_TYPE_C (1 << 24) /* Host supports Driver Type C */
#define MMC_CAP_DRIVER_TYPE_D (1 << 25) /* Host supports Driver Type D */
diff --git a/include/linux/mmc/sdio_ids.h b/include/linux/mmc/sdio_ids.h
index d1a5d5df02f5..2e9a6e4634eb 100644
--- a/include/linux/mmc/sdio_ids.h
+++ b/include/linux/mmc/sdio_ids.h
@@ -41,8 +41,10 @@
#define SDIO_DEVICE_ID_BROADCOM_43455 0xa9bf
#define SDIO_DEVICE_ID_BROADCOM_4354 0x4354
#define SDIO_DEVICE_ID_BROADCOM_4356 0x4356
+#define SDIO_DEVICE_ID_BROADCOM_4359 0x4359
#define SDIO_DEVICE_ID_CYPRESS_4373 0x4373
#define SDIO_DEVICE_ID_CYPRESS_43012 43012
+#define SDIO_DEVICE_ID_CYPRESS_89359 0x4355
#define SDIO_VENDOR_ID_INTEL 0x0089
#define SDIO_DEVICE_ID_INTEL_IWMC3200WIMAX 0x1402
@@ -71,6 +73,8 @@
#define SDIO_VENDOR_ID_TI 0x0097
#define SDIO_DEVICE_ID_TI_WL1271 0x4076
+#define SDIO_VENDOR_ID_TI_WL1251 0x104c
+#define SDIO_DEVICE_ID_TI_WL1251 0x9066
#define SDIO_VENDOR_ID_STE 0x0020
#define SDIO_DEVICE_ID_STE_CW1200 0x2280
diff --git a/include/linux/mmc/slot-gpio.h b/include/linux/mmc/slot-gpio.h
index 0de3d7c016cd..4ae2f2908f99 100644
--- a/include/linux/mmc/slot-gpio.h
+++ b/include/linux/mmc/slot-gpio.h
@@ -17,10 +17,9 @@ int mmc_gpio_get_ro(struct mmc_host *host);
int mmc_gpio_get_cd(struct mmc_host *host);
int mmc_gpiod_request_cd(struct mmc_host *host, const char *con_id,
unsigned int idx, bool override_active_level,
- unsigned int debounce, bool *gpio_invert);
+ unsigned int debounce);
int mmc_gpiod_request_ro(struct mmc_host *host, const char *con_id,
- unsigned int idx,
- unsigned int debounce, bool *gpio_invert);
+ unsigned int idx, unsigned int debounce);
void mmc_gpio_set_cd_isr(struct mmc_host *host,
irqreturn_t (*isr)(int irq, void *dev_id));
int mmc_gpio_set_cd_wake(struct mmc_host *host, bool on);
diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
index 1bd8e6a09a3c..736f6918335e 100644
--- a/include/linux/mmu_notifier.h
+++ b/include/linux/mmu_notifier.h
@@ -6,9 +6,12 @@
#include <linux/spinlock.h>
#include <linux/mm_types.h>
#include <linux/srcu.h>
+#include <linux/interval_tree.h>
+struct mmu_notifier_subscriptions;
struct mmu_notifier;
-struct mmu_notifier_ops;
+struct mmu_notifier_range;
+struct mmu_interval_notifier;
/**
* enum mmu_notifier_event - reason for the mmu notifier callback
@@ -31,6 +34,9 @@ struct mmu_notifier_ops;
* access flags). User should soft dirty the page in the end callback to make
* sure that anyone relying on soft dirtyness catch pages that might be written
* through non CPU mappings.
+ *
+ * @MMU_NOTIFY_RELEASE: used during mmu_interval_notifier invalidate to signal
+ * that the mm refcount is zero and the range is no longer accessible.
*/
enum mmu_notifier_event {
MMU_NOTIFY_UNMAP = 0,
@@ -38,38 +44,11 @@ enum mmu_notifier_event {
MMU_NOTIFY_PROTECTION_VMA,
MMU_NOTIFY_PROTECTION_PAGE,
MMU_NOTIFY_SOFT_DIRTY,
-};
-
-#ifdef CONFIG_MMU_NOTIFIER
-
-#ifdef CONFIG_LOCKDEP
-extern struct lockdep_map __mmu_notifier_invalidate_range_start_map;
-#endif
-
-/*
- * The mmu notifier_mm structure is allocated and installed in
- * mm->mmu_notifier_mm inside the mm_take_all_locks() protected
- * critical section and it's released only when mm_count reaches zero
- * in mmdrop().
- */
-struct mmu_notifier_mm {
- /* all mmu notifiers registerd in this mm are queued in this list */
- struct hlist_head list;
- /* to serialize the list modifications and hlist_unhashed */
- spinlock_t lock;
+ MMU_NOTIFY_RELEASE,
};
#define MMU_NOTIFIER_RANGE_BLOCKABLE (1 << 0)
-struct mmu_notifier_range {
- struct vm_area_struct *vma;
- struct mm_struct *mm;
- unsigned long start;
- unsigned long end;
- unsigned flags;
- enum mmu_notifier_event event;
-};
-
struct mmu_notifier_ops {
/*
* Called either by mmu_notifier_unregister or when the mm is
@@ -94,7 +73,7 @@ struct mmu_notifier_ops {
* through the gart alias address, so leading to memory
* corruption.
*/
- void (*release)(struct mmu_notifier *mn,
+ void (*release)(struct mmu_notifier *subscription,
struct mm_struct *mm);
/*
@@ -106,7 +85,7 @@ struct mmu_notifier_ops {
* Start-end is necessary in case the secondary MMU is mapping the page
* at a smaller granularity than the primary MMU.
*/
- int (*clear_flush_young)(struct mmu_notifier *mn,
+ int (*clear_flush_young)(struct mmu_notifier *subscription,
struct mm_struct *mm,
unsigned long start,
unsigned long end);
@@ -116,7 +95,7 @@ struct mmu_notifier_ops {
* latter, it is supposed to test-and-clear the young/accessed bitflag
* in the secondary pte, but it may omit flushing the secondary tlb.
*/
- int (*clear_young)(struct mmu_notifier *mn,
+ int (*clear_young)(struct mmu_notifier *subscription,
struct mm_struct *mm,
unsigned long start,
unsigned long end);
@@ -127,7 +106,7 @@ struct mmu_notifier_ops {
* frequently used without actually clearing the flag or tearing
* down the secondary mapping on the page.
*/
- int (*test_young)(struct mmu_notifier *mn,
+ int (*test_young)(struct mmu_notifier *subscription,
struct mm_struct *mm,
unsigned long address);
@@ -135,7 +114,7 @@ struct mmu_notifier_ops {
* change_pte is called in cases that pte mapping to page is changed:
* for example, when ksm remaps pte to point to a new shared page.
*/
- void (*change_pte)(struct mmu_notifier *mn,
+ void (*change_pte)(struct mmu_notifier *subscription,
struct mm_struct *mm,
unsigned long address,
pte_t pte);
@@ -190,9 +169,9 @@ struct mmu_notifier_ops {
* invalidate_range_end.
*
*/
- int (*invalidate_range_start)(struct mmu_notifier *mn,
+ int (*invalidate_range_start)(struct mmu_notifier *subscription,
const struct mmu_notifier_range *range);
- void (*invalidate_range_end)(struct mmu_notifier *mn,
+ void (*invalidate_range_end)(struct mmu_notifier *subscription,
const struct mmu_notifier_range *range);
/*
@@ -213,8 +192,10 @@ struct mmu_notifier_ops {
* of what was passed to invalidate_range_start()/end(), if
* called between those functions.
*/
- void (*invalidate_range)(struct mmu_notifier *mn, struct mm_struct *mm,
- unsigned long start, unsigned long end);
+ void (*invalidate_range)(struct mmu_notifier *subscription,
+ struct mm_struct *mm,
+ unsigned long start,
+ unsigned long end);
/*
* These callbacks are used with the get/put interface to manage the
@@ -227,7 +208,7 @@ struct mmu_notifier_ops {
* and cannot sleep.
*/
struct mmu_notifier *(*alloc_notifier)(struct mm_struct *mm);
- void (*free_notifier)(struct mmu_notifier *mn);
+ void (*free_notifier)(struct mmu_notifier *subscription);
};
/*
@@ -249,9 +230,44 @@ struct mmu_notifier {
unsigned int users;
};
+/**
+ * struct mmu_interval_notifier_ops
+ * @invalidate: Upon return the caller must stop using any SPTEs within this
+ * range. This function can sleep. Return false only if sleeping
+ * was required but mmu_notifier_range_blockable(range) is false.
+ */
+struct mmu_interval_notifier_ops {
+ bool (*invalidate)(struct mmu_interval_notifier *interval_sub,
+ const struct mmu_notifier_range *range,
+ unsigned long cur_seq);
+};
+
+struct mmu_interval_notifier {
+ struct interval_tree_node interval_tree;
+ const struct mmu_interval_notifier_ops *ops;
+ struct mm_struct *mm;
+ struct hlist_node deferred_item;
+ unsigned long invalidate_seq;
+};
+
+#ifdef CONFIG_MMU_NOTIFIER
+
+#ifdef CONFIG_LOCKDEP
+extern struct lockdep_map __mmu_notifier_invalidate_range_start_map;
+#endif
+
+struct mmu_notifier_range {
+ struct vm_area_struct *vma;
+ struct mm_struct *mm;
+ unsigned long start;
+ unsigned long end;
+ unsigned flags;
+ enum mmu_notifier_event event;
+};
+
static inline int mm_has_notifiers(struct mm_struct *mm)
{
- return unlikely(mm->mmu_notifier_mm);
+ return unlikely(mm->notifier_subscriptions);
}
struct mmu_notifier *mmu_notifier_get_locked(const struct mmu_notifier_ops *ops,
@@ -266,16 +282,95 @@ mmu_notifier_get(const struct mmu_notifier_ops *ops, struct mm_struct *mm)
up_write(&mm->mmap_sem);
return ret;
}
-void mmu_notifier_put(struct mmu_notifier *mn);
+void mmu_notifier_put(struct mmu_notifier *subscription);
void mmu_notifier_synchronize(void);
-extern int mmu_notifier_register(struct mmu_notifier *mn,
+extern int mmu_notifier_register(struct mmu_notifier *subscription,
struct mm_struct *mm);
-extern int __mmu_notifier_register(struct mmu_notifier *mn,
+extern int __mmu_notifier_register(struct mmu_notifier *subscription,
struct mm_struct *mm);
-extern void mmu_notifier_unregister(struct mmu_notifier *mn,
+extern void mmu_notifier_unregister(struct mmu_notifier *subscription,
struct mm_struct *mm);
-extern void __mmu_notifier_mm_destroy(struct mm_struct *mm);
+
+unsigned long
+mmu_interval_read_begin(struct mmu_interval_notifier *interval_sub);
+int mmu_interval_notifier_insert(struct mmu_interval_notifier *interval_sub,
+ struct mm_struct *mm, unsigned long start,
+ unsigned long length,
+ const struct mmu_interval_notifier_ops *ops);
+int mmu_interval_notifier_insert_locked(
+ struct mmu_interval_notifier *interval_sub, struct mm_struct *mm,
+ unsigned long start, unsigned long length,
+ const struct mmu_interval_notifier_ops *ops);
+void mmu_interval_notifier_remove(struct mmu_interval_notifier *interval_sub);
+
+/**
+ * mmu_interval_set_seq - Save the invalidation sequence
+ * @interval_sub: The subscription passed to invalidate
+ * @cur_seq: The cur_seq passed to the invalidate() callback
+ *
+ * This must be called unconditionally from the invalidate callback of a
+ * struct mmu_interval_notifier_ops under the same lock that is used to call
+ * mmu_interval_read_retry(). It updates the sequence number for later use by
+ * mmu_interval_read_retry(). The provided cur_seq will always be odd.
+ *
+ * If the caller does not call mmu_interval_read_begin() or
+ * mmu_interval_read_retry() then this call is not required.
+ */
+static inline void
+mmu_interval_set_seq(struct mmu_interval_notifier *interval_sub,
+ unsigned long cur_seq)
+{
+ WRITE_ONCE(interval_sub->invalidate_seq, cur_seq);
+}
+
+/**
+ * mmu_interval_read_retry - End a read side critical section against a VA range
+ * @interval_sub: The subscription
+ * @seq: The return of the paired mmu_interval_read_begin()
+ *
+ * This MUST be called under a user provided lock that is also held
+ * unconditionally by op->invalidate() when it calls mmu_interval_set_seq().
+ *
+ * Each call should be paired with a single mmu_interval_read_begin() and
+ * should be used to conclude the read side.
+ *
+ * Returns true if an invalidation collided with this critical section, and
+ * the caller should retry.
+ */
+static inline bool
+mmu_interval_read_retry(struct mmu_interval_notifier *interval_sub,
+ unsigned long seq)
+{
+ return interval_sub->invalidate_seq != seq;
+}
+
+/**
+ * mmu_interval_check_retry - Test if a collision has occurred
+ * @interval_sub: The subscription
+ * @seq: The return of the matching mmu_interval_read_begin()
+ *
+ * This can be used in the critical section between mmu_interval_read_begin()
+ * and mmu_interval_read_retry(). A return of true indicates an invalidation
+ * has collided with this critical region and a future
+ * mmu_interval_read_retry() will return true.
+ *
+ * False is not reliable and only suggests a collision may not have
+ * occurred. It can be called many times and does not have to hold the user
+ * provided lock.
+ *
+ * This call can be used as part of loops and other expensive operations to
+ * expedite a retry.
+ */
+static inline bool
+mmu_interval_check_retry(struct mmu_interval_notifier *interval_sub,
+ unsigned long seq)
+{
+ /* Pairs with the WRITE_ONCE in mmu_interval_set_seq() */
+ return READ_ONCE(interval_sub->invalidate_seq) != seq;
+}
+
+extern void __mmu_notifier_subscriptions_destroy(struct mm_struct *mm);
extern void __mmu_notifier_release(struct mm_struct *mm);
extern int __mmu_notifier_clear_flush_young(struct mm_struct *mm,
unsigned long start,
@@ -391,15 +486,15 @@ static inline void mmu_notifier_invalidate_range(struct mm_struct *mm,
__mmu_notifier_invalidate_range(mm, start, end);
}
-static inline void mmu_notifier_mm_init(struct mm_struct *mm)
+static inline void mmu_notifier_subscriptions_init(struct mm_struct *mm)
{
- mm->mmu_notifier_mm = NULL;
+ mm->notifier_subscriptions = NULL;
}
-static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
+static inline void mmu_notifier_subscriptions_destroy(struct mm_struct *mm)
{
if (mm_has_notifiers(mm))
- __mmu_notifier_mm_destroy(mm);
+ __mmu_notifier_subscriptions_destroy(mm);
}
@@ -603,11 +698,11 @@ static inline void mmu_notifier_invalidate_range(struct mm_struct *mm,
{
}
-static inline void mmu_notifier_mm_init(struct mm_struct *mm)
+static inline void mmu_notifier_subscriptions_init(struct mm_struct *mm)
{
}
-static inline void mmu_notifier_mm_destroy(struct mm_struct *mm)
+static inline void mmu_notifier_subscriptions_destroy(struct mm_struct *mm)
{
}
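
As a usage note for the interval-notifier API added above: the begin/retry helpers form a collision-detection loop around a driver-held lock. The following is only a sketch of that pattern, assuming a driver-private mutex as the "user provided lock"; my_mirror, my_invalidate and my_fault are illustrative names, not part of the patch.

struct my_mirror {
	struct mmu_interval_notifier notifier;
	struct mutex lock;		/* the "user provided lock" */
	/* ... device page-table state ... */
};

static bool my_invalidate(struct mmu_interval_notifier *interval_sub,
			  const struct mmu_notifier_range *range,
			  unsigned long cur_seq)
{
	struct my_mirror *m = container_of(interval_sub, struct my_mirror,
					   notifier);

	if (!mmu_notifier_range_blockable(range))
		return false;		/* caller retries in blocking context */

	mutex_lock(&m->lock);
	mmu_interval_set_seq(interval_sub, cur_seq);
	/* ... tear down device mappings for range->start .. range->end ... */
	mutex_unlock(&m->lock);
	return true;
}

static const struct mmu_interval_notifier_ops my_ops = {
	.invalidate = my_invalidate,
};

/* registration, e.g. at setup time:
 *	mmu_interval_notifier_insert(&m->notifier, mm, start, length, &my_ops);
 */

/* Reader side: (re)build device mappings, retrying if an invalidation raced. */
static int my_fault(struct my_mirror *m, unsigned long addr)
{
	unsigned long seq;

again:
	seq = mmu_interval_read_begin(&m->notifier);
	/* ... walk CPU page tables / fault pages, outside the lock ... */
	mutex_lock(&m->lock);
	if (mmu_interval_read_retry(&m->notifier, seq)) {
		mutex_unlock(&m->lock);
		goto again;
	}
	/* ... commit the translations to the device under m->lock ... */
	mutex_unlock(&m->lock);
	return 0;
}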
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index bda20282746b..462f6873905a 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -215,9 +215,8 @@ enum node_stat_item {
NR_INACTIVE_FILE, /* " " " " " */
NR_ACTIVE_FILE, /* " " " " " */
NR_UNEVICTABLE, /* " " " " " */
- NR_SLAB_RECLAIMABLE, /* Please do not reorder this item */
- NR_SLAB_UNRECLAIMABLE, /* and this one without looking at
- * memcg_flush_percpu_vmstats() first. */
+ NR_SLAB_RECLAIMABLE,
+ NR_SLAB_UNRECLAIMABLE,
NR_ISOLATED_ANON, /* Temporary isolated pages from anon lru */
NR_ISOLATED_FILE, /* Temporary isolated pages from file lru */
WORKINGSET_NODES,
@@ -273,12 +272,12 @@ enum lru_list {
#define for_each_evictable_lru(lru) for (lru = 0; lru <= LRU_ACTIVE_FILE; lru++)
-static inline int is_file_lru(enum lru_list lru)
+static inline bool is_file_lru(enum lru_list lru)
{
return (lru == LRU_INACTIVE_FILE || lru == LRU_ACTIVE_FILE);
}
-static inline int is_active_lru(enum lru_list lru)
+static inline bool is_active_lru(enum lru_list lru)
{
return (lru == LRU_ACTIVE_ANON || lru == LRU_ACTIVE_FILE);
}
@@ -296,6 +295,12 @@ struct zone_reclaim_stat {
unsigned long recent_scanned[2];
};
+enum lruvec_flags {
+ LRUVEC_CONGESTED, /* lruvec has many dirty pages
+ * backed by a congested BDI
+ */
+};
+
struct lruvec {
struct list_head lists[NR_LRU_LISTS];
struct zone_reclaim_stat reclaim_stat;
@@ -303,12 +308,14 @@ struct lruvec {
atomic_long_t inactive_age;
/* Refaults at the time of last reclaim cycle */
unsigned long refaults;
+ /* Various lruvec state flags (enum lruvec_flags) */
+ unsigned long flags;
#ifdef CONFIG_MEMCG
struct pglist_data *pgdat;
#endif
};
-/* Isolate unmapped file */
+/* Isolate unmapped pages */
#define ISOLATE_UNMAPPED ((__force isolate_mode_t)0x2)
/* Isolate for asynchronous migration */
#define ISOLATE_ASYNC_MIGRATE ((__force isolate_mode_t)0x4)
@@ -359,33 +366,40 @@ struct per_cpu_nodestat {
#endif /* !__GENERATING_BOUNDS.H */
enum zone_type {
-#ifdef CONFIG_ZONE_DMA
/*
- * ZONE_DMA is used when there are devices that are not able
- * to do DMA to all of addressable memory (ZONE_NORMAL). Then we
- * carve out the portion of memory that is needed for these devices.
- * The range is arch specific.
+ * ZONE_DMA and ZONE_DMA32 are used when there are peripherals not able
+ * to DMA to all of the addressable memory (ZONE_NORMAL).
+ * On architectures where this area covers the whole 32 bit address
+ * space ZONE_DMA32 is used. ZONE_DMA is left for the ones with smaller
+ * DMA addressing constraints. This distinction is important as a 32bit
+ * DMA mask is assumed when ZONE_DMA32 is defined. Some 64-bit
+ * platforms may need both zones as they support peripherals with
+ * different DMA addressing limitations.
+ *
+ * Some examples:
+ *
+ * - i386 and x86_64 have a fixed 16M ZONE_DMA and ZONE_DMA32 for the
+ * rest of the lower 4G.
+ *
+ * - arm only uses ZONE_DMA, the size, up to 4G, may vary depending on
+ * the specific device.
*
- * Some examples
+ * - arm64 has a fixed 1G ZONE_DMA and ZONE_DMA32 for the rest of the
+ * lower 4G.
*
- * Architecture Limit
- * ---------------------------
- * parisc, ia64, sparc <4G
- * s390, powerpc <2G
- * arm Various
- * alpha Unlimited or 0-16MB.
+ * - powerpc only uses ZONE_DMA, the size, up to 2G, may vary
+ * depending on the specific device.
*
- * i386, x86_64 and multiple other arches
- * <16M.
+ * - s390 uses ZONE_DMA fixed to the lower 2G.
+ *
+ * - ia64 and riscv only use ZONE_DMA32.
+ *
+ * - parisc uses neither.
*/
+#ifdef CONFIG_ZONE_DMA
ZONE_DMA,
#endif
#ifdef CONFIG_ZONE_DMA32
- /*
- * x86_64 needs two ZONE_DMAs because it supports devices that are
- * only able to do DMA to the lower 16M but also 32 bit devices that
- * can only do DMA areas below 4G.
- */
ZONE_DMA32,
#endif
/*
@@ -565,9 +579,6 @@ struct zone {
} ____cacheline_internodealigned_in_smp;
enum pgdat_flags {
- PGDAT_CONGESTED, /* pgdat has many dirty pages backed by
- * a congested BDI
- */
PGDAT_DIRTY, /* reclaim scanning has recently found
* many dirty file pages at the tail
* of the LRU.
@@ -747,7 +758,7 @@ typedef struct pglist_data {
#ifdef CONFIG_NUMA
/*
- * zone reclaim becomes active if more unmapped pages exist.
+ * node reclaim becomes active if more unmapped pages exist.
*/
unsigned long min_unmapped_pages;
unsigned long min_slab_pages;
@@ -770,7 +781,13 @@ typedef struct pglist_data {
#endif
/* Fields commonly accessed by the page reclaim scanner */
- struct lruvec lruvec;
+
+ /*
+ * NOTE: THIS IS UNUSED IF MEMCG IS ENABLED.
+ *
+ * Use mem_cgroup_lruvec() to look up lruvecs.
+ */
+ struct lruvec __lruvec;
unsigned long flags;
@@ -793,11 +810,6 @@ typedef struct pglist_data {
#define node_start_pfn(nid) (NODE_DATA(nid)->node_start_pfn)
#define node_end_pfn(nid) pgdat_end_pfn(NODE_DATA(nid))
-static inline struct lruvec *node_lruvec(struct pglist_data *pgdat)
-{
- return &pgdat->lruvec;
-}
-
static inline unsigned long pgdat_end_pfn(pg_data_t *pgdat)
{
return pgdat->node_start_pfn + pgdat->node_spanned_pages;
@@ -835,7 +847,7 @@ static inline struct pglist_data *lruvec_pgdat(struct lruvec *lruvec)
#ifdef CONFIG_MEMCG
return lruvec->pgdat;
#else
- return container_of(lruvec, struct pglist_data, lruvec);
+ return container_of(lruvec, struct pglist_data, __lruvec);
#endif
}
@@ -1072,7 +1084,7 @@ static inline struct zoneref *first_zones_zonelist(struct zonelist *zonelist,
/**
* for_each_zone_zonelist_nodemask - helper macro to iterate over valid zones in a zonelist at or below a given zone index and within a nodemask
* @zone - The current zone in the iterator
- * @z - The current pointer within zonelist->zones being iterated
+ * @z - The current pointer within zonelist->_zonerefs being iterated
* @zlist - The zonelist being iterated
* @highidx - The zone index of the highest zone to return
* @nodemask - Nodemask allowed by the allocator
@@ -1367,6 +1379,16 @@ static inline int pfn_present(unsigned long pfn)
return present_section(__nr_to_section(pfn_to_section_nr(pfn)));
}
+static inline unsigned long next_present_section_nr(unsigned long section_nr)
+{
+ while (++section_nr <= __highest_present_section_nr) {
+ if (present_section_nr(section_nr))
+ return section_nr;
+ }
+
+ return -1;
+}
+
/*
* These are _only_ used during initialisation, therefore they
* can use __initdata ... They could have names to indicate
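
next_present_section_nr() above pre-increments its argument and skips absent sections. A hypothetical counting loop (not in the patch) shows the intended iteration style, starting "before" section 0:

static unsigned long count_present_sections(void)
{
	unsigned long section_nr, nr = 0;

	/* the helper pre-increments, so pass (unsigned long)-1 to start at 0 */
	for (section_nr = next_present_section_nr((unsigned long)-1);
	     section_nr != (unsigned long)-1;
	     section_nr = next_present_section_nr(section_nr))
		nr++;

	return nr;
}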
diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
index 5714fd35a83c..f8b66d43acf6 100644
--- a/include/linux/mod_devicetable.h
+++ b/include/linux/mod_devicetable.h
@@ -587,9 +587,9 @@ struct platform_device_id {
#define MDIO_NAME_SIZE 32
#define MDIO_MODULE_PREFIX "mdio:"
-#define MDIO_ID_FMT "%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d%d"
+#define MDIO_ID_FMT "%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u%u"
#define MDIO_ID_ARGS(_id) \
- (_id)>>31, ((_id)>>30) & 1, ((_id)>>29) & 1, ((_id)>>28) & 1, \
+ ((_id)>>31) & 1, ((_id)>>30) & 1, ((_id)>>29) & 1, ((_id)>>28) & 1, \
((_id)>>27) & 1, ((_id)>>26) & 1, ((_id)>>25) & 1, ((_id)>>24) & 1, \
((_id)>>23) & 1, ((_id)>>22) & 1, ((_id)>>21) & 1, ((_id)>>20) & 1, \
((_id)>>19) & 1, ((_id)>>18) & 1, ((_id)>>17) & 1, ((_id)>>16) & 1, \
@@ -667,9 +667,7 @@ struct x86_cpu_id {
kernel_ulong_t driver_data;
};
-#define X86_FEATURE_MATCH(x) \
- { X86_VENDOR_ANY, X86_FAMILY_ANY, X86_MODEL_ANY, x }
-
+/* Wild cards for x86_cpu_id::vendor, family, model and feature */
#define X86_VENDOR_ANY 0xffff
#define X86_FAMILY_ANY 0
#define X86_MODEL_ANY 0
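
The MDIO_ID_ARGS() fix masks the top bit so every argument is 0 or 1, matching the unsigned %u conversions in MDIO_ID_FMT. A hypothetical helper (buffer size and function name are illustrative) shows how the pair expands into the modalias bit string:

static void my_print_phy_modalias(u32 phy_id)
{
	char buf[64];

	/* MDIO_ID_ARGS() emits one 0/1 argument per bit, MSB first */
	snprintf(buf, sizeof(buf), MDIO_MODULE_PREFIX MDIO_ID_FMT,
		 MDIO_ID_ARGS(phy_id));
	pr_info("modalias: %s\n", buf);
}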
diff --git a/include/linux/module.h b/include/linux/module.h
index 6d20895e7739..1ad393e62bef 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -1,11 +1,14 @@
-#ifndef _LINUX_MODULE_H
-#define _LINUX_MODULE_H
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Dynamic loading of modules into the kernel.
*
* Rewritten by Richard Henderson <rth@tamu.edu> Dec 1996
* Rewritten again by Rusty Russell, 2002
*/
+
+#ifndef _LINUX_MODULE_H
+#define _LINUX_MODULE_H
+
#include <linux/list.h>
#include <linux/stat.h>
#include <linux/compiler.h>
@@ -167,6 +170,16 @@ extern void cleanup_module(void);
#define MODULE_SOFTDEP(_softdep) MODULE_INFO(softdep, _softdep)
/*
+ * MODULE_FILE is used for generating modules.builtin
+ * So, make it no-op when this is being built as a module
+ */
+#ifdef MODULE
+#define MODULE_FILE
+#else
+#define MODULE_FILE MODULE_INFO(file, KBUILD_MODFILE);
+#endif
+
+/*
* The following license idents are currently accepted as indicating free
* software modules
*
@@ -210,7 +223,7 @@ extern void cleanup_module(void);
* 2. So the community can ignore bug reports including proprietary modules
* 3. So vendors can do likewise based on their own policies
*/
-#define MODULE_LICENSE(_license) MODULE_INFO(license, _license)
+#define MODULE_LICENSE(_license) MODULE_FILE MODULE_INFO(license, _license)
/*
* Author(s), use "Name <email>" or just "Name", for multiple
@@ -426,7 +439,7 @@ struct module {
#ifdef CONFIG_KALLSYMS
/* Protected by RCU and/or module_mutex: use rcu_dereference() */
- struct mod_kallsyms *kallsyms;
+ struct mod_kallsyms __rcu *kallsyms;
struct mod_kallsyms core_kallsyms;
/* Section attributes */
@@ -846,13 +859,9 @@ extern int module_sysfs_initialized;
#define __MODULE_STRING(x) __stringify(x)
#ifdef CONFIG_STRICT_MODULE_RWX
-extern void set_all_modules_text_rw(void);
-extern void set_all_modules_text_ro(void);
extern void module_enable_ro(const struct module *mod, bool after_init);
extern void module_disable_ro(const struct module *mod);
#else
-static inline void set_all_modules_text_rw(void) { }
-static inline void set_all_modules_text_ro(void) { }
static inline void module_enable_ro(const struct module *mod, bool after_init) { }
static inline void module_disable_ro(const struct module *mod) { }
#endif
diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h
index 5229c18025e9..ca92aea8a6bd 100644
--- a/include/linux/moduleloader.h
+++ b/include/linux/moduleloader.h
@@ -91,7 +91,7 @@ void module_arch_cleanup(struct module *mod);
/* Any cleanup before freeing mod->module_init */
void module_arch_freeing_init(struct module *mod);
-#ifdef CONFIG_KASAN
+#if defined(CONFIG_KASAN) && !defined(CONFIG_KASAN_VMALLOC)
#include <linux/kasan.h>
#define MODULE_ALIGN (PAGE_SIZE << KASAN_SHADOW_SCALE_SHIFT)
#else
diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
index 5ba250d9172a..3ef917ff0964 100644
--- a/include/linux/moduleparam.h
+++ b/include/linux/moduleparam.h
@@ -100,11 +100,11 @@ struct kparam_array
/**
* module_param - typesafe helper for a module/cmdline parameter
- * @value: the variable to alter, and exposed parameter name.
+ * @name: the variable to alter, and exposed parameter name.
* @type: the type of the parameter
* @perm: visibility in sysfs.
*
- * @value becomes the module parameter, or (prefixed by KBUILD_MODNAME and a
+ * @name becomes the module parameter, or (prefixed by KBUILD_MODNAME and a
* ".") the kernel commandline parameter. Note that - is changed to _, so
* the user can use "foo-bar=1" even for variable "foo_bar".
*
@@ -128,6 +128,9 @@ struct kparam_array
/**
* module_param_unsafe - same as module_param but taints kernel
+ * @name: the variable to alter, and exposed parameter name.
+ * @type: the type of the parameter
+ * @perm: visibility in sysfs.
*/
#define module_param_unsafe(name, type, perm) \
module_param_named_unsafe(name, name, type, perm)
@@ -150,6 +153,10 @@ struct kparam_array
/**
* module_param_named_unsafe - same as module_param_named but taints kernel
+ * @name: a valid C identifier which is the parameter name.
+ * @value: the actual lvalue to alter.
+ * @type: the type of the parameter
+ * @perm: visibility in sysfs.
*/
#define module_param_named_unsafe(name, value, type, perm) \
param_check_##type(name, &(value)); \
@@ -160,6 +167,7 @@ struct kparam_array
* module_param_cb - general callback for a module/cmdline parameter
* @name: a valid C identifier which is the parameter name.
* @ops: the set & get operations for this parameter.
+ * @arg: args for @ops
* @perm: visibility in sysfs.
*
* The ops can have NULL set or get functions.
@@ -171,36 +179,96 @@ struct kparam_array
__module_param_call(MODULE_PARAM_PREFIX, name, ops, arg, perm, -1, \
KERNEL_PARAM_FL_UNSAFE)
+#define __level_param_cb(name, ops, arg, perm, level) \
+ __module_param_call(MODULE_PARAM_PREFIX, name, ops, arg, perm, level, 0)
/**
- * <level>_param_cb - general callback for a module/cmdline parameter
- * to be evaluated before certain initcall level
+ * core_param_cb - general callback for a module/cmdline parameter
+ * to be evaluated before core initcall level
* @name: a valid C identifier which is the parameter name.
* @ops: the set & get operations for this parameter.
+ * @arg: args for @ops
* @perm: visibility in sysfs.
*
* The ops can have NULL set or get functions.
*/
-#define __level_param_cb(name, ops, arg, perm, level) \
- __module_param_call(MODULE_PARAM_PREFIX, name, ops, arg, perm, level, 0)
-
#define core_param_cb(name, ops, arg, perm) \
__level_param_cb(name, ops, arg, perm, 1)
+/**
+ * postcore_param_cb - general callback for a module/cmdline parameter
+ * to be evaluated before postcore initcall level
+ * @name: a valid C identifier which is the parameter name.
+ * @ops: the set & get operations for this parameter.
+ * @arg: args for @ops
+ * @perm: visibility in sysfs.
+ *
+ * The ops can have NULL set or get functions.
+ */
#define postcore_param_cb(name, ops, arg, perm) \
__level_param_cb(name, ops, arg, perm, 2)
+/**
+ * arch_param_cb - general callback for a module/cmdline parameter
+ * to be evaluated before arch initcall level
+ * @name: a valid C identifier which is the parameter name.
+ * @ops: the set & get operations for this parameter.
+ * @arg: args for @ops
+ * @perm: visibility in sysfs.
+ *
+ * The ops can have NULL set or get functions.
+ */
#define arch_param_cb(name, ops, arg, perm) \
__level_param_cb(name, ops, arg, perm, 3)
+/**
+ * subsys_param_cb - general callback for a module/cmdline parameter
+ * to be evaluated before subsys initcall level
+ * @name: a valid C identifier which is the parameter name.
+ * @ops: the set & get operations for this parameter.
+ * @arg: args for @ops
+ * @perm: visibility in sysfs.
+ *
+ * The ops can have NULL set or get functions.
+ */
#define subsys_param_cb(name, ops, arg, perm) \
__level_param_cb(name, ops, arg, perm, 4)
+/**
+ * fs_param_cb - general callback for a module/cmdline parameter
+ * to be evaluated before fs initcall level
+ * @name: a valid C identifier which is the parameter name.
+ * @ops: the set & get operations for this parameter.
+ * @arg: args for @ops
+ * @perm: visibility in sysfs.
+ *
+ * The ops can have NULL set or get functions.
+ */
#define fs_param_cb(name, ops, arg, perm) \
__level_param_cb(name, ops, arg, perm, 5)
+/**
+ * device_param_cb - general callback for a module/cmdline parameter
+ * to be evaluated before device initcall level
+ * @name: a valid C identifier which is the parameter name.
+ * @ops: the set & get operations for this parameter.
+ * @arg: args for @ops
+ * @perm: visibility in sysfs.
+ *
+ * The ops can have NULL set or get functions.
+ */
#define device_param_cb(name, ops, arg, perm) \
__level_param_cb(name, ops, arg, perm, 6)
+/**
+ * late_param_cb - general callback for a module/cmdline parameter
+ * to be evaluated before late initcall level
+ * @name: a valid C identifier which is the parameter name.
+ * @ops: the set & get operations for this parameter.
+ * @arg: args for @ops
+ * @perm: visibility in sysfs.
+ *
+ * The ops can have NULL set or get functions.
+ */
#define late_param_cb(name, ops, arg, perm) \
__level_param_cb(name, ops, arg, perm, 7)
@@ -263,6 +331,10 @@ static inline void kernel_param_unlock(struct module *mod)
/**
* core_param_unsafe - same as core_param but taints kernel
+ * @name: the name of the cmdline and sysfs parameter (often the same as var)
+ * @var: the variable
+ * @type: the type of the parameter
+ * @perm: visibility in sysfs
*/
#define core_param_unsafe(name, var, type, perm) \
param_check_##type(name, &(var)); \
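
Most of this hunk is kernel-doc for the *_param_cb() family. A short hypothetical module (threshold_set and the parameter names are illustrative) shows module_param() next to a callback-based parameter:

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>

static int debug_level;
module_param(debug_level, int, 0644);
MODULE_PARM_DESC(debug_level, "Verbosity of the driver (0-3)");

static int threshold_set(const char *val, const struct kernel_param *kp)
{
	int ret = param_set_int(val, kp);	/* reuse the stock int parser */

	if (ret == 0)
		pr_info("threshold changed to %d\n", *(int *)kp->arg);
	return ret;
}

static const struct kernel_param_ops threshold_ops = {
	.set = threshold_set,
	.get = param_get_int,
};

static int threshold = 10;
module_param_cb(threshold, &threshold_ops, &threshold, 0644);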
diff --git a/include/linux/most.h b/include/linux/most.h
new file mode 100644
index 000000000000..232e01b7f5d2
--- /dev/null
+++ b/include/linux/most.h
@@ -0,0 +1,337 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * most.h - API for component and adapter drivers
+ *
+ * Copyright (C) 2013-2015, Microchip Technology Germany II GmbH & Co. KG
+ */
+
+#ifndef __MOST_CORE_H__
+#define __MOST_CORE_H__
+
+#include <linux/types.h>
+#include <linux/device.h>
+
+struct module;
+struct interface_private;
+
+/**
+ * Interface type
+ */
+enum most_interface_type {
+ ITYPE_LOOPBACK = 1,
+ ITYPE_I2C,
+ ITYPE_I2S,
+ ITYPE_TSI,
+ ITYPE_HBI,
+ ITYPE_MEDIALB_DIM,
+ ITYPE_MEDIALB_DIM2,
+ ITYPE_USB,
+ ITYPE_PCIE
+};
+
+/**
+ * Channel direction.
+ */
+enum most_channel_direction {
+ MOST_CH_RX = 1 << 0,
+ MOST_CH_TX = 1 << 1,
+};
+
+/**
+ * Channel data type.
+ */
+enum most_channel_data_type {
+ MOST_CH_CONTROL = 1 << 0,
+ MOST_CH_ASYNC = 1 << 1,
+ MOST_CH_ISOC = 1 << 2,
+ MOST_CH_SYNC = 1 << 5,
+};
+
+enum most_status_flags {
+ /* MBO was processed successfully (data was sent or received) */
+ MBO_SUCCESS = 0,
+ /* The MBO contains wrong or missing information. */
+ MBO_E_INVAL,
+ /* MBO was completed as HDM Channel will be closed */
+ MBO_E_CLOSE,
+};
+
+/**
+ * struct most_channel_capability - Channel capability
+ * @direction: Supported channel directions.
+ * The value is bitwise OR-combination of the values from the
+ * enumeration most_channel_direction. Zero is an allowed value and means
+ * "channel may not be used".
+ * @data_type: Supported channel data types.
+ * The value is bitwise OR-combination of the values from the
+ * enumeration most_channel_data_type. Zero is an allowed value and means
+ * "channel may not be used".
+ * @num_buffers_packet: Maximum number of buffers supported by this channel
+ * for packet data types (Async,Control,QoS)
+ * @buffer_size_packet: Maximum buffer size supported by this channel
+ * for packet data types (Async,Control,QoS)
+ * @num_buffers_streaming: Maximum number of buffers supported by this channel
+ * for streaming data types (Sync,AV Packetized)
+ * @buffer_size_streaming: Maximum buffer size supported by this channel
+ * for streaming data types (Sync,AV Packetized)
+ * @name_suffix: Optional suffix provided by an HDM that is attached to the
+ * regular channel name.
+ *
+ * Describes the capabilities of a MOST channel like supported Data Types
+ * and directions. This information is provided by an HDM for the MostCore.
+ *
+ * The Core creates read only sysfs attribute files in
+ * /sys/devices/most/mdev#/<channel>/ with the
+ * following attributes:
+ * -available_directions
+ * -available_datatypes
+ * -number_of_packet_buffers
+ * -number_of_stream_buffers
+ * -size_of_packet_buffer
+ * -size_of_stream_buffer
+ * where the content of each file is a string listing all supported properties
+ * of the respective channel.
+ */
+struct most_channel_capability {
+ u16 direction;
+ u16 data_type;
+ u16 num_buffers_packet;
+ u16 buffer_size_packet;
+ u16 num_buffers_streaming;
+ u16 buffer_size_streaming;
+ const char *name_suffix;
+};
+
+/**
+ * struct most_channel_config - stores channel configuration
+ * @direction: direction of the channel
+ * @data_type: data type travelling over this channel
+ * @num_buffers: number of buffers
+ * @buffer_size: size of a buffer for AIM.
+ * Buffer size may be cut down by HDM in a configure callback
+ * to match to a given interface and channel type.
+ * @extra_len: additional buffer space for internal HDM purposes like padding.
+ * May be set by HDM in a configure callback if needed.
+ * @subbuffer_size: size of a subbuffer
+ * @packets_per_xact: number of MOST frames that are packed inside one USB
+ * packet. This is USB specific.
+ *
+ * Describes the configuration for a MOST channel. This information is
+ * provided from the MostCore to a HDM (like the Medusa PCIe Interface) as a
+ * parameter of the "configure" function call.
+ */
+struct most_channel_config {
+ enum most_channel_direction direction;
+ enum most_channel_data_type data_type;
+ u16 num_buffers;
+ u16 buffer_size;
+ u16 extra_len;
+ u16 subbuffer_size;
+ u16 packets_per_xact;
+ u16 dbr_size;
+};
+
+/*
+ * struct mbo - MOST Buffer Object.
+ * @context: context for core completion handler
+ * @priv: private data for HDM
+ *
+ * public: documented fields that are used for the communications
+ * between MostCore and HDMs
+ *
+ * @list: list head for use by the mbo's current owner
+ * @ifp: (in) associated interface instance
+ * @num_buffers_ptr: amount of pool buffers
+ * @hdm_channel_id: (in) HDM channel instance
+ * @virt_address: (in) kernel virtual address of the buffer
+ * @bus_address: (in) bus address of the buffer
+ * @buffer_length: (in) buffer payload length
+ * @processed_length: (out) processed length
+ * @status: (out) transfer status
+ * @complete: (in) completion routine
+ *
+ * The core allocates and initializes the MBO.
+ *
+ * The HDM receives MBO for transfer from the core with the call to enqueue().
+ * The HDM copies the data to- or from the buffer depending on configured
+ * channel direction, sets "processed_length" and "status", and completes
+ * the transfer procedure by calling the completion routine.
+ *
+ * Finally, the MBO is being deallocated or recycled for further
+ * transfers of the same or a different HDM.
+ *
+ * Directions of usage:
+ * I. The core driver should never access any MBO fields (even if marked
+ * as "public") while the MBO is owned by an HDM. The ownership starts with
+ * the call of enqueue() and ends with the call of its complete() routine.
+ *
+ * II.
+ * Every HDM attached to the core driver _must_ ensure that it returns any MBO
+ * it owns (due to a previous call to enqueue() by the core driver) before it
+ * de-registers an interface or gets unloaded from the kernel. If this direction
+ * is violated memory leaks will occur, since the core driver does _not_ track
+ * MBOs it is currently not in control of.
+ *
+ */
+struct mbo {
+ void *context;
+ void *priv;
+ struct list_head list;
+ struct most_interface *ifp;
+ int *num_buffers_ptr;
+ u16 hdm_channel_id;
+ void *virt_address;
+ dma_addr_t bus_address;
+ u16 buffer_length;
+ u16 processed_length;
+ enum most_status_flags status;
+ void (*complete)(struct mbo *mbo);
+};
+
+/**
+ * Interface instance description.
+ *
+ * Describes an interface of a MOST device the core driver is bound to.
+ * This structure is allocated and initialized in the HDM. MostCore may not
+ * modify this structure.
+ *
+ * @dev: the actual device
+ * @mod: module
+ * @interface Interface type. \sa most_interface_type.
+ * @description PRELIMINARY.
+ * Unique description of the device instance from point of view of the
+ * interface in free text form (ASCII).
+ * It may be a hexadecimal presentation of the memory address for the MediaLB
+ * IP or USB device ID with USB properties for USB interface, etc.
+ * @num_channels Number of channels and size of the channel_vector.
+ * @channel_vector Properties of the channels.
+ * Array index represents channel ID by the driver.
+ * @configure Callback to change data type for the channel of the
+ * interface instance. May be zero if the instance of the interface is not
+ * configurable. Parameter channel_config describes direction and data
+ * type for the channel, configured by the higher level. The content of
+ * @enqueue Delivers MBO to the HDM for processing.
+ * After HDM completes Rx- or Tx- operation the processed MBO shall
+ * be returned back to the MostCore using completion routine.
+ * The reason to get the MBO delivered from the MostCore after the channel
+ * is poisoned is the re-opening of the channel by the application.
+ * In this case the HDM shall hold MBOs and service the channel as usual.
+ * The HDM must be able to hold at least one MBO for each channel.
+ * The callback returns a negative value on error, otherwise 0.
+ * @poison_channel Informs HDM about closing the channel. The HDM shall
+ * cancel all transfers and synchronously or asynchronously return
+ * all enqueued for this channel MBOs using the completion routine.
+ * The callback returns a negative value on error, otherwise 0.
+ * @request_netinfo: triggers retrieving of network info from the HDM by
+ * means of "Message exchange over MDP/MEP"
+ * The call of the function request_netinfo with the parameter on_netinfo as
+ * NULL prohibits use of the previously obtained function pointer.
+ * @priv Private field used by mostcore to store context information.
+ */
+struct most_interface {
+ struct device *dev;
+ struct device *driver_dev;
+ struct module *mod;
+ enum most_interface_type interface;
+ const char *description;
+ unsigned int num_channels;
+ struct most_channel_capability *channel_vector;
+ void *(*dma_alloc)(struct mbo *mbo, u32 size);
+ void (*dma_free)(struct mbo *mbo, u32 size);
+ int (*configure)(struct most_interface *iface, int channel_idx,
+ struct most_channel_config *channel_config);
+ int (*enqueue)(struct most_interface *iface, int channel_idx,
+ struct mbo *mbo);
+ int (*poison_channel)(struct most_interface *iface, int channel_idx);
+ void (*request_netinfo)(struct most_interface *iface, int channel_idx,
+ void (*on_netinfo)(struct most_interface *iface,
+ unsigned char link_stat,
+ unsigned char *mac_addr));
+ void *priv;
+ struct interface_private *p;
+};
+
+/**
+ * struct most_component - identifies a loadable component for the mostcore
+ * @list: list_head
+ * @name: component name
+ * @probe_channel: function for core to notify driver about channel connection
+ * @disconnect_channel: callback function to disconnect a certain channel
+ * @rx_completion: completion handler for received packets
+ * @tx_completion: completion handler for transmitted packets
+ */
+struct most_component {
+ struct list_head list;
+ const char *name;
+ struct module *mod;
+ int (*probe_channel)(struct most_interface *iface, int channel_idx,
+ struct most_channel_config *cfg, char *name,
+ char *param);
+ int (*disconnect_channel)(struct most_interface *iface,
+ int channel_idx);
+ int (*rx_completion)(struct mbo *mbo);
+ int (*tx_completion)(struct most_interface *iface, int channel_idx);
+ int (*cfg_complete)(void);
+};
+
+/**
+ * most_register_interface - Registers instance of the interface.
+ * @iface: Pointer to the interface instance description.
+ *
+ * Returns 0 on success or a negative error code on failure.
+ *
+ * Note: HDM has to ensure that any reference held on the kobj is
+ * released before deregistering the interface.
+ */
+int most_register_interface(struct most_interface *iface);
+
+/**
+ * most_deregister_interface - Deregisters an instance of the interface.
+ * @iface: Pointer to the interface instance description.
+ */
+void most_deregister_interface(struct most_interface *iface);
+void most_submit_mbo(struct mbo *mbo);
+
+/**
+ * most_stop_enqueue - prevents core from enqueuing MBOs
+ * @iface: pointer to interface
+ * @channel_idx: channel index
+ */
+void most_stop_enqueue(struct most_interface *iface, int channel_idx);
+
+/**
+ * most_resume_enqueue - allow core to enqueue MBOs again
+ * @iface: pointer to interface
+ * @channel_idx: channel index
+ *
+ * This clears the enqueue halt flag and enqueues all MBOs currently
+ * in wait fifo.
+ */
+void most_resume_enqueue(struct most_interface *iface, int channel_idx);
+int most_register_component(struct most_component *comp);
+int most_deregister_component(struct most_component *comp);
+struct mbo *most_get_mbo(struct most_interface *iface, int channel_idx,
+ struct most_component *comp);
+void most_put_mbo(struct mbo *mbo);
+int channel_has_mbo(struct most_interface *iface, int channel_idx,
+ struct most_component *comp);
+int most_start_channel(struct most_interface *iface, int channel_idx,
+ struct most_component *comp);
+int most_stop_channel(struct most_interface *iface, int channel_idx,
+ struct most_component *comp);
+int __init configfs_init(void);
+int most_register_configfs_subsys(struct most_component *comp);
+void most_deregister_configfs_subsys(struct most_component *comp);
+int most_add_link(char *mdev, char *mdev_ch, char *comp_name, char *link_name,
+ char *comp_param);
+int most_remove_link(char *mdev, char *mdev_ch, char *comp_name);
+int most_set_cfg_buffer_size(char *mdev, char *mdev_ch, u16 val);
+int most_set_cfg_subbuffer_size(char *mdev, char *mdev_ch, u16 val);
+int most_set_cfg_dbr_size(char *mdev, char *mdev_ch, u16 val);
+int most_set_cfg_num_buffers(char *mdev, char *mdev_ch, u16 val);
+int most_set_cfg_datatype(char *mdev, char *mdev_ch, char *buf);
+int most_set_cfg_direction(char *mdev, char *mdev_ch, char *buf);
+int most_set_cfg_packets_xact(char *mdev, char *mdev_ch, u16 val);
+int most_cfg_complete(char *comp_name);
+void most_interface_register_notify(const char *mdev_name);
+#endif /* __MOST_CORE_H__ */
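
To make the component API above concrete, here is a minimal hypothetical component; "my-comp" and the my_* callbacks are illustrative and only show the documented return conventions:

static int my_probe_channel(struct most_interface *iface, int channel_idx,
			    struct most_channel_config *cfg, char *name,
			    char *param)
{
	/* remember iface/channel_idx, start the channel when ready */
	return 0;
}

static int my_disconnect_channel(struct most_interface *iface, int channel_idx)
{
	return 0;
}

static int my_rx_completion(struct mbo *mbo)
{
	/* consume mbo->virt_address / mbo->processed_length, then recycle */
	most_put_mbo(mbo);
	return 0;
}

static int my_tx_completion(struct most_interface *iface, int channel_idx)
{
	return 0;
}

static struct most_component my_comp = {
	.name			= "my-comp",
	.probe_channel		= my_probe_channel,
	.disconnect_channel	= my_disconnect_channel,
	.rx_completion		= my_rx_completion,
	.tx_completion		= my_tx_completion,
};

/* module init/exit would then call
 *	most_register_component(&my_comp);
 *	most_deregister_component(&my_comp);
 */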
diff --git a/include/linux/mroute_base.h b/include/linux/mroute_base.h
index 34de06b426ef..8071148f29a6 100644
--- a/include/linux/mroute_base.h
+++ b/include/linux/mroute_base.h
@@ -47,16 +47,16 @@ struct vif_entry_notifier_info {
};
static inline int mr_call_vif_notifier(struct notifier_block *nb,
- struct net *net,
unsigned short family,
enum fib_event_type event_type,
struct vif_device *vif,
- unsigned short vif_index, u32 tb_id)
+ unsigned short vif_index, u32 tb_id,
+ struct netlink_ext_ack *extack)
{
struct vif_entry_notifier_info info = {
.info = {
.family = family,
- .net = net,
+ .extack = extack,
},
.dev = vif->dev,
.vif_index = vif_index,
@@ -64,7 +64,7 @@ static inline int mr_call_vif_notifier(struct notifier_block *nb,
.tb_id = tb_id,
};
- return call_fib_notifier(nb, net, event_type, &info.info);
+ return call_fib_notifier(nb, event_type, &info.info);
}
static inline int mr_call_vif_notifiers(struct net *net,
@@ -77,7 +77,6 @@ static inline int mr_call_vif_notifiers(struct net *net,
struct vif_entry_notifier_info info = {
.info = {
.family = family,
- .net = net,
},
.dev = vif->dev,
.vif_index = vif_index,
@@ -173,21 +172,21 @@ struct mfc_entry_notifier_info {
};
static inline int mr_call_mfc_notifier(struct notifier_block *nb,
- struct net *net,
unsigned short family,
enum fib_event_type event_type,
- struct mr_mfc *mfc, u32 tb_id)
+ struct mr_mfc *mfc, u32 tb_id,
+ struct netlink_ext_ack *extack)
{
struct mfc_entry_notifier_info info = {
.info = {
.family = family,
- .net = net,
+ .extack = extack,
},
.mfc = mfc,
.tb_id = tb_id
};
- return call_fib_notifier(nb, net, event_type, &info.info);
+ return call_fib_notifier(nb, event_type, &info.info);
}
static inline int mr_call_mfc_notifiers(struct net *net,
@@ -199,7 +198,6 @@ static inline int mr_call_mfc_notifiers(struct net *net,
struct mfc_entry_notifier_info info = {
.info = {
.family = family,
- .net = net,
},
.mfc = mfc,
.tb_id = tb_id
@@ -301,10 +299,11 @@ int mr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb,
int mr_dump(struct net *net, struct notifier_block *nb, unsigned short family,
int (*rules_dump)(struct net *net,
- struct notifier_block *nb),
+ struct notifier_block *nb,
+ struct netlink_ext_ack *extack),
struct mr_table *(*mr_iter)(struct net *net,
struct mr_table *mrt),
- rwlock_t *mrt_lock);
+ rwlock_t *mrt_lock, struct netlink_ext_ack *extack);
#else
static inline void vif_device_init(struct vif_device *v,
struct net_device *dev,
@@ -355,10 +354,11 @@ mr_rtm_dumproute(struct sk_buff *skb, struct netlink_callback *cb,
static inline int mr_dump(struct net *net, struct notifier_block *nb,
unsigned short family,
int (*rules_dump)(struct net *net,
- struct notifier_block *nb),
+ struct notifier_block *nb,
+ struct netlink_ext_ack *extack),
struct mr_table *(*mr_iter)(struct net *net,
struct mr_table *mrt),
- rwlock_t *mrt_lock)
+ rwlock_t *mrt_lock, struct netlink_ext_ack *extack)
{
return -EINVAL;
}
diff --git a/include/linux/msdos_partition.h b/include/linux/msdos_partition.h
new file mode 100644
index 000000000000..2cb82db2a43c
--- /dev/null
+++ b/include/linux/msdos_partition.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_MSDOS_PARTITION_H
+#define _LINUX_MSDOS_PARTITION_H
+
+#define MSDOS_LABEL_MAGIC 0xAA55
+
+struct msdos_partition {
+ u8 boot_ind; /* 0x80 - active */
+ u8 head; /* starting head */
+ u8 sector; /* starting sector */
+ u8 cyl; /* starting cylinder */
+ u8 sys_ind; /* What partition type */
+ u8 end_head; /* end head */
+ u8 end_sector; /* end sector */
+ u8 end_cyl; /* end cylinder */
+ __le32 start_sect; /* starting sector counting from 0 */
+ __le32 nr_sects; /* nr of sectors in partition */
+} __packed;
+
+enum msdos_sys_ind {
+ /*
+ * These three have identical behaviour; use the second one if DOS FDISK
+ * gets confused about extended/logical partitions starting past
+ * cylinder 1023.
+ */
+ DOS_EXTENDED_PARTITION = 5,
+ LINUX_EXTENDED_PARTITION = 0x85,
+ WIN98_EXTENDED_PARTITION = 0x0f,
+
+ LINUX_DATA_PARTITION = 0x83,
+ LINUX_LVM_PARTITION = 0x8e,
+ LINUX_RAID_PARTITION = 0xfd, /* autodetect RAID partition */
+
+ SOLARIS_X86_PARTITION = 0x82, /* also Linux swap partitions */
+ NEW_SOLARIS_X86_PARTITION = 0xbf,
+
+ DM6_AUX1PARTITION = 0x51, /* no DDO: use xlated geom */
+ DM6_AUX3PARTITION = 0x53, /* no DDO: use xlated geom */
+ DM6_PARTITION = 0x54, /* has DDO: use xlated geom & offset */
+ EZD_PARTITION = 0x55, /* EZ-DRIVE */
+
+ FREEBSD_PARTITION = 0xa5, /* FreeBSD Partition ID */
+ OPENBSD_PARTITION = 0xa6, /* OpenBSD Partition ID */
+ NETBSD_PARTITION = 0xa9, /* NetBSD Partition ID */
+ BSDI_PARTITION = 0xb7, /* BSDI Partition ID */
+ MINIX_PARTITION = 0x81, /* Minix Partition ID */
+ UNIXWARE_PARTITION = 0x63, /* Same as GNU_HURD and SCO Unix */
+};
+
+#endif /* _LINUX_MSDOS_PARTITION_H */
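
A hypothetical reader (not part of the patch, assuming <asm/unaligned.h> for get_unaligned_le16()) shows how the packed layout and MSDOS_LABEL_MAGIC are intended to be used on a 512-byte MBR sector:

#include <linux/msdos_partition.h>
#include <asm/unaligned.h>

static void dump_primary_partitions(const u8 *sector)	/* 512-byte MBR */
{
	const struct msdos_partition *p;
	int i;

	if (get_unaligned_le16(sector + 510) != MSDOS_LABEL_MAGIC)
		return;			/* no valid DOS label */

	p = (const struct msdos_partition *)(sector + 0x1be);
	for (i = 0; i < 4; i++, p++) {
		if (!le32_to_cpu(p->nr_sects))
			continue;	/* empty slot */
		pr_info("part %d: type 0x%02x start %u size %u\n",
			i, p->sys_ind,
			le32_to_cpu(p->start_sect),
			le32_to_cpu(p->nr_sects));
	}
}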
diff --git a/include/linux/mtd/flashchip.h b/include/linux/mtd/flashchip.h
index ecc88a41792a..c04f690871ca 100644
--- a/include/linux/mtd/flashchip.h
+++ b/include/linux/mtd/flashchip.h
@@ -40,7 +40,7 @@ typedef enum {
FL_READING,
FL_CACHEDPRG,
/* These 4 come from onenand_state_t, which has been unified here */
- FL_RESETING,
+ FL_RESETTING,
FL_OTPING,
FL_PREPARING_ERASE,
FL_VERIFYING_ERASE,
diff --git a/include/linux/mtd/spi-nor.h b/include/linux/mtd/spi-nor.h
index fc0b4b19c900..5abd91cc6dfa 100644
--- a/include/linux/mtd/spi-nor.h
+++ b/include/linux/mtd/spi-nor.h
@@ -22,6 +22,7 @@
#define SNOR_MFR_INTEL CFI_MFR_INTEL
#define SNOR_MFR_ST CFI_MFR_ST /* ST Micro */
#define SNOR_MFR_MICRON CFI_MFR_MICRON /* Micron */
+#define SNOR_MFR_ISSI CFI_MFR_PMC
#define SNOR_MFR_MACRONIX CFI_MFR_MACRONIX
#define SNOR_MFR_SPANSION CFI_MFR_AMD
#define SNOR_MFR_SST CFI_MFR_SST
@@ -127,13 +128,14 @@
#define SR_BP0 BIT(2) /* Block protect 0 */
#define SR_BP1 BIT(3) /* Block protect 1 */
#define SR_BP2 BIT(4) /* Block protect 2 */
-#define SR_TB BIT(5) /* Top/Bottom protect */
+#define SR_TB_BIT5 BIT(5) /* Top/Bottom protect */
+#define SR_TB_BIT6 BIT(6) /* Top/Bottom protect */
#define SR_SRWD BIT(7) /* SR write protect */
/* Spansion/Cypress specific status bits */
#define SR_E_ERR BIT(5)
#define SR_P_ERR BIT(6)
-#define SR_QUAD_EN_MX BIT(6) /* Macronix Quad I/O */
+#define SR1_QUAD_EN_BIT6 BIT(6)
/* Enhanced Volatile Configuration Register bits */
#define EVCR_QUAD_EN_MICRON BIT(7) /* Micron Quad I/O */
@@ -144,10 +146,8 @@
#define FSR_P_ERR BIT(4) /* Program operation status */
#define FSR_PT_ERR BIT(1) /* Protection error bit */
-/* Configuration Register bits. */
-#define CR_QUAD_EN_SPAN BIT(1) /* Spansion Quad I/O */
-
/* Status Register 2 bits. */
+#define SR2_QUAD_EN_BIT1 BIT(1)
#define SR2_QUAD_EN_BIT7 BIT(7)
/* Supported SPI protocols */
@@ -225,14 +225,6 @@ static inline u8 spi_nor_get_protocol_width(enum spi_nor_protocol proto)
return spi_nor_get_protocol_data_nbits(proto);
}
-enum spi_nor_ops {
- SPI_NOR_OPS_READ = 0,
- SPI_NOR_OPS_WRITE,
- SPI_NOR_OPS_ERASE,
- SPI_NOR_OPS_LOCK,
- SPI_NOR_OPS_UNLOCK,
-};
-
enum spi_nor_option_flags {
SNOR_F_USE_FSR = BIT(0),
SNOR_F_HAS_SR_TB = BIT(1),
@@ -243,6 +235,10 @@ enum spi_nor_option_flags {
SNOR_F_4B_OPCODES = BIT(6),
SNOR_F_HAS_4BAIT = BIT(7),
SNOR_F_HAS_LOCK = BIT(8),
+ SNOR_F_HAS_16BIT_SR = BIT(9),
+ SNOR_F_NO_READ_CR = BIT(10),
+ SNOR_F_HAS_SR_TB_BIT6 = BIT(11),
+
};
/**
@@ -466,6 +462,34 @@ enum spi_nor_pp_command_index {
struct spi_nor;
/**
+ * struct spi_nor_controller_ops - SPI NOR controller driver specific
+ * operations.
+ * @prepare: [OPTIONAL] do some preparations for the
+ * read/write/erase/lock/unlock operations.
+ * @unprepare: [OPTIONAL] do some post work after the
+ * read/write/erase/lock/unlock operations.
+ * @read_reg: read out the register.
+ * @write_reg: write data to the register.
+ * @read: read data from the SPI NOR.
+ * @write: write data to the SPI NOR.
+ * @erase: erase a sector of the SPI NOR at the offset @offs; if
+ * not provided by the driver, spi-nor will send the erase
+ * opcode via write_reg().
+ */
+struct spi_nor_controller_ops {
+ int (*prepare)(struct spi_nor *nor);
+ void (*unprepare)(struct spi_nor *nor);
+ int (*read_reg)(struct spi_nor *nor, u8 opcode, u8 *buf, size_t len);
+ int (*write_reg)(struct spi_nor *nor, u8 opcode, const u8 *buf,
+ size_t len);
+
+ ssize_t (*read)(struct spi_nor *nor, loff_t from, size_t len, u8 *buf);
+ ssize_t (*write)(struct spi_nor *nor, loff_t to, size_t len,
+ const u8 *buf);
+ int (*erase)(struct spi_nor *nor, loff_t offs);
+};
+
+/**
* struct spi_nor_locking_ops - SPI NOR locking methods
* @lock: lock a region of the SPI NOR.
* @unlock: unlock a region of the SPI NOR.
@@ -549,19 +573,7 @@ struct flash_info;
* @read_proto: the SPI protocol for read operations
* @write_proto: the SPI protocol for write operations
* @reg_proto the SPI protocol for read_reg/write_reg/erase operations
- * @prepare: [OPTIONAL] do some preparations for the
- * read/write/erase/lock/unlock operations
- * @unprepare: [OPTIONAL] do some post work after the
- * read/write/erase/lock/unlock operations
- * @read_reg: [DRIVER-SPECIFIC] read out the register
- * @write_reg: [DRIVER-SPECIFIC] write data to the register
- * @read: [DRIVER-SPECIFIC] read data from the SPI NOR
- * @write: [DRIVER-SPECIFIC] write data to the SPI NOR
- * @erase: [DRIVER-SPECIFIC] erase a sector of the SPI NOR
- * at the offset @offs; if not provided by the driver,
- * spi-nor will send the erase opcode via write_reg()
- * @clear_sr_bp: [FLASH-SPECIFIC] clears the Block Protection Bits from
- * the SPI NOR Status Register.
+ * @controller_ops: SPI NOR controller driver specific operations.
* @params: [FLASH-SPECIFIC] SPI-NOR flash parameters and settings.
* The structure includes legacy flash parameters and
* settings that can be overwritten by the spi_nor_fixups
@@ -588,18 +600,8 @@ struct spi_nor {
bool sst_write_second;
u32 flags;
- int (*prepare)(struct spi_nor *nor, enum spi_nor_ops ops);
- void (*unprepare)(struct spi_nor *nor, enum spi_nor_ops ops);
- int (*read_reg)(struct spi_nor *nor, u8 opcode, u8 *buf, int len);
- int (*write_reg)(struct spi_nor *nor, u8 opcode, u8 *buf, int len);
-
- ssize_t (*read)(struct spi_nor *nor, loff_t from,
- size_t len, u_char *read_buf);
- ssize_t (*write)(struct spi_nor *nor, loff_t to,
- size_t len, const u_char *write_buf);
- int (*erase)(struct spi_nor *nor, loff_t offs);
+ const struct spi_nor_controller_ops *controller_ops;
- int (*clear_sr_bp)(struct spi_nor *nor);
struct spi_nor_flash_parameter params;
void *priv;
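
With the consolidation above, a controller driver fills in one const ops table and points nor->controller_ops at it before spi_nor_scan(). A hedged sketch with illustrative my_* stubs:

static int my_prepare(struct spi_nor *nor)
{
	/* claim the bus / runtime-resume the controller */
	return 0;
}

static void my_unprepare(struct spi_nor *nor)
{
}

static int my_read_reg(struct spi_nor *nor, u8 opcode, u8 *buf, size_t len)
{
	/* issue opcode, read len bytes back */
	return 0;
}

static int my_write_reg(struct spi_nor *nor, u8 opcode, const u8 *buf,
			size_t len)
{
	return 0;
}

static ssize_t my_read(struct spi_nor *nor, loff_t from, size_t len, u8 *buf)
{
	return len;	/* number of bytes actually read */
}

static ssize_t my_write(struct spi_nor *nor, loff_t to, size_t len,
			const u8 *buf)
{
	return len;
}

static const struct spi_nor_controller_ops my_controller_ops = {
	.prepare	= my_prepare,
	.unprepare	= my_unprepare,
	.read_reg	= my_read_reg,
	.write_reg	= my_write_reg,
	.read		= my_read,
	.write		= my_write,
	/* .erase omitted: spi-nor falls back to write_reg() with the erase opcode */
};

/* during probe, before spi_nor_scan():  nor->controller_ops = &my_controller_ops; */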
diff --git a/include/linux/mtio.h b/include/linux/mtio.h
new file mode 100644
index 000000000000..67d03156f2c2
--- /dev/null
+++ b/include/linux/mtio.h
@@ -0,0 +1,60 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_MTIO_COMPAT_H
+#define _LINUX_MTIO_COMPAT_H
+
+#include <linux/compat.h>
+#include <uapi/linux/mtio.h>
+#include <linux/uaccess.h>
+
+/*
+ * helper functions for implementing compat ioctls on the four tape
+ * drivers: we define the 32-bit layout of each incompatible structure,
+ * plus a wrapper function to copy it to user space in either format.
+ */
+
+struct mtget32 {
+ s32 mt_type;
+ s32 mt_resid;
+ s32 mt_dsreg;
+ s32 mt_gstat;
+ s32 mt_erreg;
+ s32 mt_fileno;
+ s32 mt_blkno;
+};
+#define MTIOCGET32 _IOR('m', 2, struct mtget32)
+
+struct mtpos32 {
+ s32 mt_blkno;
+};
+#define MTIOCPOS32 _IOR('m', 3, struct mtpos32)
+
+static inline int put_user_mtget(void __user *u, struct mtget *k)
+{
+ struct mtget32 k32 = {
+ .mt_type = k->mt_type,
+ .mt_resid = k->mt_resid,
+ .mt_dsreg = k->mt_dsreg,
+ .mt_gstat = k->mt_gstat,
+ .mt_erreg = k->mt_erreg,
+ .mt_fileno = k->mt_fileno,
+ .mt_blkno = k->mt_blkno,
+ };
+ int ret;
+
+ if (in_compat_syscall())
+ ret = copy_to_user(u, &k32, sizeof(k32));
+ else
+ ret = copy_to_user(u, k, sizeof(*k));
+
+ return ret ? -EFAULT : 0;
+}
+
+static inline int put_user_mtpos(void __user *u, struct mtpos *k)
+{
+ if (in_compat_syscall())
+ return put_user(k->mt_blkno, (u32 __user *)u);
+ else
+ return put_user(k->mt_blkno, (long __user *)u);
+}
+
+#endif
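
A hypothetical tape-driver ioctl path (my_fill_status()/my_fill_pos() are stand-ins, not real functions) shows how put_user_mtget()/put_user_mtpos() pick the right layout via in_compat_syscall():

static long my_tape_ioctl_common(struct file *file, unsigned int cmd,
				 void __user *argp)
{
	struct mtget status;
	struct mtpos pos;

	switch (cmd) {
	case MTIOCGET:
		my_fill_status(file, &status);
		/* copies the 32- or 64-bit layout depending on the caller */
		return put_user_mtget(argp, &status);
	case MTIOCPOS:
		my_fill_pos(file, &pos);
		return put_user_mtpos(argp, &pos);
	}
	return -ENOTTY;
}

#ifdef CONFIG_COMPAT
static long my_tape_compat_ioctl(struct file *file, unsigned int cmd,
				 unsigned long arg)
{
	void __user *argp = compat_ptr(arg);

	/* map the 32-bit request numbers onto the native handlers */
	switch (cmd) {
	case MTIOCGET32:
		return my_tape_ioctl_common(file, MTIOCGET, argp);
	case MTIOCPOS32:
		return my_tape_ioctl_common(file, MTIOCPOS, argp);
	}
	return my_tape_ioctl_common(file, cmd, argp);
}
#endif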
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index aca8f36dfac9..ae197cc00cc8 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -109,8 +109,11 @@ do { \
} while (0)
#ifdef CONFIG_DEBUG_LOCK_ALLOC
-# define __DEP_MAP_MUTEX_INITIALIZER(lockname) \
- , .dep_map = { .name = #lockname }
+# define __DEP_MAP_MUTEX_INITIALIZER(lockname) \
+ , .dep_map = { \
+ .name = #lockname, \
+ .wait_type_inner = LD_WAIT_SLEEP, \
+ }
#else
# define __DEP_MAP_MUTEX_INITIALIZER(lockname)
#endif
diff --git a/include/linux/namei.h b/include/linux/namei.h
index 397a08ade6a2..0dd980d7318f 100644
--- a/include/linux/namei.h
+++ b/include/linux/namei.h
@@ -2,6 +2,7 @@
#ifndef _LINUX_NAMEI_H
#define _LINUX_NAMEI_H
+#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/path.h>
#include <linux/fcntl.h>
@@ -34,11 +35,19 @@ enum {LAST_NORM, LAST_ROOT, LAST_DOT, LAST_DOTDOT, LAST_BIND};
/* internal use only */
#define LOOKUP_PARENT 0x0010
-#define LOOKUP_NO_REVAL 0x0080
#define LOOKUP_JUMPED 0x1000
#define LOOKUP_ROOT 0x2000
#define LOOKUP_ROOT_GRABBED 0x0008
+/* Scoping flags for lookup. */
+#define LOOKUP_NO_SYMLINKS 0x010000 /* No symlink crossing. */
+#define LOOKUP_NO_MAGICLINKS 0x020000 /* No nd_jump_link() crossing. */
+#define LOOKUP_NO_XDEV 0x040000 /* No mountpoint crossing. */
+#define LOOKUP_BENEATH 0x080000 /* No escaping from starting point. */
+#define LOOKUP_IN_ROOT 0x100000 /* Treat dirfd as fs root. */
+/* LOOKUP_* flags which do scope-related checks based on the dirfd. */
+#define LOOKUP_IS_SCOPED (LOOKUP_BENEATH | LOOKUP_IN_ROOT)
+
extern int path_pts(struct path *path);
extern int user_path_at_empty(int, const char __user *, unsigned, struct path *, int *empty);
@@ -60,6 +69,7 @@ extern int kern_path_mountpoint(int, const char *, struct path *, unsigned int);
extern struct dentry *try_lookup_one_len(const char *, struct dentry *, int);
extern struct dentry *lookup_one_len(const char *, struct dentry *, int);
extern struct dentry *lookup_one_len_unlocked(const char *, struct dentry *, int);
+extern struct dentry *lookup_positive_unlocked(const char *, struct dentry *, int);
extern int follow_down_one(struct path *);
extern int follow_down(struct path *);
@@ -68,7 +78,7 @@ extern int follow_up(struct path *);
extern struct dentry *lock_rename(struct dentry *, struct dentry *);
extern void unlock_rename(struct dentry *, struct dentry *);
-extern void nd_jump_link(struct path *path);
+extern int __must_check nd_jump_link(struct path *path);
static inline void nd_terminate_link(void *name, size_t len, size_t maxlen)
{
diff --git a/include/linux/nd.h b/include/linux/nd.h
index f778f962d1b6..55c735997805 100644
--- a/include/linux/nd.h
+++ b/include/linux/nd.h
@@ -147,7 +147,7 @@ static inline int nvdimm_read_bytes(struct nd_namespace_common *ndns,
/**
* nvdimm_write_bytes() - synchronously write bytes to an nvdimm namespace
- * @ndns: device to read
+ * @ndns: device to write
* @offset: namespace-relative starting offset
* @buf: buffer to drain
* @size: transfer length
diff --git a/include/linux/net.h b/include/linux/net.h
index 9cafb5f353a9..6451425e828f 100644
--- a/include/linux/net.h
+++ b/include/linux/net.h
@@ -171,6 +171,7 @@ struct proto_ops {
int (*compat_getsockopt)(struct socket *sock, int level,
int optname, char __user *optval, int __user *optlen);
#endif
+ void (*show_fdinfo)(struct seq_file *m, struct socket *sock);
int (*sendmsg) (struct socket *sock, struct msghdr *m,
size_t total_len);
/* Notes for implementing recvmsg:
diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h
index 4b19c544c59a..34d050bb1ae6 100644
--- a/include/linux/netdev_features.h
+++ b/include/linux/netdev_features.h
@@ -53,8 +53,9 @@ enum {
NETIF_F_GSO_ESP_BIT, /* ... ESP with TSO */
NETIF_F_GSO_UDP_BIT, /* ... UFO, deprecated except tuntap */
NETIF_F_GSO_UDP_L4_BIT, /* ... UDP payload GSO (not UFO) */
+ NETIF_F_GSO_FRAGLIST_BIT, /* ... Fraglist GSO */
/**/NETIF_F_GSO_LAST = /* last bit, see GSO_MASK */
- NETIF_F_GSO_UDP_L4_BIT,
+ NETIF_F_GSO_FRAGLIST_BIT,
NETIF_F_FCOE_CRC_BIT, /* FCoE CRC32 */
NETIF_F_SCTP_CRC_BIT, /* SCTP checksum offload */
@@ -80,6 +81,7 @@ enum {
NETIF_F_GRO_HW_BIT, /* Hardware Generic receive offload */
NETIF_F_HW_TLS_RECORD_BIT, /* Offload TLS record */
+ NETIF_F_GRO_FRAGLIST_BIT, /* Fraglist GRO */
/*
* Add your fresh new feature above and remember to update
@@ -150,6 +152,8 @@ enum {
#define NETIF_F_GSO_UDP_L4 __NETIF_F(GSO_UDP_L4)
#define NETIF_F_HW_TLS_TX __NETIF_F(HW_TLS_TX)
#define NETIF_F_HW_TLS_RX __NETIF_F(HW_TLS_RX)
+#define NETIF_F_GRO_FRAGLIST __NETIF_F(GRO_FRAGLIST)
+#define NETIF_F_GSO_FRAGLIST __NETIF_F(GSO_FRAGLIST)
/* Finds the next feature with the highest number of the range of start till 0.
*/
@@ -226,6 +230,9 @@ static inline int find_next_netdev_feature(u64 feature, unsigned long start)
/* changeable features with no special hardware requirements */
#define NETIF_F_SOFT_FEATURES (NETIF_F_GSO | NETIF_F_GRO)
+/* Changeable features with no special hardware requirements that defaults to off. */
+#define NETIF_F_SOFT_FEATURES_OFF NETIF_F_GRO_FRAGLIST
+
#define NETIF_F_VLAN_FEATURES (NETIF_F_HW_VLAN_CTAG_FILTER | \
NETIF_F_HW_VLAN_CTAG_RX | \
NETIF_F_HW_VLAN_CTAG_TX | \
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index c20f190b4c18..6c3f7032e8d9 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -72,6 +72,8 @@ void netdev_set_default_ethtool_ops(struct net_device *dev,
#define NET_RX_SUCCESS 0 /* keep 'em coming, baby */
#define NET_RX_DROP 1 /* packet dropped */
+#define MAX_NEST_DEV 8
+
/*
* Transmit return codes: transmit return codes originate from three different
* namespaces:
@@ -848,6 +850,9 @@ enum tc_setup_type {
TC_SETUP_ROOT_QDISC,
TC_SETUP_QDISC_GRED,
TC_SETUP_QDISC_TAPRIO,
+ TC_SETUP_FT,
+ TC_SETUP_QDISC_ETS,
+ TC_SETUP_QDISC_TBF,
};
/* These structures hold the attributes of bpf state that are being passed
@@ -874,6 +879,7 @@ enum bpf_netdev_command {
struct bpf_prog_offload_ops;
struct netlink_ext_ack;
struct xdp_umem;
+struct xdp_dev_bulk_queue;
struct netdev_bpf {
enum bpf_netdev_command command;
@@ -925,6 +931,20 @@ struct dev_ifalias {
struct devlink;
struct tlsdev_ops;
+struct netdev_name_node {
+ struct hlist_node hlist;
+ struct list_head list;
+ struct net_device *dev;
+ const char *name;
+};
+
+int netdev_name_node_alt_create(struct net_device *dev, const char *name);
+int netdev_name_node_alt_destroy(struct net_device *dev, const char *name);
+
+struct netdev_net_notifier {
+ struct list_head list;
+ struct notifier_block *nb;
+};
/*
* This structure defines the management hooks for network devices.
@@ -1004,7 +1024,7 @@ struct tlsdev_ops;
* Called when a user wants to change the Maximum Transfer Unit
* of a device.
*
- * void (*ndo_tx_timeout)(struct net_device *dev);
+ * void (*ndo_tx_timeout)(struct net_device *dev, unsigned int txqueue);
* Callback used when the transmitter has not made any progress
* for dev->watchdog ticks.
*
@@ -1271,7 +1291,8 @@ struct net_device_ops {
int new_mtu);
int (*ndo_neigh_setup)(struct net_device *dev,
struct neigh_parms *);
- void (*ndo_tx_timeout) (struct net_device *dev);
+ void (*ndo_tx_timeout) (struct net_device *dev,
+ unsigned int txqueue);
void (*ndo_get_stats64)(struct net_device *dev,
struct rtnl_link_stats64 *storage);
@@ -1317,6 +1338,10 @@ struct net_device_ops {
struct nlattr *port[]);
int (*ndo_get_vf_port)(struct net_device *dev,
int vf, struct sk_buff *skb);
+ int (*ndo_get_vf_guid)(struct net_device *dev,
+ int vf,
+ struct ifla_vf_guid *node_guid,
+ struct ifla_vf_guid *port_guid);
int (*ndo_set_vf_guid)(struct net_device *dev,
int vf, u64 guid,
int guid_type);
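
Since ndo_tx_timeout() now receives the stalled queue index, a driver-side sketch of the new signature looks as follows; struct my_priv and its reset_work are illustrative:

struct my_priv {
	struct work_struct reset_work;
	/* ... */
};

static void my_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
	struct my_priv *priv = netdev_priv(dev);
	struct netdev_queue *txq = netdev_get_tx_queue(dev, txqueue);

	netdev_warn(dev, "TX timeout on queue %u (trans_start %lu)\n",
		    txqueue, READ_ONCE(txq->trans_start));
	schedule_work(&priv->reset_work);	/* recover the stuck queue */
}

static const struct net_device_ops my_netdev_ops = {
	/* ... */
	.ndo_tx_timeout	= my_tx_timeout,
};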
@@ -1564,7 +1589,7 @@ enum netdev_priv_flags {
* (i.e. as seen by users in the "Space.c" file). It is the name
* of the interface.
*
- * @name_hlist: Device name hash chain, please keep it close to name[]
+ * @name_node: Name hashlist node
* @ifalias: SNMP alias
* @mem_end: Shared memory end
* @mem_start: Shared memory start
@@ -1593,6 +1618,7 @@ enum netdev_priv_flags {
* and drivers will need to set them appropriately.
*
* @mpls_features: Mask of features inheritable by MPLS
+ * @gso_partial_features: value(s) from NETIF_F_GSO\*
*
* @ifindex: interface index
* @group: The group the device belongs to
@@ -1617,8 +1643,11 @@ enum netdev_priv_flags {
* @netdev_ops: Includes several pointers to callbacks,
* if one wants to override the ndo_*() functions
* @ethtool_ops: Management operations
+ * @l3mdev_ops: Layer 3 master device operations
* @ndisc_ops: Includes callbacks for different IPv6 neighbour
* discovery handling. Necessary for e.g. 6LoWPAN.
+ * @xfrmdev_ops: Transformation offload operations
+ * @tlsdev_ops: Transport Layer Security offload operations
* @header_ops: Includes callbacks for creating,parsing,caching,etc
* of Layer 2 headers.
*
@@ -1657,6 +1686,7 @@ enum netdev_priv_flags {
* @dev_port: Used to differentiate devices that share
* the same function
* @addr_list_lock: XXX: need comments on this one
+ * @name_assign_type: network interface name assignment type
* @uc_promisc: Counter that indicates promiscuous mode
* has been enabled due to the need to listen to
* additional unicast addresses in a device that
@@ -1679,6 +1709,9 @@ enum netdev_priv_flags {
* @ip6_ptr: IPv6 specific data
* @ax25_ptr: AX.25 specific data
* @ieee80211_ptr: IEEE 802.11 specific data, assign before registering
+ * @ieee802154_ptr: IEEE 802.15.4 low-rate Wireless Personal Area Network
+ * device struct
+ * @mpls_ptr: mpls_dev struct pointer
*
* @dev_addr: Hw address (before bcast,
* because most packets are unicast)
@@ -1687,12 +1720,15 @@ enum netdev_priv_flags {
* @num_rx_queues: Number of RX queues
* allocated at register_netdev() time
* @real_num_rx_queues: Number of RX queues currently active in device
+ * @xdp_prog: XDP sockets filter program pointer
+ * @gro_flush_timeout: timeout for GRO layer in NAPI
*
* @rx_handler: handler for received packets
* @rx_handler_data: XXX: need comments on this one
* @miniq_ingress: ingress/clsact qdisc specific data for
* ingress processing
* @ingress_queue: XXX: need comments on this one
+ * @nf_hooks_ingress: netfilter hooks executed for ingress packets
* @broadcast: hw bcast address
*
* @rx_cpu_rmap: CPU reverse-mapping for RX completion interrupts,
@@ -1707,10 +1743,14 @@ enum netdev_priv_flags {
* @qdisc: Root qdisc from userspace point of view
* @tx_queue_len: Max frames per queue allowed
* @tx_global_lock: XXX: need comments on this one
+ * @xdp_bulkq: XDP device bulk queue
+ * @xps_cpus_map: all CPUs map for XPS device
+ * @xps_rxqs_map: all RXQs map for XPS device
*
* @xps_maps: XXX: need comments on this one
* @miniq_egress: clsact qdisc specific data for
* egress processing
+ * @qdisc_hash: qdisc hash table
* @watchdog_timeo: Represents the timeout that is used by
* the watchdog (see dev_watchdog())
* @watchdog_timer: List of timers
@@ -1761,7 +1801,7 @@ enum netdev_priv_flags {
* for hardware timestamping
* @sfp_bus: attached &struct sfp_bus structure.
* @qdisc_tx_busylock_key: lockdep class annotating Qdisc->busylock
- spinlock
+ * spinlock
* @qdisc_running_key: lockdep class annotating Qdisc->running seqcount
* @qdisc_xmit_lock_key: lockdep class annotating
* netdev_queue->_xmit_lock spinlock
@@ -1774,13 +1814,17 @@ enum netdev_priv_flags {
*
* @wol_enabled: Wake-on-LAN is enabled
*
+ * @net_notifier_list: List of per-net netdev notifier block
+ * that follow this device when it is moved
+ * to another network namespace.
+ *
* FIXME: cleanup struct net_device such that network protocol info
* moves out.
*/
struct net_device {
char name[IFNAMSIZ];
- struct hlist_node name_hlist;
+ struct netdev_name_node *name_node;
struct dev_ifalias __rcu *ifalias;
/*
* I/O specific fields
@@ -1867,6 +1911,11 @@ struct net_device {
unsigned char if_port;
unsigned char dma;
+	/* Note: dev->mtu is often read without holding a lock.
+ * Writers usually hold RTNL.
+ * It is recommended to use READ_ONCE() to annotate the reads,
+ * and to use WRITE_ONCE() to annotate the writes.
+ */
unsigned int mtu;
unsigned int min_mtu;
unsigned int max_mtu;
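A lockless datapath reader following the annotation advice above would look roughly like this sketch (foo_ name hypothetical):

static bool foo_skb_exceeds_mtu(const struct sk_buff *skb,
				const struct net_device *dev)
{
	/* dev->mtu may change under us; annotate the lockless read. */
	return skb->len > READ_ONCE(dev->mtu);
}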
@@ -1964,12 +2013,10 @@ struct net_device {
unsigned int num_tx_queues;
unsigned int real_num_tx_queues;
struct Qdisc *qdisc;
-#ifdef CONFIG_NET_SCHED
- DECLARE_HASHTABLE (qdisc_hash, 4);
-#endif
unsigned int tx_queue_len;
spinlock_t tx_global_lock;
- int watchdog_timeo;
+
+ struct xdp_dev_bulk_queue __percpu *xdp_bulkq;
#ifdef CONFIG_XPS
struct xps_dev_maps __rcu *xps_cpus_map;
@@ -1979,11 +2026,15 @@ struct net_device {
struct mini_Qdisc __rcu *miniq_egress;
#endif
+#ifdef CONFIG_NET_SCHED
+ DECLARE_HASHTABLE (qdisc_hash, 4);
+#endif
/* These may be needed for future network-power-down code. */
struct timer_list watchdog_timer;
+ int watchdog_timeo;
- int __percpu *pcpu_refcnt;
struct list_head todo_list;
+ int __percpu *pcpu_refcnt;
struct list_head link_watch_list;
@@ -2059,6 +2110,8 @@ struct net_device {
struct lock_class_key addr_list_lock_key;
bool proto_down;
unsigned wol_enabled:1;
+
+ struct list_head net_notifier_list;
};
#define to_net_dev(d) container_of(d, struct net_device, dev)
@@ -2300,7 +2353,8 @@ struct napi_gro_cb {
/* Number of gro_receive callbacks this packet already went through */
u8 recursion_counter:4;
- /* 1 bit hole */
+ /* GRO is done by frag_list pointer chaining. */
+ u8 is_flist:1;
/* used to support CHECKSUM_COMPLETE for tunneling protocols */
__wsum csum;
@@ -2387,11 +2441,23 @@ struct pcpu_sw_netstats {
} __aligned(4 * sizeof(u64));
struct pcpu_lstats {
- u64 packets;
- u64 bytes;
+ u64_stats_t packets;
+ u64_stats_t bytes;
struct u64_stats_sync syncp;
} __aligned(2 * sizeof(u64));
+void dev_lstats_read(struct net_device *dev, u64 *packets, u64 *bytes);
+
+static inline void dev_lstats_add(struct net_device *dev, unsigned int len)
+{
+ struct pcpu_lstats *lstats = this_cpu_ptr(dev->lstats);
+
+ u64_stats_update_begin(&lstats->syncp);
+ u64_stats_add(&lstats->bytes, len);
+ u64_stats_inc(&lstats->packets);
+ u64_stats_update_end(&lstats->syncp);
+}
+
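As a usage sketch (driver names hypothetical): a virtual device whose dev->lstats was allocated with netdev_alloc_pcpu_stats() can account TX traffic in its xmit path and fold the per-CPU counters back together in its stats callback.

static netdev_tx_t foo_xmit(struct sk_buff *skb, struct net_device *dev)
{
	dev_lstats_add(dev, skb->len);	/* per-CPU, u64_stats-protected */
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}

static void foo_get_stats64(struct net_device *dev,
			    struct rtnl_link_stats64 *stats)
{
	u64 packets, bytes;

	dev_lstats_read(dev, &packets, &bytes);
	stats->tx_packets = packets;
	stats->tx_bytes = bytes;
}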
#define __netdev_alloc_pcpu_stats(type, gfp) \
({ \
typeof(type) __percpu *pcpu_stats = alloc_percpu_gfp(type, gfp);\
@@ -2487,6 +2553,15 @@ const char *netdev_cmd_to_name(enum netdev_cmd cmd);
int register_netdevice_notifier(struct notifier_block *nb);
int unregister_netdevice_notifier(struct notifier_block *nb);
+int register_netdevice_notifier_net(struct net *net, struct notifier_block *nb);
+int unregister_netdevice_notifier_net(struct net *net,
+ struct notifier_block *nb);
+int register_netdevice_notifier_dev_net(struct net_device *dev,
+ struct notifier_block *nb,
+ struct netdev_net_notifier *nn);
+int unregister_netdevice_notifier_dev_net(struct net_device *dev,
+ struct notifier_block *nb,
+ struct netdev_net_notifier *nn);
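A sketch of the per-device variant (foo_ names hypothetical): the caller supplies a struct netdev_net_notifier so the registration can follow the device when it is later moved to another network namespace.

static int foo_netdev_event(struct notifier_block *nb,
			    unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	/* Only events from the namespace the tracked device lives in
	 * are delivered here.
	 */
	netdev_dbg(dev, "event %lu\n", event);
	return NOTIFY_DONE;
}

static struct notifier_block foo_nb = { .notifier_call = foo_netdev_event };
static struct netdev_net_notifier foo_nn;

static int foo_register(struct net_device *dev)
{
	return register_netdevice_notifier_dev_net(dev, &foo_nb, &foo_nn);
}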
struct netdev_notifier_info {
struct net_device *dev;
@@ -2557,6 +2632,9 @@ extern rwlock_t dev_base_lock; /* Device list lock */
list_for_each_entry_safe(d, n, &(net)->dev_base_head, dev_list)
#define for_each_netdev_continue(net, d) \
list_for_each_entry_continue(d, &(net)->dev_base_head, dev_list)
+#define for_each_netdev_continue_reverse(net, d) \
+ list_for_each_entry_continue_reverse(d, &(net)->dev_base_head, \
+ dev_list)
#define for_each_netdev_continue_rcu(net, d) \
list_for_each_entry_continue_rcu(d, &(net)->dev_base_head, dev_list)
#define for_each_netdev_in_bond_rcu(bond, slave) \
@@ -2650,6 +2728,7 @@ struct net_device *dev_get_by_napi_id(unsigned int napi_id);
int netdev_get_name(struct net *net, char *name, int ifindex);
int dev_restart(struct net_device *dev);
int skb_gro_receive(struct sk_buff *p, struct sk_buff *skb);
+int skb_gro_receive_list(struct sk_buff *p, struct sk_buff *skb);
static inline unsigned int skb_gro_offset(const struct sk_buff *skb)
{
@@ -2786,16 +2865,16 @@ static inline bool __skb_gro_checksum_convert_check(struct sk_buff *skb)
}
static inline void __skb_gro_checksum_convert(struct sk_buff *skb,
- __sum16 check, __wsum pseudo)
+ __wsum pseudo)
{
NAPI_GRO_CB(skb)->csum = ~pseudo;
NAPI_GRO_CB(skb)->csum_valid = 1;
}
-#define skb_gro_checksum_try_convert(skb, proto, check, compute_pseudo) \
+#define skb_gro_checksum_try_convert(skb, proto, compute_pseudo) \
do { \
if (__skb_gro_checksum_convert_check(skb)) \
- __skb_gro_checksum_convert(skb, check, \
+ __skb_gro_checksum_convert(skb, \
compute_pseudo(skb, proto)); \
} while (0)
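After this conversion a caller such as a UDP GRO receive path passes only the protocol and the pseudo-header helper, roughly (sketch):

	skb_gro_checksum_try_convert(skb, IPPROTO_UDP,
				     inet_gro_compute_pseudo);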
@@ -3485,7 +3564,7 @@ static inline unsigned int netif_attrmask_next(int n, const unsigned long *srcp,
}
/**
- * netif_attrmask_next_and - get the next CPU/Rx queue in *src1p & *src2p
+ * netif_attrmask_next_and - get the next CPU/Rx queue in \*src1p & \*src2p
* @n: CPU/Rx queue index
* @src1p: the first CPUs/Rx queues mask pointer
* @src2p: the second CPUs/Rx queues mask pointer
@@ -3661,6 +3740,8 @@ int dev_set_alias(struct net_device *, const char *, size_t);
int dev_get_alias(const struct net_device *, char *, size_t);
int dev_change_net_namespace(struct net_device *, struct net *, const char *);
int __dev_set_mtu(struct net_device *, int);
+int dev_validate_mtu(struct net_device *dev, int mtu,
+ struct netlink_ext_ack *extack);
int dev_set_mtu_ext(struct net_device *dev, int mtu,
struct netlink_ext_ack *extack);
int dev_set_mtu(struct net_device *, int);
@@ -3848,22 +3929,48 @@ void netif_device_attach(struct net_device *dev);
*/
enum {
- NETIF_MSG_DRV = 0x0001,
- NETIF_MSG_PROBE = 0x0002,
- NETIF_MSG_LINK = 0x0004,
- NETIF_MSG_TIMER = 0x0008,
- NETIF_MSG_IFDOWN = 0x0010,
- NETIF_MSG_IFUP = 0x0020,
- NETIF_MSG_RX_ERR = 0x0040,
- NETIF_MSG_TX_ERR = 0x0080,
- NETIF_MSG_TX_QUEUED = 0x0100,
- NETIF_MSG_INTR = 0x0200,
- NETIF_MSG_TX_DONE = 0x0400,
- NETIF_MSG_RX_STATUS = 0x0800,
- NETIF_MSG_PKTDATA = 0x1000,
- NETIF_MSG_HW = 0x2000,
- NETIF_MSG_WOL = 0x4000,
+ NETIF_MSG_DRV_BIT,
+ NETIF_MSG_PROBE_BIT,
+ NETIF_MSG_LINK_BIT,
+ NETIF_MSG_TIMER_BIT,
+ NETIF_MSG_IFDOWN_BIT,
+ NETIF_MSG_IFUP_BIT,
+ NETIF_MSG_RX_ERR_BIT,
+ NETIF_MSG_TX_ERR_BIT,
+ NETIF_MSG_TX_QUEUED_BIT,
+ NETIF_MSG_INTR_BIT,
+ NETIF_MSG_TX_DONE_BIT,
+ NETIF_MSG_RX_STATUS_BIT,
+ NETIF_MSG_PKTDATA_BIT,
+ NETIF_MSG_HW_BIT,
+ NETIF_MSG_WOL_BIT,
+
+ /* When you add a new bit above, update netif_msg_class_names array
+ * in net/ethtool/common.c
+ */
+ NETIF_MSG_CLASS_COUNT,
};
+/* Both ethtool_ops interface and internal driver implementation use u32 */
+static_assert(NETIF_MSG_CLASS_COUNT <= 32);
+
+#define __NETIF_MSG_BIT(bit) ((u32)1 << (bit))
+#define __NETIF_MSG(name) __NETIF_MSG_BIT(NETIF_MSG_ ## name ## _BIT)
+
+#define NETIF_MSG_DRV __NETIF_MSG(DRV)
+#define NETIF_MSG_PROBE __NETIF_MSG(PROBE)
+#define NETIF_MSG_LINK __NETIF_MSG(LINK)
+#define NETIF_MSG_TIMER __NETIF_MSG(TIMER)
+#define NETIF_MSG_IFDOWN __NETIF_MSG(IFDOWN)
+#define NETIF_MSG_IFUP __NETIF_MSG(IFUP)
+#define NETIF_MSG_RX_ERR __NETIF_MSG(RX_ERR)
+#define NETIF_MSG_TX_ERR __NETIF_MSG(TX_ERR)
+#define NETIF_MSG_TX_QUEUED __NETIF_MSG(TX_QUEUED)
+#define NETIF_MSG_INTR __NETIF_MSG(INTR)
+#define NETIF_MSG_TX_DONE __NETIF_MSG(TX_DONE)
+#define NETIF_MSG_RX_STATUS __NETIF_MSG(RX_STATUS)
+#define NETIF_MSG_PKTDATA __NETIF_MSG(PKTDATA)
+#define NETIF_MSG_HW __NETIF_MSG(HW)
+#define NETIF_MSG_WOL __NETIF_MSG(WOL)
#define netif_msg_drv(p) ((p)->msg_enable & NETIF_MSG_DRV)
#define netif_msg_probe(p) ((p)->msg_enable & NETIF_MSG_PROBE)
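The derived masks keep their historical numeric values, so existing msg_enable handling in drivers is unchanged; a quick sanity sketch (foo_priv is a hypothetical driver private struct with u32 msg_enable and a netdev pointer):

static_assert(NETIF_MSG_DRV == 0x0001);
static_assert(NETIF_MSG_WOL == 0x4000);

static void foo_link_up(struct foo_priv *priv)
{
	if (netif_msg_link(priv))	/* still tests priv->msg_enable */
		netdev_info(priv->netdev, "link is up\n");
}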
@@ -4081,9 +4188,6 @@ struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
unsigned char name_assign_type,
void (*setup)(struct net_device *),
unsigned int txqs, unsigned int rxqs);
-int dev_get_valid_name(struct net *net, struct net_device *dev,
- const char *name);
-
#define alloc_netdev(sizeof_priv, name, name_assign_type, setup) \
alloc_netdev_mqs(sizeof_priv, name, name_assign_type, setup, 1, 1)
@@ -4287,11 +4391,8 @@ void *netdev_lower_get_next(struct net_device *dev,
ldev; \
ldev = netdev_lower_get_next(dev, &(iter)))
-struct net_device *netdev_all_lower_get_next(struct net_device *dev,
+struct net_device *netdev_next_lower_dev_rcu(struct net_device *dev,
struct list_head **iter);
-struct net_device *netdev_all_lower_get_next_rcu(struct net_device *dev,
- struct list_head **iter);
-
int netdev_walk_all_lower_dev(struct net_device *dev,
int (*fn)(struct net_device *lower_dev,
void *data),
@@ -4357,6 +4458,15 @@ struct netdev_notifier_bonding_info {
void netdev_bonding_info_change(struct net_device *dev,
struct netdev_bonding_info *bonding_info);
+#if IS_ENABLED(CONFIG_ETHTOOL_NETLINK)
+void ethtool_notify(struct net_device *dev, unsigned int cmd, const void *data);
+#else
+static inline void ethtool_notify(struct net_device *dev, unsigned int cmd,
+ const void *data)
+{
+}
+#endif
+
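A sketch of a notification call site; the empty inline stub above keeps such callers compilable when ethtool netlink support is disabled. ETHTOOL_MSG_LINKINFO_NTF is assumed here to be one of the ETHTOOL_MSG_*_NTF command values from <uapi/linux/ethtool_netlink.h>.

	/* Tell ethtool netlink listeners that the link settings changed. */
	ethtool_notify(dev, ETHTOOL_MSG_LINKINFO_NTF, NULL);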
static inline
struct sk_buff *skb_gso_segment(struct sk_buff *skb, netdev_features_t features)
{
@@ -4518,6 +4628,7 @@ static inline bool net_gso_ok(netdev_features_t features, int gso_type)
BUILD_BUG_ON(SKB_GSO_ESP != (NETIF_F_GSO_ESP >> NETIF_F_GSO_SHIFT));
BUILD_BUG_ON(SKB_GSO_UDP != (NETIF_F_GSO_UDP >> NETIF_F_GSO_SHIFT));
BUILD_BUG_ON(SKB_GSO_UDP_L4 != (NETIF_F_GSO_UDP_L4 >> NETIF_F_GSO_SHIFT));
+ BUILD_BUG_ON(SKB_GSO_FRAGLIST != (NETIF_F_GSO_FRAGLIST >> NETIF_F_GSO_SHIFT));
return (features & feature) == feature;
}
diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
index 77ebb61faf48..eb312e7ca36e 100644
--- a/include/linux/netfilter.h
+++ b/include/linux/netfilter.h
@@ -199,6 +199,8 @@ extern struct static_key nf_hooks_needed[NFPROTO_NUMPROTO][NF_MAX_HOOKS];
int nf_hook_slow(struct sk_buff *skb, struct nf_hook_state *state,
const struct nf_hook_entries *e, unsigned int i);
+void nf_hook_slow_list(struct list_head *head, struct nf_hook_state *state,
+ const struct nf_hook_entries *e);
/**
* nf_hook - call a netfilter hook
*
@@ -311,17 +313,36 @@ NF_HOOK_LIST(uint8_t pf, unsigned int hook, struct net *net, struct sock *sk,
struct list_head *head, struct net_device *in, struct net_device *out,
int (*okfn)(struct net *, struct sock *, struct sk_buff *))
{
- struct sk_buff *skb, *next;
- struct list_head sublist;
-
- INIT_LIST_HEAD(&sublist);
- list_for_each_entry_safe(skb, next, head, list) {
- list_del(&skb->list);
- if (nf_hook(pf, hook, net, sk, skb, in, out, okfn) == 1)
- list_add_tail(&skb->list, &sublist);
+ struct nf_hook_entries *hook_head = NULL;
+
+#ifdef CONFIG_JUMP_LABEL
+ if (__builtin_constant_p(pf) &&
+ __builtin_constant_p(hook) &&
+ !static_key_false(&nf_hooks_needed[pf][hook]))
+ return;
+#endif
+
+ rcu_read_lock();
+ switch (pf) {
+ case NFPROTO_IPV4:
+ hook_head = rcu_dereference(net->nf.hooks_ipv4[hook]);
+ break;
+ case NFPROTO_IPV6:
+ hook_head = rcu_dereference(net->nf.hooks_ipv6[hook]);
+ break;
+ default:
+ WARN_ON_ONCE(1);
+ break;
}
- /* Put passed packets back on main list */
- list_splice(&sublist, head);
+
+ if (hook_head) {
+ struct nf_hook_state state;
+
+ nf_hook_state_init(&state, hook, pf, in, out, sk, net, okfn);
+
+ nf_hook_slow_list(head, &state, hook_head);
+ }
+ rcu_read_unlock();
}
/* Call setsockopt() */
diff --git a/include/linux/netfilter/ipset/ip_set.h b/include/linux/netfilter/ipset/ip_set.h
index 9bc255a8461b..5448c8b443db 100644
--- a/include/linux/netfilter/ipset/ip_set.h
+++ b/include/linux/netfilter/ipset/ip_set.h
@@ -121,6 +121,7 @@ struct ip_set_ext {
u32 timeout;
u8 packets_op;
u8 bytes_op;
+ bool target;
};
struct ip_set;
@@ -187,6 +188,14 @@ struct ip_set_type_variant {
/* Return true if "b" set is the same as "a"
* according to the create set parameters */
bool (*same_set)(const struct ip_set *a, const struct ip_set *b);
+ /* Region-locking is used */
+ bool region_lock;
+};
+
+struct ip_set_region {
+ spinlock_t lock; /* Region lock */
+ size_t ext_size; /* Size of the dynamic extensions */
+ u32 elements; /* Number of elements vs timeout */
};
/* The core set type structure */
@@ -269,34 +278,15 @@ ip_set_ext_destroy(struct ip_set *set, void *data)
/* Check that the extension is enabled for the set and
 * call its destroy function for its extension part in data.
*/
- if (SET_WITH_COMMENT(set))
- ip_set_extensions[IPSET_EXT_ID_COMMENT].destroy(
- set, ext_comment(data, set));
-}
+ if (SET_WITH_COMMENT(set)) {
+ struct ip_set_comment *c = ext_comment(data, set);
-static inline int
-ip_set_put_flags(struct sk_buff *skb, struct ip_set *set)
-{
- u32 cadt_flags = 0;
-
- if (SET_WITH_TIMEOUT(set))
- if (unlikely(nla_put_net32(skb, IPSET_ATTR_TIMEOUT,
- htonl(set->timeout))))
- return -EMSGSIZE;
- if (SET_WITH_COUNTER(set))
- cadt_flags |= IPSET_FLAG_WITH_COUNTERS;
- if (SET_WITH_COMMENT(set))
- cadt_flags |= IPSET_FLAG_WITH_COMMENT;
- if (SET_WITH_SKBINFO(set))
- cadt_flags |= IPSET_FLAG_WITH_SKBINFO;
- if (SET_WITH_FORCEADD(set))
- cadt_flags |= IPSET_FLAG_WITH_FORCEADD;
-
- if (!cadt_flags)
- return 0;
- return nla_put_net32(skb, IPSET_ATTR_CADT_FLAGS, htonl(cadt_flags));
+ ip_set_extensions[IPSET_EXT_ID_COMMENT].destroy(set, c);
+ }
}
+int ip_set_put_flags(struct sk_buff *skb, struct ip_set *set);
+
/* Netlink CB args */
enum {
IPSET_CB_NET = 0, /* net namespace */
@@ -445,13 +435,6 @@ ip6addrptr(const struct sk_buff *skb, bool src, struct in6_addr *addr)
sizeof(*addr));
}
-/* Calculate the bytes required to store the inclusive range of a-b */
-static inline int
-bitmap_bytes(u32 a, u32 b)
-{
- return 4 * ((((b - a + 8) / 8) + 3) / 4);
-}
-
/* How often should the gc be run by default */
#define IPSET_GC_TIME (3 * 60)
@@ -506,144 +489,8 @@ ip_set_timeout_set(unsigned long *timeout, u32 value)
*timeout = t;
}
-static inline u32
-ip_set_timeout_get(const unsigned long *timeout)
-{
- u32 t;
-
- if (*timeout == IPSET_ELEM_PERMANENT)
- return 0;
-
- t = jiffies_to_msecs(*timeout - jiffies)/MSEC_PER_SEC;
- /* Zero value in userspace means no timeout */
- return t == 0 ? 1 : t;
-}
-
-static inline char*
-ip_set_comment_uget(struct nlattr *tb)
-{
- return nla_data(tb);
-}
-
-/* Called from uadd only, protected by the set spinlock.
- * The kadt functions don't use the comment extensions in any way.
- */
-static inline void
-ip_set_init_comment(struct ip_set *set, struct ip_set_comment *comment,
- const struct ip_set_ext *ext)
-{
- struct ip_set_comment_rcu *c = rcu_dereference_protected(comment->c, 1);
- size_t len = ext->comment ? strlen(ext->comment) : 0;
-
- if (unlikely(c)) {
- set->ext_size -= sizeof(*c) + strlen(c->str) + 1;
- kfree_rcu(c, rcu);
- rcu_assign_pointer(comment->c, NULL);
- }
- if (!len)
- return;
- if (unlikely(len > IPSET_MAX_COMMENT_SIZE))
- len = IPSET_MAX_COMMENT_SIZE;
- c = kmalloc(sizeof(*c) + len + 1, GFP_ATOMIC);
- if (unlikely(!c))
- return;
- strlcpy(c->str, ext->comment, len + 1);
- set->ext_size += sizeof(*c) + strlen(c->str) + 1;
- rcu_assign_pointer(comment->c, c);
-}
-
-/* Used only when dumping a set, protected by rcu_read_lock() */
-static inline int
-ip_set_put_comment(struct sk_buff *skb, const struct ip_set_comment *comment)
-{
- struct ip_set_comment_rcu *c = rcu_dereference(comment->c);
-
- if (!c)
- return 0;
- return nla_put_string(skb, IPSET_ATTR_COMMENT, c->str);
-}
-
-/* Called from uadd/udel, flush or the garbage collectors protected
- * by the set spinlock.
- * Called when the set is destroyed and when there can't be any user
- * of the set data anymore.
- */
-static inline void
-ip_set_comment_free(struct ip_set *set, struct ip_set_comment *comment)
-{
- struct ip_set_comment_rcu *c;
-
- c = rcu_dereference_protected(comment->c, 1);
- if (unlikely(!c))
- return;
- set->ext_size -= sizeof(*c) + strlen(c->str) + 1;
- kfree_rcu(c, rcu);
- rcu_assign_pointer(comment->c, NULL);
-}
-
-static inline void
-ip_set_add_bytes(u64 bytes, struct ip_set_counter *counter)
-{
- atomic64_add((long long)bytes, &(counter)->bytes);
-}
-
-static inline void
-ip_set_add_packets(u64 packets, struct ip_set_counter *counter)
-{
- atomic64_add((long long)packets, &(counter)->packets);
-}
-
-static inline u64
-ip_set_get_bytes(const struct ip_set_counter *counter)
-{
- return (u64)atomic64_read(&(counter)->bytes);
-}
-
-static inline u64
-ip_set_get_packets(const struct ip_set_counter *counter)
-{
- return (u64)atomic64_read(&(counter)->packets);
-}
-
-static inline bool
-ip_set_match_counter(u64 counter, u64 match, u8 op)
-{
- switch (op) {
- case IPSET_COUNTER_NONE:
- return true;
- case IPSET_COUNTER_EQ:
- return counter == match;
- case IPSET_COUNTER_NE:
- return counter != match;
- case IPSET_COUNTER_LT:
- return counter < match;
- case IPSET_COUNTER_GT:
- return counter > match;
- }
- return false;
-}
-
-static inline void
-ip_set_update_counter(struct ip_set_counter *counter,
- const struct ip_set_ext *ext, u32 flags)
-{
- if (ext->packets != ULLONG_MAX &&
- !(flags & IPSET_FLAG_SKIP_COUNTER_UPDATE)) {
- ip_set_add_bytes(ext->bytes, counter);
- ip_set_add_packets(ext->packets, counter);
- }
-}
-
-static inline bool
-ip_set_put_counter(struct sk_buff *skb, const struct ip_set_counter *counter)
-{
- return nla_put_net64(skb, IPSET_ATTR_BYTES,
- cpu_to_be64(ip_set_get_bytes(counter)),
- IPSET_ATTR_PAD) ||
- nla_put_net64(skb, IPSET_ATTR_PACKETS,
- cpu_to_be64(ip_set_get_packets(counter)),
- IPSET_ATTR_PAD);
-}
+void ip_set_init_comment(struct ip_set *set, struct ip_set_comment *comment,
+ const struct ip_set_ext *ext);
static inline void
ip_set_init_counter(struct ip_set_counter *counter,
@@ -656,31 +503,6 @@ ip_set_init_counter(struct ip_set_counter *counter,
}
static inline void
-ip_set_get_skbinfo(struct ip_set_skbinfo *skbinfo,
- const struct ip_set_ext *ext,
- struct ip_set_ext *mext, u32 flags)
-{
- mext->skbinfo = *skbinfo;
-}
-
-static inline bool
-ip_set_put_skbinfo(struct sk_buff *skb, const struct ip_set_skbinfo *skbinfo)
-{
- /* Send nonzero parameters only */
- return ((skbinfo->skbmark || skbinfo->skbmarkmask) &&
- nla_put_net64(skb, IPSET_ATTR_SKBMARK,
- cpu_to_be64((u64)skbinfo->skbmark << 32 |
- skbinfo->skbmarkmask),
- IPSET_ATTR_PAD)) ||
- (skbinfo->skbprio &&
- nla_put_net32(skb, IPSET_ATTR_SKBPRIO,
- cpu_to_be32(skbinfo->skbprio))) ||
- (skbinfo->skbqueue &&
- nla_put_net16(skb, IPSET_ATTR_SKBQUEUE,
- cpu_to_be16(skbinfo->skbqueue)));
-}
-
-static inline void
ip_set_init_skbinfo(struct ip_set_skbinfo *skbinfo,
const struct ip_set_ext *ext)
{
@@ -688,7 +510,7 @@ ip_set_init_skbinfo(struct ip_set_skbinfo *skbinfo,
}
#define IP_SET_INIT_KEXT(skb, opt, set) \
- { .bytes = (skb)->len, .packets = 1, \
+ { .bytes = (skb)->len, .packets = 1, .target = true,\
.timeout = ip_set_adt_opt_timeout(opt, set) }
#define IP_SET_INIT_UEXT(set) \
diff --git a/include/linux/netfilter/ipset/ip_set_bitmap.h b/include/linux/netfilter/ipset/ip_set_bitmap.h
index 2dddbc6dcac7..fcc4d214a788 100644
--- a/include/linux/netfilter/ipset/ip_set_bitmap.h
+++ b/include/linux/netfilter/ipset/ip_set_bitmap.h
@@ -12,18 +12,4 @@ enum {
IPSET_ADD_START_STORED_TIMEOUT,
};
-/* Common functions */
-
-static inline u32
-range_to_mask(u32 from, u32 to, u8 *bits)
-{
- u32 mask = 0xFFFFFFFE;
-
- *bits = 32;
- while (--(*bits) > 0 && mask && (to & mask) != from)
- mask <<= 1;
-
- return mask;
-}
-
#endif /* __IP_SET_BITMAP_H */
diff --git a/include/linux/netfilter/ipset/ip_set_getport.h b/include/linux/netfilter/ipset/ip_set_getport.h
index d74cd112b88a..1ecaabd9a048 100644
--- a/include/linux/netfilter/ipset/ip_set_getport.h
+++ b/include/linux/netfilter/ipset/ip_set_getport.h
@@ -20,9 +20,6 @@ static inline bool ip_set_get_ip6_port(const struct sk_buff *skb, bool src,
}
#endif
-extern bool ip_set_get_ip_port(const struct sk_buff *skb, u8 pf, bool src,
- __be16 *port);
-
static inline bool ip_set_proto_with_ports(u8 proto)
{
switch (proto) {
diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h
index cf09ab37b45b..851425c3178f 100644
--- a/include/linux/netfilter/nfnetlink.h
+++ b/include/linux/netfilter/nfnetlink.h
@@ -31,7 +31,7 @@ struct nfnetlink_subsystem {
const struct nfnl_callback *cb; /* callback for individual types */
struct module *owner;
int (*commit)(struct net *net, struct sk_buff *skb);
- int (*abort)(struct net *net, struct sk_buff *skb);
+ int (*abort)(struct net *net, struct sk_buff *skb, bool autoload);
void (*cleanup)(struct net *net);
bool (*valid_genid)(struct net *net, u32 genid);
};
diff --git a/include/linux/netlink.h b/include/linux/netlink.h
index 205fa7b1f07a..60739d0cbf93 100644
--- a/include/linux/netlink.h
+++ b/include/linux/netlink.h
@@ -115,6 +115,19 @@ static inline void nl_set_extack_cookie_u64(struct netlink_ext_ack *extack,
{
u64 __cookie = cookie;
+ if (!extack)
+ return;
+ memcpy(extack->cookie, &__cookie, sizeof(__cookie));
+ extack->cookie_len = sizeof(__cookie);
+}
+
+static inline void nl_set_extack_cookie_u32(struct netlink_ext_ack *extack,
+ u32 cookie)
+{
+ u32 __cookie = cookie;
+
+ if (!extack)
+ return;
memcpy(extack->cookie, &__cookie, sizeof(__cookie));
extack->cookie_len = sizeof(__cookie);
}
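A usage sketch for the new 32-bit variant (foo_ names hypothetical): a netlink request handler can hand a small identifier back to user space through the extended ack cookie.

static int foo_doit(struct sk_buff *skb, struct genl_info *info)
{
	u32 id;

	id = foo_create_object(info);		/* hypothetical helper */
	nl_set_extack_cookie_u32(info->extack, id);
	return 0;
}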
diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h
index fd59904a282c..82d8fb422092 100644
--- a/include/linux/nfs4.h
+++ b/include/linux/nfs4.h
@@ -16,6 +16,7 @@
#include <linux/list.h>
#include <linux/uidgid.h>
#include <uapi/linux/nfs4.h>
+#include <linux/sunrpc/msg_prot.h>
enum nfs4_acl_whotype {
NFS4_ACL_WHO_NAMED = 0,
@@ -539,6 +540,8 @@ enum {
NFSPROC4_CLNT_LOOKUPP,
NFSPROC4_CLNT_LAYOUTERROR,
+
+ NFSPROC4_CLNT_COPY_NOTIFY,
};
/* nfs41 types */
@@ -674,4 +677,27 @@ struct nfs4_op_map {
} u;
};
+struct nfs42_netaddr {
+ char netid[RPCBIND_MAXNETIDLEN];
+ char addr[RPCBIND_MAXUADDRLEN + 1];
+ u32 netid_len;
+ u32 addr_len;
+};
+
+enum netloc_type4 {
+ NL4_NAME = 1,
+ NL4_URL = 2,
+ NL4_NETADDR = 3,
+};
+
+struct nl4_server {
+ enum netloc_type4 nl4_type;
+ union {
+ struct { /* NL4_NAME, NL4_URL */
+ int nl4_str_sz;
+ char nl4_str[NFS4_OPAQUE_LIMIT + 1];
+ };
+ struct nfs42_netaddr nl4_addr; /* NL4_NETADDR */
+ } u;
+};
#endif
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index 570a60c2f4f4..5d5b91e54f73 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -168,6 +168,9 @@ struct nfs_inode {
struct rw_semaphore rmdir_sem;
struct mutex commit_mutex;
+ /* track last access to cached pages */
+ unsigned long page_index;
+
#if IS_ENABLED(CONFIG_NFS_V4)
struct nfs4_cached_acl *nfs4_acl;
/* NFSv4 state */
@@ -189,13 +192,15 @@ struct nfs_inode {
struct nfs4_copy_state {
struct list_head copies;
+ struct list_head src_copies;
nfs4_stateid stateid;
struct completion completion;
uint64_t count;
struct nfs_writeverf verf;
int error;
int flags;
- struct nfs4_state *parent_state;
+ struct nfs4_state *parent_src_state;
+ struct nfs4_state *parent_dst_state;
};
/*
@@ -332,35 +337,17 @@ static inline int nfs_server_capable(struct inode *inode, int cap)
return NFS_SERVER(inode)->caps & cap;
}
-static inline void nfs_set_verifier(struct dentry * dentry, unsigned long verf)
-{
- dentry->d_time = verf;
-}
-
/**
* nfs_save_change_attribute - Returns the inode attribute change cookie
* @dir - pointer to parent directory inode
- * The "change attribute" is updated every time we finish an operation
- * that will result in a metadata change on the server.
+ * The "cache change attribute" is updated when we need to revalidate
+ * our dentry cache after a directory was seen to change on the server.
*/
static inline unsigned long nfs_save_change_attribute(struct inode *dir)
{
return NFS_I(dir)->cache_change_attribute;
}
-/**
- * nfs_verify_change_attribute - Detects NFS remote directory changes
- * @dir - pointer to parent directory inode
- * @chattr - previously saved change attribute
- * Return "false" if the verifiers doesn't match the change attribute.
- * This would usually indicate that the directory contents have changed on
- * the server, and that any dentries need revalidating.
- */
-static inline int nfs_verify_change_attribute(struct inode *dir, unsigned long chattr)
-{
- return chattr == NFS_I(dir)->cache_change_attribute;
-}
-
/*
* linux/fs/nfs/inode.c
*/
@@ -490,6 +477,10 @@ extern const struct file_operations nfs_dir_operations;
extern const struct dentry_operations nfs_dentry_operations;
extern void nfs_force_lookup_revalidate(struct inode *dir);
+extern void nfs_set_verifier(struct dentry * dentry, unsigned long verf);
+#if IS_ENABLED(CONFIG_NFS_V4)
+extern void nfs_clear_verifier_delegated(struct inode *inode);
+#endif /* IS_ENABLED(CONFIG_NFS_V4) */
extern struct dentry *nfs_add_or_obtain(struct dentry *dentry,
struct nfs_fh *fh, struct nfs_fattr *fattr,
struct nfs4_label *label);
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
index a87fe854f008..465fa98258a3 100644
--- a/include/linux/nfs_fs_sb.h
+++ b/include/linux/nfs_fs_sb.h
@@ -45,6 +45,9 @@ struct nfs_client {
#define NFS_CS_INFINITE_SLOTS 3 /* - don't limit TCP slots */
#define NFS_CS_NO_RETRANS_TIMEOUT 4 /* - Disable retransmit timeouts */
#define NFS_CS_TSM_POSSIBLE 5 /* - Maybe state migration */
+#define NFS_CS_NOPING 6 /* - don't ping on connect */
+#define NFS_CS_DS 7 /* - Server is a DS */
+#define NFS_CS_REUSEPORT 8 /* - reuse src port on reconnect */
struct sockaddr_storage cl_addr; /* server identifier */
size_t cl_addrlen;
char * cl_hostname; /* hostname of server */
@@ -149,6 +152,7 @@ struct nfs_server {
#define NFS_MOUNT_LOCAL_FLOCK 0x100000
#define NFS_MOUNT_LOCAL_FCNTL 0x200000
#define NFS_MOUNT_SOFTERR 0x400000
+#define NFS_MOUNT_SOFTREVAL 0x800000
unsigned int caps; /* server capabilities */
unsigned int rsize; /* read size */
@@ -171,7 +175,7 @@ struct nfs_server {
struct nfs_fsid fsid;
__u64 maxfilesize; /* maximum file size */
- struct timespec time_delta; /* smallest time granularity */
+ struct timespec64 time_delta; /* smallest time granularity */
unsigned long mount_time; /* when this fs was mounted */
struct super_block *super; /* VFS super block */
dev_t s_dev; /* superblock dev numbers */
@@ -276,5 +280,6 @@ struct nfs_server {
#define NFS_CAP_COPY (1U << 24)
#define NFS_CAP_OFFLOAD_CANCEL (1U << 25)
#define NFS_CAP_LAYOUTERROR (1U << 26)
+#define NFS_CAP_COPY_NOTIFY (1U << 27)
#endif
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index 9b8324ec08f3..6838c149f335 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -62,19 +62,20 @@ struct nfs_fattr {
struct nfs_fsid fsid;
__u64 fileid;
__u64 mounted_on_fileid;
- struct timespec atime;
- struct timespec mtime;
- struct timespec ctime;
+ struct timespec64 atime;
+ struct timespec64 mtime;
+ struct timespec64 ctime;
__u64 change_attr; /* NFSv4 change attribute */
__u64 pre_change_attr;/* pre-op NFSv4 change attribute */
__u64 pre_size; /* pre_op_attr.size */
- struct timespec pre_mtime; /* pre_op_attr.mtime */
- struct timespec pre_ctime; /* pre_op_attr.ctime */
+ struct timespec64 pre_mtime; /* pre_op_attr.mtime */
+ struct timespec64 pre_ctime; /* pre_op_attr.ctime */
unsigned long time_start;
unsigned long gencount;
struct nfs4_string *owner_name;
struct nfs4_string *group_name;
struct nfs4_threshold *mdsthreshold; /* pNFS threshold hints */
+ struct nfs4_label *label;
};
#define NFS_ATTR_FATTR_TYPE (1U << 0)
@@ -143,7 +144,7 @@ struct nfs_fsinfo {
__u32 wtmult; /* writes should be multiple of this */
__u32 dtpref; /* pref. readdir transfer size */
__u64 maxfilesize;
- struct timespec time_delta; /* server time granularity */
+ struct timespec64 time_delta; /* server time granularity */
__u32 lease_time; /* in seconds */
__u32 nlayouttypes; /* number of layouttypes */
__u32 layouttype[NFS_MAX_LAYOUT_TYPES]; /* supported pnfs layout driver */
@@ -869,7 +870,7 @@ struct nfs3_sattrargs {
struct nfs_fh * fh;
struct iattr * sattr;
unsigned int guard;
- struct timespec guardtime;
+ struct timespec64 guardtime;
};
struct nfs3_diropargs {
@@ -1435,6 +1436,7 @@ struct nfs42_copy_args {
u64 count;
bool sync;
+ struct nl4_server *cp_src;
};
struct nfs42_write_res {
@@ -1463,6 +1465,22 @@ struct nfs42_offload_status_res {
int osr_status;
};
+struct nfs42_copy_notify_args {
+ struct nfs4_sequence_args cna_seq_args;
+
+ struct nfs_fh *cna_src_fh;
+ nfs4_stateid cna_src_stateid;
+ struct nl4_server cna_dst;
+};
+
+struct nfs42_copy_notify_res {
+ struct nfs4_sequence_res cnr_seq_res;
+
+ struct nfstime4 cnr_lease_time;
+ nfs4_stateid cnr_stateid;
+ struct nl4_server cnr_src;
+};
+
struct nfs42_seek_args {
struct nfs4_sequence_args seq_args;
@@ -1622,6 +1640,7 @@ struct nfs_subversion;
struct nfs_mount_info;
struct nfs_client_initdata;
struct nfs_pageio_descriptor;
+struct fs_context;
/*
* RPC procedure vector for NFSv2/NFSv3 demuxing
@@ -1636,16 +1655,14 @@ struct nfs_rpc_ops {
int (*getroot) (struct nfs_server *, struct nfs_fh *,
struct nfs_fsinfo *);
- struct vfsmount *(*submount) (struct nfs_server *, struct dentry *,
- struct nfs_fh *, struct nfs_fattr *);
- struct dentry *(*try_mount) (int, const char *, struct nfs_mount_info *,
- struct nfs_subversion *);
+ int (*submount) (struct fs_context *, struct nfs_server *);
+ int (*try_get_tree) (struct fs_context *);
int (*getattr) (struct nfs_server *, struct nfs_fh *,
struct nfs_fattr *, struct nfs4_label *,
struct inode *);
int (*setattr) (struct dentry *, struct nfs_fattr *,
struct iattr *);
- int (*lookup) (struct inode *, const struct qstr *,
+ int (*lookup) (struct inode *, struct dentry *,
struct nfs_fh *, struct nfs_fattr *,
struct nfs4_label *);
int (*lookupp) (struct inode *, struct nfs_fh *,
@@ -1706,7 +1723,7 @@ struct nfs_rpc_ops {
struct nfs_client *(*init_client) (struct nfs_client *,
const struct nfs_client_initdata *);
void (*free_client) (struct nfs_client *);
- struct nfs_server *(*create_server)(struct nfs_mount_info *, struct nfs_subversion *);
+ struct nfs_server *(*create_server)(struct fs_context *);
struct nfs_server *(*clone_server)(struct nfs_server *, struct nfs_fh *,
struct nfs_fattr *, rpc_authflavor_t);
};
diff --git a/include/linux/notifier.h b/include/linux/notifier.h
index 0096a05395e3..018947611483 100644
--- a/include/linux/notifier.h
+++ b/include/linux/notifier.h
@@ -150,10 +150,6 @@ extern int raw_notifier_chain_register(struct raw_notifier_head *nh,
extern int srcu_notifier_chain_register(struct srcu_notifier_head *nh,
struct notifier_block *nb);
-extern int blocking_notifier_chain_cond_register(
- struct blocking_notifier_head *nh,
- struct notifier_block *nb);
-
extern int atomic_notifier_chain_unregister(struct atomic_notifier_head *nh,
struct notifier_block *nb);
extern int blocking_notifier_chain_unregister(struct blocking_notifier_head *nh,
diff --git a/include/linux/nsproxy.h b/include/linux/nsproxy.h
index 2ae1b1a4d84d..074f395b9ad2 100644
--- a/include/linux/nsproxy.h
+++ b/include/linux/nsproxy.h
@@ -35,6 +35,8 @@ struct nsproxy {
struct mnt_namespace *mnt_ns;
struct pid_namespace *pid_ns_for_children;
struct net *net_ns;
+ struct time_namespace *time_ns;
+ struct time_namespace *time_ns_for_children;
struct cgroup_namespace *cgroup_ns;
};
extern struct nsproxy init_nsproxy;
diff --git a/include/linux/nvme-fc-driver.h b/include/linux/nvme-fc-driver.h
index 10f81629b9ce..6d0d70f3219c 100644
--- a/include/linux/nvme-fc-driver.h
+++ b/include/linux/nvme-fc-driver.h
@@ -270,6 +270,8 @@ struct nvme_fc_remote_port {
*
* Host/Initiator Transport Entrypoints/Parameters:
*
+ * @module: The LLDD module using the interface
+ *
* @localport_delete: The LLDD initiates deletion of a localport via
* nvme_fc_deregister_localport(). However, the teardown is
* asynchronous. This routine is called upon the completion of the
@@ -383,6 +385,8 @@ struct nvme_fc_remote_port {
* Value is Mandatory. Allowed to be zero.
*/
struct nvme_fc_port_template {
+ struct module *module;
+
/* initiator-based functions */
void (*localport_delete)(struct nvme_fc_local_port *);
void (*remoteport_delete)(struct nvme_fc_remote_port *);
diff --git a/include/linux/nvme-fc.h b/include/linux/nvme-fc.h
index 067c9fea64fe..e8c30b39bb27 100644
--- a/include/linux/nvme-fc.h
+++ b/include/linux/nvme-fc.h
@@ -4,33 +4,60 @@
*/
/*
- * This file contains definitions relative to FC-NVME r1.14 (16-020vB).
- * The fcnvme_lsdesc_cr_assoc_cmd struct reflects expected r1.16 content.
+ * This file contains definitions relative to FC-NVME-2 r1.06
+ * (T11-2019-00210-v001).
*/
#ifndef _NVME_FC_H
#define _NVME_FC_H 1
+#include <uapi/scsi/fc/fc_fs.h>
-#define NVME_CMD_SCSI_ID 0xFD
+#define NVME_CMD_FORMAT_ID 0xFD
#define NVME_CMD_FC_ID FC_TYPE_NVME
/* FC-NVME Cmd IU Flags */
-#define FCNVME_CMD_FLAGS_DIRMASK 0x03
-#define FCNVME_CMD_FLAGS_WRITE 0x01
-#define FCNVME_CMD_FLAGS_READ 0x02
+enum {
+ FCNVME_CMD_FLAGS_DIRMASK = 0x03,
+ FCNVME_CMD_FLAGS_WRITE = (1 << 0),
+ FCNVME_CMD_FLAGS_READ = (1 << 1),
+
+ FCNVME_CMD_FLAGS_PICWP = (1 << 2),
+};
+
+enum {
+ FCNVME_CMD_CAT_MASK = 0x0F,
+ FCNVME_CMD_CAT_ADMINQ = 0x01,
+ FCNVME_CMD_CAT_CSSMASK = 0x07,
+ FCNVME_CMD_CAT_CSSFLAG = 0x08,
+};
+
+static inline __u8 fccmnd_set_cat_admin(__u8 rsv_cat)
+{
+ return (rsv_cat & ~FCNVME_CMD_CAT_MASK) | FCNVME_CMD_CAT_ADMINQ;
+}
+
+static inline __u8 fccmnd_set_cat_css(__u8 rsv_cat, __u8 css)
+{
+ return (rsv_cat & ~FCNVME_CMD_CAT_MASK) | FCNVME_CMD_CAT_CSSFLAG |
+ (css & FCNVME_CMD_CAT_CSSMASK);
+}
struct nvme_fc_cmd_iu {
- __u8 scsi_id;
+ __u8 format_id;
__u8 fc_id;
__be16 iu_len;
- __u8 rsvd4[3];
+ __u8 rsvd4[2];
+ __u8 rsv_cat;
__u8 flags;
__be64 connection_id;
__be32 csn;
__be32 data_len;
struct nvme_command sqe;
- __be32 rsvd88[2];
+ __u8 dps;
+ __u8 lbads;
+ __be16 ms;
+ __be32 rsvd92;
};
#define NVME_FC_SIZEOF_ZEROS_RSP 12
@@ -38,11 +65,12 @@ struct nvme_fc_cmd_iu {
enum {
FCNVME_SC_SUCCESS = 0,
FCNVME_SC_INVALID_FIELD = 1,
- FCNVME_SC_INVALID_CONNID = 2,
+ /* reserved 2 */
+ FCNVME_SC_ILL_CONN_PARAMS = 3,
};
struct nvme_fc_ersp_iu {
- __u8 status_code;
+ __u8 ersp_result;
__u8 rsvd1;
__be16 iu_len;
__be32 rsn;
@@ -53,14 +81,44 @@ struct nvme_fc_ersp_iu {
};
-/* FC-NVME Link Services */
+#define FCNVME_NVME_SR_OPCODE 0x01
+
+struct nvme_fc_nvme_sr_iu {
+ __u8 fc_id;
+ __u8 opcode;
+ __u8 rsvd2;
+ __u8 retry_rctl;
+ __be32 rsvd4;
+};
+
+
+enum {
+ FCNVME_SRSTAT_ACC = 0x0,
+ FCNVME_SRSTAT_INV_FCID = 0x1,
+ /* reserved 0x2 */
+ FCNVME_SRSTAT_LOGICAL_ERR = 0x3,
+ FCNVME_SRSTAT_INV_QUALIF = 0x4,
+ FCNVME_SRSTAT_UNABL2PERFORM = 0x9,
+};
+
+struct nvme_fc_nvme_sr_rsp_iu {
+ __u8 fc_id;
+ __u8 opcode;
+ __u8 rsvd2;
+ __u8 status;
+ __be32 rsvd4;
+};
+
+
+/* FC-NVME Link Services - LS cmd values (w0 bits 31:24) */
enum {
FCNVME_LS_RSVD = 0,
FCNVME_LS_RJT = 1,
FCNVME_LS_ACC = 2,
- FCNVME_LS_CREATE_ASSOCIATION = 3,
- FCNVME_LS_CREATE_CONNECTION = 4,
- FCNVME_LS_DISCONNECT = 5,
+ FCNVME_LS_CREATE_ASSOCIATION = 3, /* Create Association */
+ FCNVME_LS_CREATE_CONNECTION = 4, /* Create I/O Connection */
+ FCNVME_LS_DISCONNECT_ASSOC = 5, /* Disconnect Association */
+ FCNVME_LS_DISCONNECT_CONN = 6, /* Disconnect Connection */
};
/* FC-NVME Link Service Descriptors */
@@ -117,14 +175,17 @@ enum fcnvme_ls_rjt_reason {
FCNVME_RJT_RC_UNSUP = 0x0b,
/* command not supported */
- FCNVME_RJT_RC_INPROG = 0x0e,
- /* command already in progress */
-
FCNVME_RJT_RC_INV_ASSOC = 0x40,
- /* Invalid Association ID*/
+ /* Invalid Association ID */
FCNVME_RJT_RC_INV_CONN = 0x41,
- /* Invalid Connection ID*/
+ /* Invalid Connection ID */
+
+ FCNVME_RJT_RC_INV_PARAM = 0x42,
+ /* Invalid Parameters */
+
+ FCNVME_RJT_RC_INSUF_RES = 0x43,
+ /* Insufficient Resources */
FCNVME_RJT_RC_VENDOR = 0xff,
/* vendor specific error */
@@ -138,14 +199,32 @@ enum fcnvme_ls_rjt_explan {
FCNVME_RJT_EXP_OXID_RXID = 0x17,
/* invalid OX_ID-RX_ID combination */
- FCNVME_RJT_EXP_INSUF_RES = 0x29,
- /* insufficient resources */
-
FCNVME_RJT_EXP_UNAB_DATA = 0x2a,
/* unable to supply requested data */
FCNVME_RJT_EXP_INV_LEN = 0x2d,
/* Invalid payload length */
+
+ FCNVME_RJT_EXP_INV_ERSP_RAT = 0x40,
+ /* Invalid NVMe_ERSP Ratio */
+
+ FCNVME_RJT_EXP_INV_CTLR_ID = 0x41,
+ /* Invalid Controller ID */
+
+ FCNVME_RJT_EXP_INV_QUEUE_ID = 0x42,
+ /* Invalid Queue ID */
+
+ FCNVME_RJT_EXP_INV_SQSIZE = 0x43,
+ /* Invalid Submission Queue Size */
+
+ FCNVME_RJT_EXP_INV_HOSTID = 0x44,
+ /* Invalid HOST ID */
+
+ FCNVME_RJT_EXP_INV_HOSTNQN = 0x45,
+ /* Invalid HOSTNQN */
+
+ FCNVME_RJT_EXP_INV_SUBNQN = 0x46,
+ /* Invalid SUBNQN */
};
/* FCNVME_LSDESC_RJT */
@@ -209,21 +288,11 @@ struct fcnvme_lsdesc_cr_conn_cmd {
__be32 rsvd52;
};
-/* Disconnect Scope Values */
-enum {
- FCNVME_DISCONN_ASSOCIATION = 0,
- FCNVME_DISCONN_CONNECTION = 1,
-};
-
/* FCNVME_LSDESC_DISCONN_CMD */
struct fcnvme_lsdesc_disconn_cmd {
__be32 desc_tag; /* FCNVME_LSDESC_xxx */
__be32 desc_len;
- u8 rsvd8[3];
- /* note: scope is really a 1 bit field */
- u8 scope; /* FCNVME_DISCONN_xxx */
- __be32 rsvd12;
- __be64 id;
+ __be32 rsvd8[4];
};
/* FCNVME_LSDESC_CONN_ID */
@@ -242,9 +311,14 @@ struct fcnvme_lsdesc_assoc_id {
/* r_ctl values */
enum {
- FCNVME_RS_RCTL_DATA = 1,
- FCNVME_RS_RCTL_XFER_RDY = 5,
- FCNVME_RS_RCTL_RSP = 8,
+ FCNVME_RS_RCTL_CMND = 0x6,
+ FCNVME_RS_RCTL_DATA = 0x1,
+ FCNVME_RS_RCTL_CONF = 0x3,
+ FCNVME_RS_RCTL_SR = 0x9,
+ FCNVME_RS_RCTL_XFER_RDY = 0x5,
+ FCNVME_RS_RCTL_RSP = 0x7,
+ FCNVME_RS_RCTL_ERSP = 0x8,
+ FCNVME_RS_RCTL_SR_RSP = 0xA,
};
@@ -264,7 +338,10 @@ struct fcnvme_ls_acc_hdr {
struct fcnvme_ls_rqst_w0 w0;
__be32 desc_list_len;
struct fcnvme_lsdesc_rqst rqst;
- /* Followed by cmd-specific ACC descriptors, see next definitions */
+ /*
+ * Followed by cmd-specific ACCEPT descriptors, see xxx_acc
+ * definitions below
+ */
};
/* FCNVME_LS_CREATE_ASSOCIATION */
@@ -302,25 +379,39 @@ struct fcnvme_ls_cr_conn_acc {
struct fcnvme_lsdesc_conn_id connectid;
};
-/* FCNVME_LS_DISCONNECT */
-struct fcnvme_ls_disconnect_rqst {
+/* FCNVME_LS_DISCONNECT_ASSOC */
+struct fcnvme_ls_disconnect_assoc_rqst {
struct fcnvme_ls_rqst_w0 w0;
__be32 desc_list_len;
struct fcnvme_lsdesc_assoc_id associd;
struct fcnvme_lsdesc_disconn_cmd discon_cmd;
};
-struct fcnvme_ls_disconnect_acc {
+struct fcnvme_ls_disconnect_assoc_acc {
+ struct fcnvme_ls_acc_hdr hdr;
+};
+
+
+/* FCNVME_LS_DISCONNECT_CONN */
+struct fcnvme_ls_disconnect_conn_rqst {
+ struct fcnvme_ls_rqst_w0 w0;
+ __be32 desc_list_len;
+ struct fcnvme_lsdesc_assoc_id associd;
+ struct fcnvme_lsdesc_disconn_cmd connectid;
+};
+
+struct fcnvme_ls_disconnect_conn_acc {
struct fcnvme_ls_acc_hdr hdr;
};
/*
- * Yet to be defined in FC-NVME:
+ * Default R_A_TOV is pulled in from fc_fs.h but needs conversion
+ * from ms to seconds for our use.
*/
-#define NVME_FC_CONNECT_TIMEOUT_SEC 2 /* 2 seconds */
-#define NVME_FC_LS_TIMEOUT_SEC 2 /* 2 seconds */
-#define NVME_FC_TGTOP_TIMEOUT_SEC 2 /* 2 seconds */
+#define FC_TWO_TIMES_R_A_TOV (2 * (FC_DEF_R_A_TOV / 1000))
+#define NVME_FC_LS_TIMEOUT_SEC FC_TWO_TIMES_R_A_TOV
+#define NVME_FC_TGTOP_TIMEOUT_SEC FC_TWO_TIMES_R_A_TOV
/*
* TRADDR string must be of form "nn-<16hexdigits>:pn-<16hexdigits>"
@@ -328,6 +419,7 @@ struct fcnvme_ls_disconnect_acc {
 * in front of the <16hexdigits>. Without the prefix it is considered the
 * "min" string and with it the "max" string. The hexdigits may be upper
 * or lower case.
+ * Note: FC-NVME-2 standard requires a "0x" prefix.
*/
#define NVME_FC_TRADDR_NNLEN 3 /* "?n-" */
#define NVME_FC_TRADDR_OXNNLEN 5 /* "?n-0x" */
diff --git a/include/linux/nvme.h b/include/linux/nvme.h
index f61d6906e59d..3d5189f46cb1 100644
--- a/include/linux/nvme.h
+++ b/include/linux/nvme.h
@@ -107,8 +107,22 @@ enum {
NVME_REG_AQA = 0x0024, /* Admin Queue Attributes */
NVME_REG_ASQ = 0x0028, /* Admin SQ Base Address */
NVME_REG_ACQ = 0x0030, /* Admin CQ Base Address */
- NVME_REG_CMBLOC = 0x0038, /* Controller Memory Buffer Location */
+ NVME_REG_CMBLOC = 0x0038, /* Controller Memory Buffer Location */
NVME_REG_CMBSZ = 0x003c, /* Controller Memory Buffer Size */
+ NVME_REG_BPINFO = 0x0040, /* Boot Partition Information */
+ NVME_REG_BPRSEL = 0x0044, /* Boot Partition Read Select */
+ NVME_REG_BPMBL = 0x0048, /* Boot Partition Memory Buffer
+ * Location
+ */
+ NVME_REG_PMRCAP = 0x0e00, /* Persistent Memory Capabilities */
+ NVME_REG_PMRCTL = 0x0e04, /* Persistent Memory Region Control */
+ NVME_REG_PMRSTS = 0x0e08, /* Persistent Memory Region Status */
+ NVME_REG_PMREBS = 0x0e0c, /* Persistent Memory Region Elasticity
+ * Buffer Size
+ */
+ NVME_REG_PMRSWTP = 0x0e10, /* Persistent Memory Region Sustained
+ * Write Throughput
+ */
NVME_REG_DBS = 0x1000, /* SQ 0 Tail Doorbell */
};
@@ -295,6 +309,14 @@ enum {
NVME_CTRL_OACS_DIRECTIVES = 1 << 5,
NVME_CTRL_OACS_DBBUF_SUPP = 1 << 8,
NVME_CTRL_LPA_CMD_EFFECTS_LOG = 1 << 1,
+ NVME_CTRL_CTRATT_128_ID = 1 << 0,
+ NVME_CTRL_CTRATT_NON_OP_PSP = 1 << 1,
+ NVME_CTRL_CTRATT_NVM_SETS = 1 << 2,
+ NVME_CTRL_CTRATT_READ_RECV_LVLS = 1 << 3,
+ NVME_CTRL_CTRATT_ENDURANCE_GROUPS = 1 << 4,
+ NVME_CTRL_CTRATT_PREDICTABLE_LAT = 1 << 5,
+ NVME_CTRL_CTRATT_NAMESPACE_GRANULARITY = 1 << 7,
+ NVME_CTRL_CTRATT_UUID_LIST = 1 << 9,
};
struct nvme_lbaf {
@@ -352,6 +374,9 @@ enum {
NVME_ID_CNS_NS_PRESENT = 0x11,
NVME_ID_CNS_CTRL_NS_LIST = 0x12,
NVME_ID_CNS_CTRL_LIST = 0x13,
+ NVME_ID_CNS_SCNDRY_CTRL_LIST = 0x15,
+ NVME_ID_CNS_NS_GRANULARITY = 0x16,
+ NVME_ID_CNS_UUID_LIST = 0x17,
};
enum {
@@ -409,7 +434,8 @@ struct nvme_smart_log {
__u8 avail_spare;
__u8 spare_thresh;
__u8 percent_used;
- __u8 rsvd6[26];
+ __u8 endu_grp_crit_warn_sumry;
+ __u8 rsvd7[25];
__u8 data_units_read[16];
__u8 data_units_written[16];
__u8 host_reads[16];
@@ -423,7 +449,11 @@ struct nvme_smart_log {
__le32 warning_temp_time;
__le32 critical_comp_time;
__le16 temp_sensor[8];
- __u8 rsvd216[296];
+ __le32 thm_temp1_trans_count;
+ __le32 thm_temp2_trans_count;
+ __le32 thm_temp1_total_time;
+ __le32 thm_temp2_total_time;
+ __u8 rsvd232[280];
};
struct nvme_fw_slot_info_log {
@@ -440,6 +470,7 @@ enum {
NVME_CMD_EFFECTS_NIC = 1 << 3,
NVME_CMD_EFFECTS_CCC = 1 << 4,
NVME_CMD_EFFECTS_CSE_MASK = 3 << 16,
+ NVME_CMD_EFFECTS_UUID_SEL = 1 << 19,
};
struct nvme_effects_log {
@@ -563,6 +594,7 @@ enum nvme_opcode {
nvme_cmd_compare = 0x05,
nvme_cmd_write_zeroes = 0x08,
nvme_cmd_dsm = 0x09,
+ nvme_cmd_verify = 0x0c,
nvme_cmd_resv_register = 0x0d,
nvme_cmd_resv_report = 0x0e,
nvme_cmd_resv_acquire = 0x11,
@@ -772,6 +804,12 @@ struct nvme_write_zeroes_cmd {
/* Features */
+enum {
+ NVME_TEMP_THRESH_MASK = 0xffff,
+ NVME_TEMP_THRESH_SELECT_SHIFT = 16,
+ NVME_TEMP_THRESH_TYPE_UNDER = 0x100000,
+};
+
struct nvme_feat_auto_pst {
__le64 entries[32];
};
@@ -806,10 +844,14 @@ enum nvme_admin_opcode {
nvme_admin_ns_mgmt = 0x0d,
nvme_admin_activate_fw = 0x10,
nvme_admin_download_fw = 0x11,
+ nvme_admin_dev_self_test = 0x14,
nvme_admin_ns_attach = 0x15,
nvme_admin_keep_alive = 0x18,
nvme_admin_directive_send = 0x19,
nvme_admin_directive_recv = 0x1a,
+ nvme_admin_virtual_mgmt = 0x1c,
+ nvme_admin_nvme_mi_send = 0x1d,
+ nvme_admin_nvme_mi_recv = 0x1e,
nvme_admin_dbbuf = 0x7C,
nvme_admin_format_nvm = 0x80,
nvme_admin_security_send = 0x81,
@@ -873,6 +915,7 @@ enum {
NVME_FEAT_PLM_CONFIG = 0x13,
NVME_FEAT_PLM_WINDOW = 0x14,
NVME_FEAT_HOST_BEHAVIOR = 0x16,
+ NVME_FEAT_SANITIZE = 0x17,
NVME_FEAT_SW_PROGRESS = 0x80,
NVME_FEAT_HOST_ID = 0x81,
NVME_FEAT_RESV_MASK = 0x82,
@@ -883,6 +926,10 @@ enum {
NVME_LOG_FW_SLOT = 0x03,
NVME_LOG_CHANGED_NS = 0x04,
NVME_LOG_CMD_EFFECTS = 0x05,
+ NVME_LOG_DEVICE_SELF_TEST = 0x06,
+ NVME_LOG_TELEMETRY_HOST = 0x07,
+ NVME_LOG_TELEMETRY_CTRL = 0x08,
+ NVME_LOG_ENDURANCE_GROUP = 0x09,
NVME_LOG_ANA = 0x0c,
NVME_LOG_DISC = 0x70,
NVME_LOG_RESERVATION = 0x80,
@@ -1290,7 +1337,11 @@ enum {
NVME_SC_SGL_INVALID_OFFSET = 0x16,
NVME_SC_SGL_INVALID_SUBTYPE = 0x17,
+ NVME_SC_SANITIZE_FAILED = 0x1C,
+ NVME_SC_SANITIZE_IN_PROGRESS = 0x1D,
+
NVME_SC_NS_WRITE_PROTECTED = 0x20,
+ NVME_SC_CMD_INTERRUPTED = 0x21,
NVME_SC_LBA_RANGE = 0x80,
NVME_SC_CAP_EXCEEDED = 0x81,
@@ -1328,6 +1379,8 @@ enum {
NVME_SC_NS_NOT_ATTACHED = 0x11a,
NVME_SC_THIN_PROV_NOT_SUPP = 0x11b,
NVME_SC_CTRL_LIST_INVALID = 0x11c,
+ NVME_SC_BP_WRITE_PROHIBITED = 0x11e,
+ NVME_SC_PMR_SAN_PROHIBITED = 0x123,
/*
* I/O Command Set Specific - NVM commands:
@@ -1368,6 +1421,7 @@ enum {
NVME_SC_ANA_INACCESSIBLE = 0x302,
NVME_SC_ANA_TRANSITION = 0x303,
NVME_SC_HOST_PATH_ERROR = 0x370,
+ NVME_SC_HOST_ABORTED_CMD = 0x371,
NVME_SC_CRD = 0x1800,
NVME_SC_DNR = 0x4000,
diff --git a/include/linux/nvmem-consumer.h b/include/linux/nvmem-consumer.h
index 8f8be5b00060..d3776be48c53 100644
--- a/include/linux/nvmem-consumer.h
+++ b/include/linux/nvmem-consumer.h
@@ -89,6 +89,9 @@ void nvmem_del_cell_lookups(struct nvmem_cell_lookup *entries,
int nvmem_register_notifier(struct notifier_block *nb);
int nvmem_unregister_notifier(struct notifier_block *nb);
+struct nvmem_device *nvmem_device_find(void *data,
+ int (*match)(struct device *dev, const void *data));
+
#else
static inline struct nvmem_cell *nvmem_cell_get(struct device *dev,
@@ -118,7 +121,7 @@ static inline void *nvmem_cell_read(struct nvmem_cell *cell, size_t *len)
}
static inline int nvmem_cell_write(struct nvmem_cell *cell,
- const char *buf, size_t len)
+ void *buf, size_t len)
{
return -EOPNOTSUPP;
}
@@ -204,6 +207,12 @@ static inline int nvmem_unregister_notifier(struct notifier_block *nb)
return -EOPNOTSUPP;
}
+static inline struct nvmem_device *nvmem_device_find(void *data,
+ int (*match)(struct device *dev, const void *data))
+{
+ return NULL;
+}
+
#endif /* CONFIG_NVMEM */
#if IS_ENABLED(CONFIG_NVMEM) && IS_ENABLED(CONFIG_OF)
diff --git a/include/linux/nvmem-provider.h b/include/linux/nvmem-provider.h
index fe051323be0a..6d6f8e5d24c9 100644
--- a/include/linux/nvmem-provider.h
+++ b/include/linux/nvmem-provider.h
@@ -11,6 +11,7 @@
#include <linux/err.h>
#include <linux/errno.h>
+#include <linux/gpio/consumer.h>
struct nvmem_device;
struct nvmem_cell_info;
@@ -45,6 +46,7 @@ enum nvmem_type {
* @word_size: Minimum read/write access granularity.
* @stride: Minimum read/write access stride.
* @priv: User context passed to read/write callbacks.
+ * @wp_gpio: Write protect pin
*
* Note: A default "nvmem<id>" name will be assigned to the device if
* no name is specified in its configuration. In such case "<id>" is
@@ -58,6 +60,7 @@ struct nvmem_config {
const char *name;
int id;
struct module *owner;
+ struct gpio_desc *wp_gpio;
const struct nvmem_cell_info *cells;
int ncells;
enum nvmem_type type;
diff --git a/include/linux/of.h b/include/linux/of.h
index 844f89e1b039..c669c0a4732f 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -351,6 +351,8 @@ extern const void *of_get_property(const struct device_node *node,
int *lenp);
extern struct device_node *of_get_cpu_node(int cpu, unsigned int *thread);
extern struct device_node *of_get_next_cpu_node(struct device_node *prev);
+extern struct device_node *of_get_cpu_state_node(struct device_node *cpu_node,
+ int index);
#define for_each_property_of_node(dn, pp) \
for (pp = dn->properties; pp != NULL; pp = pp->next)
@@ -765,6 +767,12 @@ static inline struct device_node *of_get_next_cpu_node(struct device_node *prev)
return NULL;
}
+static inline struct device_node *of_get_cpu_state_node(struct device_node *cpu_node,
+ int index)
+{
+ return NULL;
+}
+
static inline int of_n_addr_cells(struct device_node *np)
{
return 0;
diff --git a/include/linux/of_address.h b/include/linux/of_address.h
index 30e40fb6936b..eac7ab109df4 100644
--- a/include/linux/of_address.h
+++ b/include/linux/of_address.h
@@ -12,6 +12,7 @@ struct of_pci_range_parser {
const __be32 *end;
int np;
int pna;
+ bool dma;
};
struct of_pci_range {
@@ -33,10 +34,6 @@ extern u64 of_translate_dma_address(struct device_node *dev,
extern u64 of_translate_address(struct device_node *np, const __be32 *addr);
extern int of_address_to_resource(struct device_node *dev, int index,
struct resource *r);
-extern struct device_node *of_find_matching_node_by_address(
- struct device_node *from,
- const struct of_device_id *matches,
- u64 base_address);
extern void __iomem *of_iomap(struct device_node *device, int index);
void __iomem *of_io_request_and_map(struct device_node *device,
int index, const char *name);
@@ -55,8 +52,6 @@ extern int of_pci_dma_range_parser_init(struct of_pci_range_parser *parser,
extern struct of_pci_range *of_pci_range_parser_one(
struct of_pci_range_parser *parser,
struct of_pci_range *range);
-extern int of_dma_get_range(struct device_node *np, u64 *dma_addr,
- u64 *paddr, u64 *size);
extern bool of_dma_is_coherent(struct device_node *np);
#else /* CONFIG_OF_ADDRESS */
static inline void __iomem *of_io_request_and_map(struct device_node *device,
@@ -71,14 +66,6 @@ static inline u64 of_translate_address(struct device_node *np,
return OF_BAD_ADDR;
}
-static inline struct device_node *of_find_matching_node_by_address(
- struct device_node *from,
- const struct of_device_id *matches,
- u64 base_address)
-{
- return NULL;
-}
-
static inline const __be32 *of_get_address(struct device_node *dev, int index,
u64 *size, unsigned int *flags)
{
@@ -104,12 +91,6 @@ static inline struct of_pci_range *of_pci_range_parser_one(
return NULL;
}
-static inline int of_dma_get_range(struct device_node *np, u64 *dma_addr,
- u64 *paddr, u64 *size)
-{
- return -ENODEV;
-}
-
static inline bool of_dma_is_coherent(struct device_node *np)
{
return false;
diff --git a/include/linux/of_clk.h b/include/linux/of_clk.h
index b27da9f164cb..31b73a0da9db 100644
--- a/include/linux/of_clk.h
+++ b/include/linux/of_clk.h
@@ -6,19 +6,22 @@
#ifndef __LINUX_OF_CLK_H
#define __LINUX_OF_CLK_H
+struct device_node;
+struct of_device_id;
+
#if defined(CONFIG_COMMON_CLK) && defined(CONFIG_OF)
-unsigned int of_clk_get_parent_count(struct device_node *np);
-const char *of_clk_get_parent_name(struct device_node *np, int index);
+unsigned int of_clk_get_parent_count(const struct device_node *np);
+const char *of_clk_get_parent_name(const struct device_node *np, int index);
void of_clk_init(const struct of_device_id *matches);
#else /* !CONFIG_COMMON_CLK || !CONFIG_OF */
-static inline unsigned int of_clk_get_parent_count(struct device_node *np)
+static inline unsigned int of_clk_get_parent_count(const struct device_node *np)
{
return 0;
}
-static inline const char *of_clk_get_parent_name(struct device_node *np,
+static inline const char *of_clk_get_parent_name(const struct device_node *np,
int index)
{
return NULL;
diff --git a/include/linux/of_mdio.h b/include/linux/of_mdio.h
index 99cefe6f5edb..491a2b7e77c1 100644
--- a/include/linux/of_mdio.h
+++ b/include/linux/of_mdio.h
@@ -12,6 +12,7 @@
#include <linux/of.h>
#if IS_ENABLED(CONFIG_OF_MDIO)
+extern bool of_mdiobus_child_is_phy(struct device_node *child);
extern int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np);
extern struct phy_device *of_phy_find_device(struct device_node *phy_np);
extern struct phy_device *of_phy_connect(struct net_device *dev,
@@ -54,6 +55,11 @@ static inline int of_mdio_parse_addr(struct device *dev,
}
#else /* CONFIG_OF_MDIO */
+static inline bool of_mdiobus_child_is_phy(struct device_node *child)
+{
+ return false;
+}
+
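A sketch of how an MDIO bus driver might use the newly exported helper while scanning its device-tree children:

	struct device_node *child;

	for_each_available_child_of_node(np, child) {
		if (!of_mdiobus_child_is_phy(child))
			continue;	/* e.g. a non-PHY MDIO device */
		/* register/probe a PHY described by "child" here */
	}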
static inline int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
{
/*
diff --git a/include/linux/of_net.h b/include/linux/of_net.h
index 6aeaea1775e6..71bbfcf3adcd 100644
--- a/include/linux/of_net.h
+++ b/include/linux/of_net.h
@@ -6,15 +6,18 @@
#ifndef __LINUX_OF_NET_H
#define __LINUX_OF_NET_H
+#include <linux/phy.h>
+
#ifdef CONFIG_OF_NET
#include <linux/of.h>
struct net_device;
-extern int of_get_phy_mode(struct device_node *np);
+extern int of_get_phy_mode(struct device_node *np, phy_interface_t *interface);
extern const void *of_get_mac_address(struct device_node *np);
extern struct net_device *of_find_net_device_by_node(struct device_node *np);
#else
-static inline int of_get_phy_mode(struct device_node *np)
+static inline int of_get_phy_mode(struct device_node *np,
+ phy_interface_t *interface)
{
return -ENODEV;
}
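With the new prototype the PHY mode is returned through an out-parameter and the return value carries only the error code; a caller sketch (the fallback choice is the driver's own):

	phy_interface_t interface;
	int err;

	err = of_get_phy_mode(np, &interface);
	if (err)
		interface = PHY_INTERFACE_MODE_NA; /* missing/invalid "phy-mode" */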
diff --git a/include/linux/of_pci.h b/include/linux/of_pci.h
index 21a89c4880fa..29658c0ee71f 100644
--- a/include/linux/of_pci.h
+++ b/include/linux/of_pci.h
@@ -2,11 +2,10 @@
#ifndef __OF_PCI_H
#define __OF_PCI_H
-#include <linux/pci.h>
-#include <linux/msi.h>
+#include <linux/types.h>
+#include <linux/errno.h>
struct pci_dev;
-struct of_phandle_args;
struct device_node;
#if IS_ENABLED(CONFIG_OF) && IS_ENABLED(CONFIG_PCI)
diff --git a/include/linux/omap-dma.h b/include/linux/omap-dma.h
index ba3cfbb52312..5c5c93ad6b50 100644
--- a/include/linux/omap-dma.h
+++ b/include/linux/omap-dma.h
@@ -129,7 +129,6 @@
#define IS_WORD_16 BIT(0xd)
#define ENABLE_16XX_MODE BIT(0xe)
#define HS_CHANNELS_RESERVED BIT(0xf)
-#define DMA_ENGINE_HANDLE_IRQ BIT(0x10)
/* Defines for DMA Capabilities */
#define DMA_HAS_TRANSPARENT_CAPS (0x1 << 18)
@@ -239,9 +238,6 @@ struct omap_dma_lch {
void (*callback)(int lch, u16 ch_status, void *data);
void *data;
long flags;
- /* required for Dynamic chaining */
- int prev_linked_ch;
- int next_linked_ch;
int state;
int chain_id;
int status;
@@ -303,7 +299,6 @@ extern void omap_set_dma_priority(int lch, int dst_port, int priority);
extern int omap_request_dma(int dev_id, const char *dev_name,
void (*callback)(int lch, u16 ch_status, void *data),
void *data, int *dma_ch);
-extern void omap_enable_dma_irq(int ch, u16 irq_bits);
extern void omap_disable_dma_irq(int ch, u16 irq_bits);
extern void omap_free_dma(int ch);
extern void omap_start_dma(int lch);
@@ -312,7 +307,6 @@ extern void omap_set_dma_transfer_params(int lch, int data_type,
int elem_count, int frame_count,
int sync_mode,
int dma_trigger, int src_or_dst_synch);
-extern void omap_set_dma_write_mode(int lch, enum omap_dma_write_mode mode);
extern void omap_set_dma_channel_mode(int lch, enum omap_dma_channel_mode mode);
extern void omap_set_dma_src_params(int lch, int src_port, int src_amode,
@@ -329,22 +323,10 @@ extern void omap_set_dma_dest_data_pack(int lch, int enable);
extern void omap_set_dma_dest_burst_mode(int lch,
enum omap_dma_burst_mode burst_mode);
-extern void omap_set_dma_params(int lch,
- struct omap_dma_channel_params *params);
-
-extern void omap_dma_link_lch(int lch_head, int lch_queue);
-
-extern int omap_set_dma_callback(int lch,
- void (*callback)(int lch, u16 ch_status, void *data),
- void *data);
extern dma_addr_t omap_get_dma_src_pos(int lch);
extern dma_addr_t omap_get_dma_dst_pos(int lch);
extern int omap_get_dma_active_status(int lch);
extern int omap_dma_running(void);
-extern void omap_dma_set_global_params(int arb_rate, int max_fifo_depth,
- int tparams);
-void omap_dma_global_context_save(void);
-void omap_dma_global_context_restore(void);
#if defined(CONFIG_ARCH_OMAP1) && IS_ENABLED(CONFIG_FB_OMAP)
#include <mach/lcd_dma.h>
diff --git a/include/linux/padata.h b/include/linux/padata.h
index 23717eeaad23..a0d8b41850b2 100644
--- a/include/linux/padata.h
+++ b/include/linux/padata.h
@@ -9,17 +9,17 @@
#ifndef PADATA_H
#define PADATA_H
+#include <linux/compiler_types.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/list.h>
-#include <linux/notifier.h>
#include <linux/kobject.h>
#define PADATA_CPU_SERIAL 0x01
#define PADATA_CPU_PARALLEL 0x02
/**
- * struct padata_priv - Embedded to the users data structure.
+ * struct padata_priv - Represents one job
*
* @list: List entry, to attach to the padata lists.
* @pd: Pointer to the internal control structure.
@@ -42,7 +42,7 @@ struct padata_priv {
};
/**
- * struct padata_list
+ * struct padata_list - one per work type per CPU
*
* @list: List head.
* @lock: List lock.
@@ -70,9 +70,6 @@ struct padata_serial_queue {
*
* @parallel: List to wait for parallelization.
* @reorder: List to wait for reordering after parallel processing.
- * @serial: List to wait for serialization after reordering.
- * @pwork: work struct for parallelization.
- * @swork: work struct for serialization.
* @work: work struct for parallelization.
* @num_obj: Number of objects that are processed by this cpu.
*/
@@ -98,12 +95,11 @@ struct padata_cpumask {
* struct parallel_data - Internal control structure, covers everything
* that depends on the cpumask in use.
*
- * @pinst: padata instance.
+ * @ps: padata_shell object.
* @pqueue: percpu padata queues used for parallelization.
 * @squeue: percpu padata queues used for serialization.
- * @reorder_objects: Number of objects waiting in the reorder queues.
* @refcnt: Number of objects holding a reference on this parallel_data.
- * @max_seq_nr: Maximal used sequence number.
+ * @seq_nr: Sequence number of the parallelized data object.
* @processed: Number of already processed objects.
* @cpu: Next CPU to be processed.
* @cpumask: The cpumasks in use for parallel and serial workers.
@@ -111,30 +107,44 @@ struct padata_cpumask {
* @lock: Reorder lock.
*/
struct parallel_data {
- struct padata_instance *pinst;
+ struct padata_shell *ps;
struct padata_parallel_queue __percpu *pqueue;
struct padata_serial_queue __percpu *squeue;
- atomic_t reorder_objects;
atomic_t refcnt;
atomic_t seq_nr;
unsigned int processed;
int cpu;
struct padata_cpumask cpumask;
struct work_struct reorder_work;
- spinlock_t lock ____cacheline_aligned;
+ spinlock_t ____cacheline_aligned lock;
+};
+
+/**
+ * struct padata_shell - Wrapper around struct parallel_data, its
+ * purpose is to allow the underlying control structure to be replaced
+ * on the fly using RCU.
+ *
+ * @pinst: padata instance.
+ * @pd: Actual parallel_data structure which may be substituted on the fly.
+ * @opd: Pointer to old pd to be freed by padata_replace.
+ * @list: List entry in padata_instance list.
+ */
+struct padata_shell {
+ struct padata_instance *pinst;
+ struct parallel_data __rcu *pd;
+ struct parallel_data *opd;
+ struct list_head list;
};
/**
* struct padata_instance - The overall control structure.
*
- * @cpu_notifier: cpu hotplug notifier.
+ * @node: Used by CPU hotplug.
* @parallel_wq: The workqueue used for parallel work.
* @serial_wq: The workqueue used for serial work.
- * @pd: The internal control structure.
+ * @pslist: List of padata_shell objects attached to this instance.
* @cpumask: User supplied cpumasks for parallel and serial works.
- * @cpumask_change_notifier: Notifiers chain for user-defined notify
- * callbacks that will be called when either @pcpu or @cbcpu
- * or both cpumasks change.
+ * @rcpumask: Actual cpumasks based on user cpumask and cpu_online_mask.
* @kobj: padata instance kernel object.
* @lock: padata instance lock.
* @flags: padata flags.
@@ -143,9 +153,9 @@ struct padata_instance {
struct hlist_node node;
struct workqueue_struct *parallel_wq;
struct workqueue_struct *serial_wq;
- struct parallel_data *pd;
+ struct list_head pslist;
struct padata_cpumask cpumask;
- struct blocking_notifier_head cpumask_change_notifier;
+ struct padata_cpumask rcpumask;
struct kobject kobj;
struct mutex lock;
u8 flags;
@@ -156,15 +166,13 @@ struct padata_instance {
extern struct padata_instance *padata_alloc_possible(const char *name);
extern void padata_free(struct padata_instance *pinst);
-extern int padata_do_parallel(struct padata_instance *pinst,
+extern struct padata_shell *padata_alloc_shell(struct padata_instance *pinst);
+extern void padata_free_shell(struct padata_shell *ps);
+extern int padata_do_parallel(struct padata_shell *ps,
struct padata_priv *padata, int *cb_cpu);
extern void padata_do_serial(struct padata_priv *padata);
extern int padata_set_cpumask(struct padata_instance *pinst, int cpumask_type,
cpumask_var_t cpumask);
extern int padata_start(struct padata_instance *pinst);
extern void padata_stop(struct padata_instance *pinst);
-extern int padata_register_cpumask_notifier(struct padata_instance *pinst,
- struct notifier_block *nblock);
-extern int padata_unregister_cpumask_notifier(struct padata_instance *pinst,
- struct notifier_block *nblock);
#endif
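For orientation, a minimal sketch of how a padata user adapts to the shell-based API introduced here: jobs are now submitted against a padata_shell allocated from the instance. The names and error handling are illustrative, not taken from this patch.

static struct padata_instance *pinst;
static struct padata_shell *ps;

static int example_setup(void)
{
        pinst = padata_alloc_possible("example");
        if (!pinst)
                return -ENOMEM;
        ps = padata_alloc_shell(pinst);         /* per-user wrapper around parallel_data */
        if (!ps) {
                padata_free(pinst);
                return -ENOMEM;
        }
        return padata_start(pinst);
}

static int example_submit(struct padata_priv *padata, int *cb_cpu)
{
        /* Jobs are queued against the shell, not the instance. */
        return padata_do_parallel(ps, padata, cb_cpu);
}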
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 1bf83c8fcaa7..77de28bfefb0 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -311,7 +311,7 @@ static inline int TestClearPage##uname(struct page *page) { return 0; }
__PAGEFLAG(Locked, locked, PF_NO_TAIL)
PAGEFLAG(Waiters, waiters, PF_ONLY_HEAD) __CLEARPAGEFLAG(Waiters, waiters, PF_ONLY_HEAD)
-PAGEFLAG(Error, error, PF_NO_COMPOUND) TESTCLEARFLAG(Error, error, PF_NO_COMPOUND)
+PAGEFLAG(Error, error, PF_NO_TAIL) TESTCLEARFLAG(Error, error, PF_NO_TAIL)
PAGEFLAG(Referenced, referenced, PF_HEAD)
TESTCLEARFLAG(Referenced, referenced, PF_HEAD)
__SETPAGEFLAG(Referenced, referenced, PF_HEAD)
diff --git a/include/linux/page-isolation.h b/include/linux/page-isolation.h
index 1099c2fee20f..572458016331 100644
--- a/include/linux/page-isolation.h
+++ b/include/linux/page-isolation.h
@@ -30,11 +30,11 @@ static inline bool is_migrate_isolate(int migratetype)
}
#endif
-#define SKIP_HWPOISON 0x1
+#define MEMORY_OFFLINE 0x1
#define REPORT_FAILURE 0x2
-bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
- int migratetype, int flags);
+struct page *has_unmovable_pages(struct zone *zone, struct page *page,
+ int migratetype, int flags);
void set_pageblock_migratetype(struct page *page, int migratetype);
int move_freepages_block(struct zone *zone, struct page *page,
int migratetype, int *num_movable);
@@ -58,7 +58,7 @@ undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
 * Test whether all pages in [start_pfn, end_pfn) are isolated.
*/
int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn,
- bool skip_hwpoisoned_pages);
+ int isol_flags);
struct page *alloc_migrate_target(struct page *page, unsigned long private);
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 37a4d9e32cd3..ccb14b6a16b5 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -636,4 +636,32 @@ static inline unsigned long dir_pages(struct inode *inode)
PAGE_SHIFT;
}
+/**
+ * page_mkwrite_check_truncate - check if page was truncated
+ * @page: the page to check
+ * @inode: the inode to check the page against
+ *
+ * Returns the number of bytes in the page up to EOF,
+ * or -EFAULT if the page was truncated.
+ */
+static inline int page_mkwrite_check_truncate(struct page *page,
+ struct inode *inode)
+{
+ loff_t size = i_size_read(inode);
+ pgoff_t index = size >> PAGE_SHIFT;
+ int offset = offset_in_page(size);
+
+ if (page->mapping != inode->i_mapping)
+ return -EFAULT;
+
+ /* page is wholly inside EOF */
+ if (page->index < index)
+ return PAGE_SIZE;
+ /* page is wholly past EOF */
+ if (page->index > index || !offset)
+ return -EFAULT;
+ /* page is partially inside EOF */
+ return offset;
+}
+
#endif /* _LINUX_PAGEMAP_H */
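A hedged sketch of how a filesystem's ->page_mkwrite() handler might use the new helper; the surrounding fault plumbing and locking are assumptions for illustration, not part of this header.

static vm_fault_t example_page_mkwrite(struct vm_fault *vmf)
{
        struct page *page = vmf->page;
        struct inode *inode = file_inode(vmf->vma->vm_file);
        int len;

        lock_page(page);
        len = page_mkwrite_check_truncate(page, inode);
        if (len < 0) {
                unlock_page(page);
                return VM_FAULT_NOPAGE;         /* page was truncated under us */
        }
        /* ... prepare/dirty the first 'len' bytes of the page ... */
        return VM_FAULT_LOCKED;
}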
diff --git a/include/linux/pagewalk.h b/include/linux/pagewalk.h
index bddd9759bab9..b1cb6b753abb 100644
--- a/include/linux/pagewalk.h
+++ b/include/linux/pagewalk.h
@@ -8,24 +8,37 @@ struct mm_walk;
/**
* mm_walk_ops - callbacks for walk_page_range
- * @pud_entry: if set, called for each non-empty PUD (2nd-level) entry
- * this handler should only handle pud_trans_huge() puds.
- * the pmd_entry or pte_entry callbacks will be used for
- * regular PUDs.
- * @pmd_entry: if set, called for each non-empty PMD (3rd-level) entry
+ * @pgd_entry: if set, called for each non-empty PGD (top-level) entry
+ * @p4d_entry: if set, called for each non-empty P4D entry
+ * @pud_entry: if set, called for each non-empty PUD entry
+ * @pmd_entry: if set, called for each non-empty PMD entry
* this handler is required to be able to handle
* pmd_trans_huge() pmds. They may simply choose to
* split_huge_page() instead of handling it explicitly.
- * @pte_entry: if set, called for each non-empty PTE (4th-level) entry
- * @pte_hole: if set, called for each hole at all levels
+ * @pte_entry: if set, called for each non-empty PTE (lowest-level)
+ * entry
+ * @pte_hole: if set, called for each hole at all levels,
+ * depth is -1 if not known, 0:PGD, 1:P4D, 2:PUD, 3:PMD
+ * 4:PTE. Any folded depths (where PTRS_PER_P?D is equal
+ * to 1) are skipped.
* @hugetlb_entry: if set, called for each hugetlb entry
* @test_walk: caller specific callback function to determine whether
* we walk over the current vma or not. Returning 0 means
* "do page table walk over the current vma", returning
* a negative value means "abort current page table walk
* right now" and returning 1 means "skip the current vma"
+ * @pre_vma: if set, called before starting walk on a non-null vma.
+ * @post_vma: if set, called after a walk on a non-null vma, provided
+ * that @pre_vma and the vma walk succeeded.
+ *
+ * p?d_entry callbacks are called even if those levels are folded on a
+ * particular architecture/configuration.
*/
struct mm_walk_ops {
+ int (*pgd_entry)(pgd_t *pgd, unsigned long addr,
+ unsigned long next, struct mm_walk *walk);
+ int (*p4d_entry)(p4d_t *p4d, unsigned long addr,
+ unsigned long next, struct mm_walk *walk);
int (*pud_entry)(pud_t *pud, unsigned long addr,
unsigned long next, struct mm_walk *walk);
int (*pmd_entry)(pmd_t *pmd, unsigned long addr,
@@ -33,19 +46,38 @@ struct mm_walk_ops {
int (*pte_entry)(pte_t *pte, unsigned long addr,
unsigned long next, struct mm_walk *walk);
int (*pte_hole)(unsigned long addr, unsigned long next,
- struct mm_walk *walk);
+ int depth, struct mm_walk *walk);
int (*hugetlb_entry)(pte_t *pte, unsigned long hmask,
unsigned long addr, unsigned long next,
struct mm_walk *walk);
int (*test_walk)(unsigned long addr, unsigned long next,
struct mm_walk *walk);
+ int (*pre_vma)(unsigned long start, unsigned long end,
+ struct mm_walk *walk);
+ void (*post_vma)(struct mm_walk *walk);
+};
+
+/*
+ * Action for pud_entry / pmd_entry callbacks.
+ * ACTION_SUBTREE is the default
+ */
+enum page_walk_action {
+ /* Descend to next level, splitting huge pages if needed and possible */
+ ACTION_SUBTREE = 0,
+ /* Continue to next entry at this level (ignoring any subtree) */
+ ACTION_CONTINUE = 1,
+ /* Call again for this entry */
+ ACTION_AGAIN = 2
};
/**
* mm_walk - walk_page_range data
* @ops: operation to call during the walk
* @mm: mm_struct representing the target process of page table walk
+ * @pgd: pointer to PGD; only valid with no_vma (otherwise set to NULL)
* @vma: vma currently walked (NULL if walking outside vmas)
+ * @action: next action to perform (see enum page_walk_action)
+ * @no_vma: walk ignoring vmas (vma will always be NULL)
* @private: private data for callbacks' usage
*
* (see the comment on walk_page_range() for more details)
@@ -53,14 +85,24 @@ struct mm_walk_ops {
struct mm_walk {
const struct mm_walk_ops *ops;
struct mm_struct *mm;
+ pgd_t *pgd;
struct vm_area_struct *vma;
+ enum page_walk_action action;
+ bool no_vma;
void *private;
};
int walk_page_range(struct mm_struct *mm, unsigned long start,
unsigned long end, const struct mm_walk_ops *ops,
void *private);
+int walk_page_range_novma(struct mm_struct *mm, unsigned long start,
+ unsigned long end, const struct mm_walk_ops *ops,
+ pgd_t *pgd,
+ void *private);
int walk_page_vma(struct vm_area_struct *vma, const struct mm_walk_ops *ops,
void *private);
+int walk_page_mapping(struct address_space *mapping, pgoff_t first_index,
+ pgoff_t nr, const struct mm_walk_ops *ops,
+ void *private);
#endif /* _LINUX_PAGEWALK_H */
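As an illustrative sketch only (the counting callback and its caller are assumptions), a walker built on the extended mm_walk_ops, using just the pte_entry callback:

static int count_present_pte(pte_t *pte, unsigned long addr,
                             unsigned long next, struct mm_walk *walk)
{
        unsigned long *count = walk->private;

        if (pte_present(*pte))
                (*count)++;
        return 0;
}

static const struct mm_walk_ops count_ops = {
        .pte_entry = count_present_pte,
};

static unsigned long count_present(struct mm_struct *mm,
                                   unsigned long start, unsigned long end)
{
        unsigned long count = 0;

        /* The caller is assumed to hold mmap_sem. */
        walk_page_range(mm, start, end, &count_ops, &count);
        return count;
}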
diff --git a/include/linux/parport.h b/include/linux/parport.h
index 397607a0c0eb..13932ce8b37b 100644
--- a/include/linux/parport.h
+++ b/include/linux/parport.h
@@ -460,6 +460,7 @@ extern size_t parport_ieee1284_epp_read_addr (struct parport *,
void *, size_t, int);
/* IEEE1284.3 functions */
+#define daisy_dev_name "Device ID probe"
extern int parport_daisy_init (struct parport *port);
extern void parport_daisy_fini (struct parport *port);
extern struct pardevice *parport_open (int devnum, const char *name);
diff --git a/include/linux/part_stat.h b/include/linux/part_stat.h
new file mode 100644
index 000000000000..ece607607a86
--- /dev/null
+++ b/include/linux/part_stat.h
@@ -0,0 +1,115 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_PART_STAT_H
+#define _LINUX_PART_STAT_H
+
+#include <linux/genhd.h>
+
+/*
+ * Macros to operate on percpu disk statistics:
+ *
+ * {disk|part|all}_stat_{add|sub|inc|dec}() modify the stat counters
+ * and should be called between disk_stat_lock() and
+ * disk_stat_unlock().
+ *
+ * part_stat_read() can be called at any time.
+ *
+ * part_stat_{add|set_all}() and {init|free}_part_stats are for
+ * internal use only.
+ */
+#ifdef CONFIG_SMP
+#define part_stat_lock() ({ rcu_read_lock(); get_cpu(); })
+#define part_stat_unlock() do { put_cpu(); rcu_read_unlock(); } while (0)
+
+#define part_stat_get_cpu(part, field, cpu) \
+ (per_cpu_ptr((part)->dkstats, (cpu))->field)
+
+#define part_stat_get(part, field) \
+ part_stat_get_cpu(part, field, smp_processor_id())
+
+#define part_stat_read(part, field) \
+({ \
+ typeof((part)->dkstats->field) res = 0; \
+ unsigned int _cpu; \
+ for_each_possible_cpu(_cpu) \
+ res += per_cpu_ptr((part)->dkstats, _cpu)->field; \
+ res; \
+})
+
+static inline void part_stat_set_all(struct hd_struct *part, int value)
+{
+ int i;
+
+ for_each_possible_cpu(i)
+ memset(per_cpu_ptr(part->dkstats, i), value,
+ sizeof(struct disk_stats));
+}
+
+static inline int init_part_stats(struct hd_struct *part)
+{
+ part->dkstats = alloc_percpu(struct disk_stats);
+ if (!part->dkstats)
+ return 0;
+ return 1;
+}
+
+static inline void free_part_stats(struct hd_struct *part)
+{
+ free_percpu(part->dkstats);
+}
+
+#else /* !CONFIG_SMP */
+#define part_stat_lock() ({ rcu_read_lock(); 0; })
+#define part_stat_unlock() rcu_read_unlock()
+
+#define part_stat_get(part, field) ((part)->dkstats.field)
+#define part_stat_get_cpu(part, field, cpu) part_stat_get(part, field)
+#define part_stat_read(part, field) part_stat_get(part, field)
+
+static inline void part_stat_set_all(struct hd_struct *part, int value)
+{
+ memset(&part->dkstats, value, sizeof(struct disk_stats));
+}
+
+static inline int init_part_stats(struct hd_struct *part)
+{
+ return 1;
+}
+
+static inline void free_part_stats(struct hd_struct *part)
+{
+}
+
+#endif /* CONFIG_SMP */
+
+#define part_stat_read_accum(part, field) \
+ (part_stat_read(part, field[STAT_READ]) + \
+ part_stat_read(part, field[STAT_WRITE]) + \
+ part_stat_read(part, field[STAT_DISCARD]))
+
+#define __part_stat_add(part, field, addnd) \
+ (part_stat_get(part, field) += (addnd))
+
+#define part_stat_add(part, field, addnd) do { \
+ __part_stat_add((part), field, addnd); \
+ if ((part)->partno) \
+ __part_stat_add(&part_to_disk((part))->part0, \
+ field, addnd); \
+} while (0)
+
+#define part_stat_dec(gendiskp, field) \
+ part_stat_add(gendiskp, field, -1)
+#define part_stat_inc(gendiskp, field) \
+ part_stat_add(gendiskp, field, 1)
+#define part_stat_sub(gendiskp, field, subnd) \
+ part_stat_add(gendiskp, field, -subnd)
+
+#define part_stat_local_dec(gendiskp, field) \
+ local_dec(&(part_stat_get(gendiskp, field)))
+#define part_stat_local_inc(gendiskp, field) \
+ local_inc(&(part_stat_get(gendiskp, field)))
+#define part_stat_local_read(gendiskp, field) \
+ local_read(&(part_stat_get(gendiskp, field)))
+#define part_stat_local_read_cpu(gendiskp, field, cpu) \
+ local_read(&(part_stat_get_cpu(gendiskp, field, cpu)))
+
+#endif /* _LINUX_PART_STAT_H */
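For illustration, the typical update pattern with these macros; the accounting helper and its fields (ios[], sectors[]) are shown as a sketch of how block-layer code uses the lock/modify/unlock discipline described in the comment above.

static void example_account_io(struct hd_struct *part, int sgrp,
                               unsigned int sectors)
{
        part_stat_lock();                       /* rcu_read_lock() + pin the CPU */
        part_stat_inc(part, ios[sgrp]);
        part_stat_add(part, sectors[sgrp], sectors);
        part_stat_unlock();
}

/* Readers need no lock, e.g.: part_stat_read(part, ios[STAT_READ]); */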
diff --git a/include/linux/pci-ats.h b/include/linux/pci-ats.h
index 1ebb88e7c184..d08f0869f121 100644
--- a/include/linux/pci-ats.h
+++ b/include/linux/pci-ats.h
@@ -4,74 +4,42 @@
#include <linux/pci.h>
-#ifdef CONFIG_PCI_PRI
+#ifdef CONFIG_PCI_ATS
+/* Address Translation Service */
+int pci_enable_ats(struct pci_dev *dev, int ps);
+void pci_disable_ats(struct pci_dev *dev);
+int pci_ats_queue_depth(struct pci_dev *dev);
+int pci_ats_page_aligned(struct pci_dev *dev);
+#else /* CONFIG_PCI_ATS */
+static inline int pci_enable_ats(struct pci_dev *d, int ps)
+{ return -ENODEV; }
+static inline void pci_disable_ats(struct pci_dev *d) { }
+static inline int pci_ats_queue_depth(struct pci_dev *d)
+{ return -ENODEV; }
+static inline int pci_ats_page_aligned(struct pci_dev *dev)
+{ return 0; }
+#endif /* CONFIG_PCI_ATS */
+#ifdef CONFIG_PCI_PRI
int pci_enable_pri(struct pci_dev *pdev, u32 reqs);
void pci_disable_pri(struct pci_dev *pdev);
-void pci_restore_pri_state(struct pci_dev *pdev);
int pci_reset_pri(struct pci_dev *pdev);
-
-#else /* CONFIG_PCI_PRI */
-
-static inline int pci_enable_pri(struct pci_dev *pdev, u32 reqs)
-{
- return -ENODEV;
-}
-
-static inline void pci_disable_pri(struct pci_dev *pdev)
-{
-}
-
-static inline void pci_restore_pri_state(struct pci_dev *pdev)
-{
-}
-
-static inline int pci_reset_pri(struct pci_dev *pdev)
-{
- return -ENODEV;
-}
-
+int pci_prg_resp_pasid_required(struct pci_dev *pdev);
#endif /* CONFIG_PCI_PRI */
#ifdef CONFIG_PCI_PASID
-
int pci_enable_pasid(struct pci_dev *pdev, int features);
void pci_disable_pasid(struct pci_dev *pdev);
-void pci_restore_pasid_state(struct pci_dev *pdev);
int pci_pasid_features(struct pci_dev *pdev);
int pci_max_pasids(struct pci_dev *pdev);
-int pci_prg_resp_pasid_required(struct pci_dev *pdev);
-
-#else /* CONFIG_PCI_PASID */
-
+#else /* CONFIG_PCI_PASID */
static inline int pci_enable_pasid(struct pci_dev *pdev, int features)
-{
- return -EINVAL;
-}
-
-static inline void pci_disable_pasid(struct pci_dev *pdev)
-{
-}
-
-static inline void pci_restore_pasid_state(struct pci_dev *pdev)
-{
-}
-
+{ return -EINVAL; }
+static inline void pci_disable_pasid(struct pci_dev *pdev) { }
static inline int pci_pasid_features(struct pci_dev *pdev)
-{
- return -EINVAL;
-}
-
+{ return -EINVAL; }
static inline int pci_max_pasids(struct pci_dev *pdev)
-{
- return -EINVAL;
-}
-
-static inline int pci_prg_resp_pasid_required(struct pci_dev *pdev)
-{
- return 0;
-}
+{ return -EINVAL; }
#endif /* CONFIG_PCI_PASID */
-
-#endif /* LINUX_PCI_ATS_H*/
+#endif /* LINUX_PCI_ATS_H */
diff --git a/include/linux/pci-epc.h b/include/linux/pci-epc.h
index f641badc2c61..56f1846b9d39 100644
--- a/include/linux/pci-epc.h
+++ b/include/linux/pci-epc.h
@@ -117,7 +117,7 @@ struct pci_epc_features {
unsigned int msix_capable : 1;
u8 reserved_bar;
u8 bar_fixed_64bit;
- u64 bar_fixed_size[BAR_5 + 1];
+ u64 bar_fixed_size[PCI_STD_NUM_BARS];
size_t align;
};
diff --git a/include/linux/pci.h b/include/linux/pci.h
index f9088c89a534..3840a541a9de 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -82,7 +82,7 @@ enum pci_mmap_state {
enum {
/* #0-5: standard PCI resources */
PCI_STD_RESOURCES,
- PCI_STD_RESOURCE_END = 5,
+ PCI_STD_RESOURCE_END = PCI_STD_RESOURCES + PCI_STD_NUM_BARS - 1,
/* #6: expansion ROM resource */
PCI_ROM_RESOURCE,
@@ -284,7 +284,6 @@ struct irq_affinity;
struct pcie_link_state;
struct pci_vpd;
struct pci_sriov;
-struct pci_ats;
struct pci_p2pdma;
/* The pci_dev structure describes PCI devices */
@@ -452,12 +451,14 @@ struct pci_dev {
};
u16 ats_cap; /* ATS Capability offset */
u8 ats_stu; /* ATS Smallest Translation Unit */
- atomic_t ats_ref_cnt; /* Number of VFs with ATS enabled */
#endif
#ifdef CONFIG_PCI_PRI
+ u16 pri_cap; /* PRI Capability offset */
u32 pri_reqs_alloc; /* Number of PRI requests allocated */
+ unsigned int pasid_required:1; /* PRG Response PASID Required */
#endif
#ifdef CONFIG_PCI_PASID
+ u16 pasid_cap; /* PASID Capability offset */
u16 pasid_features;
#endif
#ifdef CONFIG_PCI_P2PDMA
@@ -805,8 +806,6 @@ struct module;
* The remove function always gets called from process
* context, so it can sleep.
* @suspend: Put device into low power state.
- * @suspend_late: Put device into low power state.
- * @resume_early: Wake device from low power state.
* @resume: Wake device from low power state.
* (Please see Documentation/power/pci.rst for descriptions
* of PCI Power Management and the related functions.)
@@ -829,8 +828,6 @@ struct pci_driver {
int (*probe)(struct pci_dev *dev, const struct pci_device_id *id); /* New device inserted */
void (*remove)(struct pci_dev *dev); /* Device removed (NULL if not a hot-plug capable driver) */
int (*suspend)(struct pci_dev *dev, pm_message_t state); /* Device suspended */
- int (*suspend_late)(struct pci_dev *dev, pm_message_t state);
- int (*resume_early)(struct pci_dev *dev);
int (*resume)(struct pci_dev *dev); /* Device woken up */
void (*shutdown)(struct pci_dev *dev);
int (*sriov_configure)(struct pci_dev *dev, int num_vfs); /* On PF */
@@ -1205,6 +1202,7 @@ int __must_check pci_resize_resource(struct pci_dev *dev, int i, int size);
int pci_select_bars(struct pci_dev *dev, unsigned long flags);
bool pci_device_is_present(struct pci_dev *pdev);
void pci_ignore_hotplug(struct pci_dev *dev);
+struct pci_dev *pci_real_dma_dev(struct pci_dev *dev);
int __printf(6, 7) pci_request_irq(struct pci_dev *dev, unsigned int nr,
irq_handler_t handler, irq_handler_t thread_fn, void *dev_id,
@@ -1232,7 +1230,7 @@ struct pci_cap_saved_state *pci_find_saved_ext_cap(struct pci_dev *dev,
int pci_add_cap_save_buffer(struct pci_dev *dev, char cap, unsigned int size);
int pci_add_ext_cap_save_buffer(struct pci_dev *dev,
u16 cap, unsigned int size);
-int __pci_complete_power_transition(struct pci_dev *dev, pci_power_t state);
+int pci_platform_power_transition(struct pci_dev *dev, pci_power_t state);
int pci_set_power_state(struct pci_dev *dev, pci_power_t state);
pci_power_t pci_choose_state(struct pci_dev *dev, pm_message_t state);
bool pci_pme_capable(struct pci_dev *dev, pci_power_t state);
@@ -1454,7 +1452,6 @@ int pci_alloc_irq_vectors_affinity(struct pci_dev *dev, unsigned int min_vecs,
void pci_free_irq_vectors(struct pci_dev *dev);
int pci_irq_vector(struct pci_dev *dev, unsigned int nr);
const struct cpumask *pci_irq_get_affinity(struct pci_dev *pdev, int vec);
-int pci_irq_get_node(struct pci_dev *pdev, int vec);
#else
static inline int pci_msi_vec_count(struct pci_dev *dev) { return -ENOSYS; }
@@ -1497,11 +1494,6 @@ static inline const struct cpumask *pci_irq_get_affinity(struct pci_dev *pdev,
{
return cpu_possible_mask;
}
-
-static inline int pci_irq_get_node(struct pci_dev *pdev, int vec)
-{
- return first_online_node;
-}
#endif
/**
@@ -1544,9 +1536,13 @@ extern bool pcie_ports_native;
#define pcie_ports_native false
#endif
-#define PCIE_LINK_STATE_L0S 1
-#define PCIE_LINK_STATE_L1 2
-#define PCIE_LINK_STATE_CLKPM 4
+#define PCIE_LINK_STATE_L0S BIT(0)
+#define PCIE_LINK_STATE_L1 BIT(1)
+#define PCIE_LINK_STATE_CLKPM BIT(2)
+#define PCIE_LINK_STATE_L1_1 BIT(3)
+#define PCIE_LINK_STATE_L1_2 BIT(4)
+#define PCIE_LINK_STATE_L1_1_PCIPM BIT(5)
+#define PCIE_LINK_STATE_L1_2_PCIPM BIT(6)
#ifdef CONFIG_PCIEASPM
int pci_disable_link_state(struct pci_dev *pdev, int state);
@@ -1686,6 +1682,7 @@ static inline struct pci_dev *pci_get_class(unsigned int class,
static inline void pci_set_master(struct pci_dev *dev) { }
static inline int pci_enable_device(struct pci_dev *dev) { return -EIO; }
static inline void pci_disable_device(struct pci_dev *dev) { }
+static inline int pcim_enable_device(struct pci_dev *pdev) { return -EIO; }
static inline int pci_assign_resource(struct pci_dev *dev, int i)
{ return -EBUSY; }
static inline int __pci_register_driver(struct pci_driver *drv,
@@ -1776,19 +1773,6 @@ pci_alloc_irq_vectors(struct pci_dev *dev, unsigned int min_vecs,
NULL);
}
-#ifdef CONFIG_PCI_ATS
-/* Address Translation Service */
-int pci_enable_ats(struct pci_dev *dev, int ps);
-void pci_disable_ats(struct pci_dev *dev);
-int pci_ats_queue_depth(struct pci_dev *dev);
-int pci_ats_page_aligned(struct pci_dev *dev);
-#else
-static inline int pci_enable_ats(struct pci_dev *d, int ps) { return -ENODEV; }
-static inline void pci_disable_ats(struct pci_dev *d) { }
-static inline int pci_ats_queue_depth(struct pci_dev *d) { return -ENODEV; }
-static inline int pci_ats_page_aligned(struct pci_dev *dev) { return 0; }
-#endif
-
/* Include architecture-dependent settings and functions */
#include <asm/pci.h>
@@ -2278,6 +2262,7 @@ struct irq_domain;
struct irq_domain *pci_host_bridge_of_msi_domain(struct pci_bus *bus);
int pci_parse_request_of_pci_ranges(struct device *dev,
struct list_head *resources,
+ struct list_head *ib_resources,
struct resource **bus_range);
/* Arch may override this (weak) */
@@ -2286,9 +2271,11 @@ struct device_node *pcibios_get_phb_of_node(struct pci_bus *bus);
#else /* CONFIG_OF */
static inline struct irq_domain *
pci_host_bridge_of_msi_domain(struct pci_bus *bus) { return NULL; }
-static inline int pci_parse_request_of_pci_ranges(struct device *dev,
- struct list_head *resources,
- struct resource **bus_range)
+static inline int
+pci_parse_request_of_pci_ranges(struct device *dev,
+ struct list_head *resources,
+ struct list_head *ib_resources,
+ struct resource **bus_range)
{
return -EINVAL;
}
@@ -2310,9 +2297,11 @@ struct irq_domain *pci_host_bridge_acpi_msi_domain(struct pci_bus *bus);
void
pci_msi_register_fwnode_provider(struct fwnode_handle *(*fn)(struct device *));
+bool pci_pr3_present(struct pci_dev *pdev);
#else
static inline struct irq_domain *
pci_host_bridge_acpi_msi_domain(struct pci_bus *bus) { return NULL; }
+static inline bool pci_pr3_present(struct pci_dev *pdev) { return false; }
#endif
#ifdef CONFIG_EEH
@@ -2322,7 +2311,7 @@ static inline struct eeh_dev *pci_dev_to_eeh_dev(struct pci_dev *pdev)
}
#endif
-void pci_add_dma_alias(struct pci_dev *dev, u8 devfn);
+void pci_add_dma_alias(struct pci_dev *dev, u8 devfn_from, unsigned nr_devfns);
bool pci_devs_are_dma_aliases(struct pci_dev *dev1, struct pci_dev *dev2);
int pci_for_each_dma_alias(struct pci_dev *pdev,
int (*fn)(struct pci_dev *pdev,
@@ -2400,4 +2389,12 @@ void pci_uevent_ers(struct pci_dev *pdev, enum pci_ers_result err_type);
#define pci_info_ratelimited(pdev, fmt, arg...) \
dev_info_ratelimited(&(pdev)->dev, fmt, ##arg)
+#define pci_WARN(pdev, condition, fmt, arg...) \
+ WARN(condition, "%s %s: " fmt, \
+ dev_driver_string(&(pdev)->dev), pci_name(pdev), ##arg)
+
+#define pci_WARN_ONCE(pdev, condition, fmt, arg...) \
+ WARN_ONCE(condition, "%s %s: " fmt, \
+ dev_driver_string(&(pdev)->dev), pci_name(pdev), ##arg)
+
#endif /* LINUX_PCI_H */
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 21a572469a4e..977e66875a96 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -148,6 +148,8 @@
/* Vendors and devices. Sort key: vendor first, device next. */
+#define PCI_VENDOR_ID_LOONGSON 0x0014
+
#define PCI_VENDOR_ID_TTTECH 0x0357
#define PCI_DEVICE_ID_TTTECH_MC322 0x000a
@@ -549,6 +551,7 @@
#define PCI_DEVICE_ID_AMD_17H_M10H_DF_F3 0x15eb
#define PCI_DEVICE_ID_AMD_17H_M30H_DF_F3 0x1493
#define PCI_DEVICE_ID_AMD_17H_M70H_DF_F3 0x1443
+#define PCI_DEVICE_ID_AMD_19H_DF_F3 0x1653
#define PCI_DEVICE_ID_AMD_CNB17H_F3 0x1703
#define PCI_DEVICE_ID_AMD_LANCE 0x2000
#define PCI_DEVICE_ID_AMD_LANCE_HOME 0x2001
@@ -3006,6 +3009,7 @@
#define PCI_DEVICE_ID_INTEL_84460GX 0x84ea
#define PCI_DEVICE_ID_INTEL_IXP4XX 0x8500
#define PCI_DEVICE_ID_INTEL_IXP2800 0x9004
+#define PCI_DEVICE_ID_INTEL_VMD_9A0B 0x9a0b
#define PCI_DEVICE_ID_INTEL_S21152BB 0xb152
#define PCI_VENDOR_ID_SCALEMP 0x8686
diff --git a/include/linux/pe.h b/include/linux/pe.h
index c86bd3a2f70f..8ad71d763a77 100644
--- a/include/linux/pe.h
+++ b/include/linux/pe.h
@@ -10,6 +10,27 @@
#include <linux/types.h>
+/*
+ * Linux EFI stub v1.0 adds the following functionality:
+ * - Loading initrd from the LINUX_EFI_INITRD_MEDIA_GUID device path,
+ * - Loading/starting the kernel from firmware that targets a different
+ * machine type, via the entrypoint exposed in the .compat PE/COFF section.
+ *
+ * The recommended way of loading and starting v1.0 or later kernels is to use
+ * the LoadImage() and StartImage() EFI boot services, and expose the initrd
+ * via the LINUX_EFI_INITRD_MEDIA_GUID device path.
+ *
+ * Versions older than v1.0 support initrd loading via the image load options
+ * (using initrd=, limited to the volume from which the kernel itself was
+ * loaded), or via arch specific means (bootparams, DT, etc).
+ *
+ * On x86, LoadImage() and StartImage() can be omitted if the EFI handover
+ * protocol is implemented, which can be inferred from the version,
+ * handover_offset and xloadflags fields in the bootparams structure.
+ */
+#define LINUX_EFISTUB_MAJOR_VERSION 0x1
+#define LINUX_EFISTUB_MINOR_VERSION 0x0
+
#define MZ_MAGIC 0x5a4d /* "MZ" */
#define PE_MAGIC 0x00004550 /* "PE\0\0" */
diff --git a/include/linux/percpu-defs.h b/include/linux/percpu-defs.h
index a6fabd865211..176bfbd52d97 100644
--- a/include/linux/percpu-defs.h
+++ b/include/linux/percpu-defs.h
@@ -175,8 +175,7 @@
* Declaration/definition used for per-CPU variables that should be accessed
* as decrypted when memory encryption is enabled in the guest.
*/
-#if defined(CONFIG_VIRTUALIZATION) && defined(CONFIG_AMD_MEM_ENCRYPT)
-
+#ifdef CONFIG_AMD_MEM_ENCRYPT
#define DECLARE_PER_CPU_DECRYPTED(type, name) \
DECLARE_PER_CPU_SECTION(type, name, "..decrypted")
diff --git a/include/linux/percpu-refcount.h b/include/linux/percpu-refcount.h
index 7aef0abc194a..22d9d183950d 100644
--- a/include/linux/percpu-refcount.h
+++ b/include/linux/percpu-refcount.h
@@ -186,14 +186,14 @@ static inline void percpu_ref_get_many(struct percpu_ref *ref, unsigned long nr)
{
unsigned long __percpu *percpu_count;
- rcu_read_lock_sched();
+ rcu_read_lock();
if (__ref_is_percpu(ref, &percpu_count))
this_cpu_add(*percpu_count, nr);
else
atomic_long_add(nr, &ref->count);
- rcu_read_unlock_sched();
+ rcu_read_unlock();
}
/**
@@ -210,34 +210,50 @@ static inline void percpu_ref_get(struct percpu_ref *ref)
}
/**
- * percpu_ref_tryget - try to increment a percpu refcount
+ * percpu_ref_tryget_many - try to increment a percpu refcount
* @ref: percpu_ref to try-get
+ * @nr: number of references to get
*
- * Increment a percpu refcount unless its count already reached zero.
+ * Increment a percpu refcount by @nr unless its count already reached zero.
* Returns %true on success; %false on failure.
*
* This function is safe to call as long as @ref is between init and exit.
*/
-static inline bool percpu_ref_tryget(struct percpu_ref *ref)
+static inline bool percpu_ref_tryget_many(struct percpu_ref *ref,
+ unsigned long nr)
{
unsigned long __percpu *percpu_count;
bool ret;
- rcu_read_lock_sched();
+ rcu_read_lock();
if (__ref_is_percpu(ref, &percpu_count)) {
- this_cpu_inc(*percpu_count);
+ this_cpu_add(*percpu_count, nr);
ret = true;
} else {
- ret = atomic_long_inc_not_zero(&ref->count);
+ ret = atomic_long_add_unless(&ref->count, nr, 0);
}
- rcu_read_unlock_sched();
+ rcu_read_unlock();
return ret;
}
/**
+ * percpu_ref_tryget - try to increment a percpu refcount
+ * @ref: percpu_ref to try-get
+ *
+ * Increment a percpu refcount unless its count already reached zero.
+ * Returns %true on success; %false on failure.
+ *
+ * This function is safe to call as long as @ref is between init and exit.
+ */
+static inline bool percpu_ref_tryget(struct percpu_ref *ref)
+{
+ return percpu_ref_tryget_many(ref, 1);
+}
+
+/**
* percpu_ref_tryget_live - try to increment a live percpu refcount
* @ref: percpu_ref to try-get
*
@@ -257,7 +273,7 @@ static inline bool percpu_ref_tryget_live(struct percpu_ref *ref)
unsigned long __percpu *percpu_count;
bool ret = false;
- rcu_read_lock_sched();
+ rcu_read_lock();
if (__ref_is_percpu(ref, &percpu_count)) {
this_cpu_inc(*percpu_count);
@@ -266,7 +282,7 @@ static inline bool percpu_ref_tryget_live(struct percpu_ref *ref)
ret = atomic_long_inc_not_zero(&ref->count);
}
- rcu_read_unlock_sched();
+ rcu_read_unlock();
return ret;
}
@@ -285,14 +301,14 @@ static inline void percpu_ref_put_many(struct percpu_ref *ref, unsigned long nr)
{
unsigned long __percpu *percpu_count;
- rcu_read_lock_sched();
+ rcu_read_lock();
if (__ref_is_percpu(ref, &percpu_count))
this_cpu_sub(*percpu_count, nr);
else if (unlikely(atomic_long_sub_and_test(nr, &ref->count)))
ref->release(ref);
- rcu_read_unlock_sched();
+ rcu_read_unlock();
}
/**
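A small, hypothetical sketch of the new bulk try-get: take several references in one step, failing cleanly if the refcount has already reached zero. The batch semantics here are an assumption for the example.

/* Take nr references at once; fails if the refcount already hit zero. */
static bool example_grab_batch(struct percpu_ref *ref, unsigned long nr)
{
        if (!percpu_ref_tryget_many(ref, nr))
                return false;
        /* ... hand out nr objects; drop them later with percpu_ref_put_many() ... */
        return true;
}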
diff --git a/include/linux/percpu-rwsem.h b/include/linux/percpu-rwsem.h
index 3998cdf9cd14..5e033fe1ff4e 100644
--- a/include/linux/percpu-rwsem.h
+++ b/include/linux/percpu-rwsem.h
@@ -3,41 +3,52 @@
#define _LINUX_PERCPU_RWSEM_H
#include <linux/atomic.h>
-#include <linux/rwsem.h>
#include <linux/percpu.h>
#include <linux/rcuwait.h>
+#include <linux/wait.h>
#include <linux/rcu_sync.h>
#include <linux/lockdep.h>
struct percpu_rw_semaphore {
struct rcu_sync rss;
unsigned int __percpu *read_count;
- struct rw_semaphore rw_sem; /* slowpath */
- struct rcuwait writer; /* blocked writer */
- int readers_block;
+ struct rcuwait writer;
+ wait_queue_head_t waiters;
+ atomic_t block;
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ struct lockdep_map dep_map;
+#endif
};
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+#define __PERCPU_RWSEM_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname },
+#else
+#define __PERCPU_RWSEM_DEP_MAP_INIT(lockname)
+#endif
+
#define __DEFINE_PERCPU_RWSEM(name, is_static) \
static DEFINE_PER_CPU(unsigned int, __percpu_rwsem_rc_##name); \
is_static struct percpu_rw_semaphore name = { \
.rss = __RCU_SYNC_INITIALIZER(name.rss), \
.read_count = &__percpu_rwsem_rc_##name, \
- .rw_sem = __RWSEM_INITIALIZER(name.rw_sem), \
.writer = __RCUWAIT_INITIALIZER(name.writer), \
+ .waiters = __WAIT_QUEUE_HEAD_INITIALIZER(name.waiters), \
+ .block = ATOMIC_INIT(0), \
+ __PERCPU_RWSEM_DEP_MAP_INIT(name) \
}
+
#define DEFINE_PERCPU_RWSEM(name) \
__DEFINE_PERCPU_RWSEM(name, /* not static */)
#define DEFINE_STATIC_PERCPU_RWSEM(name) \
__DEFINE_PERCPU_RWSEM(name, static)
-extern int __percpu_down_read(struct percpu_rw_semaphore *, int);
-extern void __percpu_up_read(struct percpu_rw_semaphore *);
+extern bool __percpu_down_read(struct percpu_rw_semaphore *, bool);
static inline void percpu_down_read(struct percpu_rw_semaphore *sem)
{
might_sleep();
- rwsem_acquire_read(&sem->rw_sem.dep_map, 0, 0, _RET_IP_);
+ rwsem_acquire_read(&sem->dep_map, 0, 0, _RET_IP_);
preempt_disable();
/*
@@ -48,8 +59,9 @@ static inline void percpu_down_read(struct percpu_rw_semaphore *sem)
* and that once the synchronize_rcu() is done, the writer will see
 * anything we did within this RCU-sched read-side critical section.
*/
- __this_cpu_inc(*sem->read_count);
- if (unlikely(!rcu_sync_is_idle(&sem->rss)))
+ if (likely(rcu_sync_is_idle(&sem->rss)))
+ __this_cpu_inc(*sem->read_count);
+ else
__percpu_down_read(sem, false); /* Unconditional memory barrier */
/*
* The preempt_enable() prevents the compiler from
@@ -58,16 +70,17 @@ static inline void percpu_down_read(struct percpu_rw_semaphore *sem)
preempt_enable();
}
-static inline int percpu_down_read_trylock(struct percpu_rw_semaphore *sem)
+static inline bool percpu_down_read_trylock(struct percpu_rw_semaphore *sem)
{
- int ret = 1;
+ bool ret = true;
preempt_disable();
/*
* Same as in percpu_down_read().
*/
- __this_cpu_inc(*sem->read_count);
- if (unlikely(!rcu_sync_is_idle(&sem->rss)))
+ if (likely(rcu_sync_is_idle(&sem->rss)))
+ __this_cpu_inc(*sem->read_count);
+ else
ret = __percpu_down_read(sem, true); /* Unconditional memory barrier */
preempt_enable();
/*
@@ -76,24 +89,36 @@ static inline int percpu_down_read_trylock(struct percpu_rw_semaphore *sem)
*/
if (ret)
- rwsem_acquire_read(&sem->rw_sem.dep_map, 0, 1, _RET_IP_);
+ rwsem_acquire_read(&sem->dep_map, 0, 1, _RET_IP_);
return ret;
}
static inline void percpu_up_read(struct percpu_rw_semaphore *sem)
{
+ rwsem_release(&sem->dep_map, _RET_IP_);
+
preempt_disable();
/*
* Same as in percpu_down_read().
*/
- if (likely(rcu_sync_is_idle(&sem->rss)))
+ if (likely(rcu_sync_is_idle(&sem->rss))) {
__this_cpu_dec(*sem->read_count);
- else
- __percpu_up_read(sem); /* Unconditional memory barrier */
+ } else {
+ /*
+ * slowpath; reader will only ever wake a single blocked
+ * writer.
+ */
+ smp_mb(); /* B matches C */
+ /*
+ * In other words, if they see our decrement (presumably to
+ * aggregate zero, as that is the only time it matters) they
+ * will also see our critical section.
+ */
+ __this_cpu_dec(*sem->read_count);
+ rcuwait_wake_up(&sem->writer);
+ }
preempt_enable();
-
- rwsem_release(&sem->rw_sem.dep_map, 1, _RET_IP_);
}
extern void percpu_down_write(struct percpu_rw_semaphore *);
@@ -110,29 +135,19 @@ extern void percpu_free_rwsem(struct percpu_rw_semaphore *);
__percpu_init_rwsem(sem, #sem, &rwsem_key); \
})
-#define percpu_rwsem_is_held(sem) lockdep_is_held(&(sem)->rw_sem)
-
-#define percpu_rwsem_assert_held(sem) \
- lockdep_assert_held(&(sem)->rw_sem)
+#define percpu_rwsem_is_held(sem) lockdep_is_held(sem)
+#define percpu_rwsem_assert_held(sem) lockdep_assert_held(sem)
static inline void percpu_rwsem_release(struct percpu_rw_semaphore *sem,
bool read, unsigned long ip)
{
- lock_release(&sem->rw_sem.dep_map, 1, ip);
-#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
- if (!read)
- atomic_long_set(&sem->rw_sem.owner, RWSEM_OWNER_UNKNOWN);
-#endif
+ lock_release(&sem->dep_map, ip);
}
static inline void percpu_rwsem_acquire(struct percpu_rw_semaphore *sem,
bool read, unsigned long ip)
{
- lock_acquire(&sem->rw_sem.dep_map, 0, 1, read, 1, NULL, ip);
-#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
- if (!read)
- atomic_long_set(&sem->rw_sem.owner, (long)current);
-#endif
+ lock_acquire(&sem->dep_map, 0, 1, read, 1, NULL, ip);
}
#endif
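Call sites are unchanged by this rework; as a minimal sketch (names illustrative), readers stay on the per-CPU fast path while a writer forces them onto the slow path:

static DEFINE_STATIC_PERCPU_RWSEM(example_rwsem);

static void example_reader(void)
{
        percpu_down_read(&example_rwsem);       /* fast path: per-CPU increment only */
        /* ... read-side critical section ... */
        percpu_up_read(&example_rwsem);
}

static void example_writer(void)
{
        percpu_down_write(&example_rwsem);      /* switches readers to the slow path */
        /* ... exclusive section ... */
        percpu_up_write(&example_rwsem);
}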
diff --git a/include/linux/perf/arm_pmu.h b/include/linux/perf/arm_pmu.h
index 71f525a35ac2..5b616dde9a4c 100644
--- a/include/linux/perf/arm_pmu.h
+++ b/include/linux/perf/arm_pmu.h
@@ -80,6 +80,7 @@ struct arm_pmu {
struct pmu pmu;
cpumask_t supported_cpus;
char *name;
+ int pmuver;
irqreturn_t (*handle_irq)(struct arm_pmu *pmu);
void (*enable)(struct perf_event *event);
void (*disable)(struct perf_event *event);
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 68ccc5b1913b..8768a39b5258 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -56,6 +56,7 @@ struct perf_guest_info_callbacks {
#include <linux/perf_regs.h>
#include <linux/cgroup.h>
#include <linux/refcount.h>
+#include <linux/security.h>
#include <asm/local.h>
struct perf_callchain_entry {
@@ -92,14 +93,26 @@ struct perf_raw_record {
/*
* branch stack layout:
* nr: number of taken branches stored in entries[]
+ * hw_idx: The low level index of raw branch records
+ * for the most recent branch.
+ * -1ULL means invalid/unknown.
*
* Note that nr can vary from sample to sample
* branches (to, from) are stored from most recent
* to least recent, i.e., entries[0] contains the most
* recent branch.
+ * The entries[] is an abstraction of raw branch records,
+ * which may not be stored in age order in HW, e.g. Intel LBR.
+ * The hw_idx is to expose the low level index of raw
+ * branch record for the most recent branch aka entries[0].
+ * The hw_idx index is between -1 (unknown) and max depth,
+ * which can be retrieved in /sys/devices/cpu/caps/branches.
+ * For the architectures whose raw branch records are
+ * already stored in age order, the hw_idx should be 0.
*/
struct perf_branch_stack {
__u64 nr;
+ __u64 hw_idx;
struct perf_branch_entry entries[0];
};
@@ -248,6 +261,8 @@ struct perf_event;
#define PERF_PMU_CAP_NO_EXCLUDE 0x80
#define PERF_PMU_CAP_AUX_OUTPUT 0x100
+struct perf_output_handle;
+
/**
* struct pmu - generic performance monitoring unit
*/
@@ -409,6 +424,15 @@ struct pmu {
*/
size_t task_ctx_size;
+ /*
+ * PMU specific parts of task perf event context (i.e. ctx->task_ctx_data)
+ * can be synchronized using this function. See Intel LBR callstack support
+ * implementation and Perf core context switch handling callbacks for usage
+ * examples.
+ */
+ void (*swap_task_ctx) (struct perf_event_context *prev,
+ struct perf_event_context *next);
+ /* optional */
/*
* Set up pmu-private data structures for an AUX area
@@ -423,6 +447,19 @@ struct pmu {
void (*free_aux) (void *aux); /* optional */
/*
+ * Take a snapshot of the AUX buffer without touching the event
+ * state, so that preempting ->start()/->stop() callbacks does
+ * not interfere with their logic. Called in PMI context.
+ *
+ * Returns the size of AUX data copied to the output handle.
+ *
+ * Optional.
+ */
+ long (*snapshot_aux) (struct perf_event *event,
+ struct perf_output_handle *handle,
+ unsigned long size);
+
+ /*
* Validate address range filters: make sure the HW supports the
* requested configuration and number of filters; return 0 if the
* supplied filters are valid, -errno otherwise.
@@ -557,7 +594,7 @@ struct swevent_hlist {
#define PERF_ATTACH_ITRACE 0x10
struct perf_cgroup;
-struct ring_buffer;
+struct perf_buffer;
struct pmu_event_list {
raw_spinlock_t lock;
@@ -669,7 +706,7 @@ struct perf_event {
struct mutex mmap_mutex;
atomic_t mmap_count;
- struct ring_buffer *rb;
+ struct perf_buffer *rb;
struct list_head rb_entry;
unsigned long rcu_batches;
int rcu_pending;
@@ -721,6 +758,9 @@ struct perf_event {
struct perf_cgroup *cgrp; /* cgroup event is attach to */
#endif
+#ifdef CONFIG_SECURITY
+ void *security;
+#endif
struct list_head sb_list;
#endif /* CONFIG_PERF_EVENTS */
};
@@ -822,11 +862,18 @@ struct perf_cpu_context {
int sched_cb_usage;
int online;
+ /*
+ * Per-CPU storage for iterators used in visit_groups_merge. The default
+ * storage is of size 2 to hold the CPU and any CPU event iterators.
+ */
+ int heap_size;
+ struct perf_event **heap;
+ struct perf_event *heap_default[2];
};
struct perf_output_handle {
struct perf_event *event;
- struct ring_buffer *rb;
+ struct perf_buffer *rb;
unsigned long wakeup;
unsigned long size;
u64 aux_flags;
@@ -960,6 +1007,7 @@ struct perf_sample_data {
u32 reserved;
} cpu_entry;
struct perf_callchain_entry *callchain;
+ u64 aux_size;
/*
* regs_user may point to task_pt_regs or to regs_user_copy, depending
@@ -1241,19 +1289,41 @@ extern int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write,
int perf_event_max_stack_handler(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp, loff_t *ppos);
-static inline bool perf_paranoid_tracepoint_raw(void)
+/* Access to perf_event_open(2) syscall. */
+#define PERF_SECURITY_OPEN 0
+
+/* Finer grained perf_event_open(2) access control. */
+#define PERF_SECURITY_CPU 1
+#define PERF_SECURITY_KERNEL 2
+#define PERF_SECURITY_TRACEPOINT 3
+
+static inline int perf_is_paranoid(void)
{
return sysctl_perf_event_paranoid > -1;
}
-static inline bool perf_paranoid_cpu(void)
+static inline int perf_allow_kernel(struct perf_event_attr *attr)
{
- return sysctl_perf_event_paranoid > 0;
+ if (sysctl_perf_event_paranoid > 1 && !capable(CAP_SYS_ADMIN))
+ return -EACCES;
+
+ return security_perf_event_open(attr, PERF_SECURITY_KERNEL);
}
-static inline bool perf_paranoid_kernel(void)
+static inline int perf_allow_cpu(struct perf_event_attr *attr)
{
- return sysctl_perf_event_paranoid > 1;
+ if (sysctl_perf_event_paranoid > 0 && !capable(CAP_SYS_ADMIN))
+ return -EACCES;
+
+ return security_perf_event_open(attr, PERF_SECURITY_CPU);
+}
+
+static inline int perf_allow_tracepoint(struct perf_event_attr *attr)
+{
+ if (sysctl_perf_event_paranoid > -1 && !capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ return security_perf_event_open(attr, PERF_SECURITY_TRACEPOINT);
}
extern void perf_event_init(void);
@@ -1327,6 +1397,9 @@ extern unsigned int perf_output_copy(struct perf_output_handle *handle,
const void *buf, unsigned int len);
extern unsigned int perf_output_skip(struct perf_output_handle *handle,
unsigned int len);
+extern long perf_output_copy_aux(struct perf_output_handle *aux_handle,
+ struct perf_output_handle *handle,
+ unsigned long from, unsigned long to);
extern int perf_swevent_get_recursion_context(void);
extern void perf_swevent_put_recursion_context(int rctx);
extern u64 perf_swevent_set_period(struct perf_event *event);
@@ -1336,6 +1409,8 @@ extern void perf_event_disable_local(struct perf_event *event);
extern void perf_event_disable_inatomic(struct perf_event *event);
extern void perf_event_task_tick(void);
extern int perf_event_account_interrupt(struct perf_event *event);
+extern int perf_event_period(struct perf_event *event, u64 value);
+extern u64 perf_event_pause(struct perf_event *event, bool reset);
#else /* !CONFIG_PERF_EVENTS: */
static inline void *
perf_aux_output_begin(struct perf_output_handle *handle,
@@ -1415,6 +1490,14 @@ static inline void perf_event_disable(struct perf_event *event) { }
static inline int __perf_event_disable(void *info) { return -1; }
static inline void perf_event_task_tick(void) { }
static inline int perf_event_release_kernel(struct perf_event *event) { return 0; }
+static inline int perf_event_period(struct perf_event *event, u64 value)
+{
+ return -EINVAL;
+}
+static inline u64 perf_event_pause(struct perf_event *event, bool reset)
+{
+ return 0;
+}
#endif
#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_INTEL)
@@ -1480,4 +1563,8 @@ int perf_event_exit_cpu(unsigned int cpu);
#define perf_event_exit_cpu NULL
#endif
+extern void __weak arch_perf_update_userpage(struct perf_event *event,
+ struct perf_event_mmap_page *userpg,
+ u64 now);
+
#endif /* _LINUX_PERF_EVENT_H */
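As a hedged sketch of how the new access helpers are meant to be consulted on an event-open style path (the wrapper function itself is hypothetical): the helpers combine the paranoia sysctl with the LSM hook and return 0 or a negative errno.

static int example_check_attr(struct perf_event_attr *attr)
{
        int err;

        if (!attr->exclude_kernel) {            /* event wants kernel-side samples */
                err = perf_allow_kernel(attr);  /* paranoia level + security_perf_event_open() */
                if (err)
                        return err;
        }
        return 0;
}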
diff --git a/include/linux/phy.h b/include/linux/phy.h
index 9a0e981df502..452e8ba8665f 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -17,10 +17,12 @@
#include <linux/linkmode.h>
#include <linux/mdio.h>
#include <linux/mii.h>
+#include <linux/mii_timestamper.h>
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/mod_devicetable.h>
+#include <linux/u64_stats_sync.h>
#include <linux/atomic.h>
@@ -99,9 +101,11 @@ typedef enum {
PHY_INTERFACE_MODE_2500BASEX,
PHY_INTERFACE_MODE_RXAUI,
PHY_INTERFACE_MODE_XAUI,
- /* 10GBASE-KR, XFI, SFI - single lane 10G Serdes */
- PHY_INTERFACE_MODE_10GKR,
+ /* 10GBASE-R, XFI, SFI - single lane 10G Serdes */
+ PHY_INTERFACE_MODE_10GBASER,
PHY_INTERFACE_MODE_USXGMII,
+ /* 10GBASE-KR - with Clause 73 AN */
+ PHY_INTERFACE_MODE_10GKR,
PHY_INTERFACE_MODE_MAX,
} phy_interface_t;
@@ -175,10 +179,12 @@ static inline const char *phy_modes(phy_interface_t interface)
return "rxaui";
case PHY_INTERFACE_MODE_XAUI:
return "xaui";
- case PHY_INTERFACE_MODE_10GKR:
- return "10gbase-kr";
+ case PHY_INTERFACE_MODE_10GBASER:
+ return "10gbase-r";
case PHY_INTERFACE_MODE_USXGMII:
return "usxgmii";
+ case PHY_INTERFACE_MODE_10GKR:
+ return "10gbase-kr";
default:
return "unknown";
}
@@ -203,8 +209,19 @@ static inline const char *phy_modes(phy_interface_t interface)
struct device;
struct phylink;
+struct sfp_bus;
+struct sfp_upstream_ops;
struct sk_buff;
+struct mdio_bus_stats {
+ u64_stats_t transfers;
+ u64_stats_t errors;
+ u64_stats_t writes;
+ u64_stats_t reads;
+ /* Must be last, add new statistics above */
+ struct u64_stats_sync syncp;
+};
+
/*
* The Bus class for PHYs. Devices which provide access to
* PHYs should register using this structure
@@ -217,6 +234,7 @@ struct mii_bus {
int (*read)(struct mii_bus *bus, int addr, int regnum);
int (*write)(struct mii_bus *bus, int addr, int regnum, u16 val);
int (*reset)(struct mii_bus *bus);
+ struct mdio_bus_stats stats[PHY_MAX_ADDR];
/*
* A lock to ensure that only one thing can read/write
@@ -325,6 +343,9 @@ struct phy_c45_device_ids {
u32 device_ids[8];
};
+struct macsec_context;
+struct macsec_ops;
+
/* phy_device: An instance of a PHY
*
* drv: Pointer to the driver for this PHY instance
@@ -336,15 +357,19 @@ struct phy_c45_device_ids {
* is_gigabit_capable: Set to true if PHY supports 1000Mbps
* has_fixups: Set to true if this phy has fixups/quirks.
* suspended: Set to true if this phy has been suspended successfully.
+ * suspended_by_mdio_bus: Set to true if this phy was suspended by MDIO bus.
* sysfs_links: Internal boolean tracking sysfs symbolic links setup/removal.
* loopback_enabled: Set true if this phy has been loopbacked successfully.
* state: state of the PHY for management purposes
* dev_flags: Device-specific flags used by the PHY driver.
* irq: IRQ number of the PHY's interrupt (-1 if none)
* phy_timer: The timer for handling the state machine
+ * sfp_bus_attached: flag indicating whether the SFP bus has been attached
+ * sfp_bus: SFP bus attached to this PHY's fiber port
* attached_dev: The attached enet driver's device instance ptr
* adjust_link: Callback for the enet controller to respond to
* changes in the link state.
+ * macsec_ops: MACsec offloading ops.
*
* speed, duplex, pause, supported, advertising, lp_advertising,
* and autoneg are used like in mii_if_info
@@ -372,6 +397,7 @@ struct phy_device {
unsigned is_gigabit_capable:1;
unsigned has_fixups:1;
unsigned suspended:1;
+ unsigned suspended_by_mdio_bus:1;
unsigned sysfs_links:1;
unsigned loopback_enabled:1;
@@ -432,14 +458,23 @@ struct phy_device {
struct mutex lock;
+ /* This may be modified under the rtnl lock */
+ bool sfp_bus_attached;
+ struct sfp_bus *sfp_bus;
struct phylink *phylink;
struct net_device *attached_dev;
+ struct mii_timestamper *mii_ts;
u8 mdix;
u8 mdix_ctrl;
void (*phy_link_change)(struct phy_device *, bool up, bool do_carrier);
void (*adjust_link)(struct net_device *dev);
+
+#if IS_ENABLED(CONFIG_MACSEC)
+ /* MACsec management functions */
+ const struct macsec_ops *macsec_ops;
+#endif
};
#define to_phy_device(d) container_of(to_mdio_device(d), \
struct phy_device, mdio)
@@ -524,6 +559,7 @@ struct phy_driver {
/*
* Checks if the PHY generated an interrupt.
* For multi-PHY devices with shared PHY interrupt pin
+ * the set interrupt bits have to be cleared.
*/
int (*did_interrupt)(struct phy_device *phydev);
@@ -539,29 +575,6 @@ struct phy_driver {
*/
int (*match_phy_device)(struct phy_device *phydev);
- /* Handles ethtool queries for hardware time stamping. */
- int (*ts_info)(struct phy_device *phydev, struct ethtool_ts_info *ti);
-
- /* Handles SIOCSHWTSTAMP ioctl for hardware time stamping. */
- int (*hwtstamp)(struct phy_device *phydev, struct ifreq *ifr);
-
- /*
- * Requests a Rx timestamp for 'skb'. If the skb is accepted,
- * the phy driver promises to deliver it using netif_rx() as
- * soon as a timestamp becomes available. One of the
- * PTP_CLASS_ values is passed in 'type'. The function must
- * return true if the skb is accepted for delivery.
- */
- bool (*rxtstamp)(struct phy_device *dev, struct sk_buff *skb, int type);
-
- /*
- * Requests a Tx timestamp for 'skb'. The phy driver promises
- * to deliver it using skb_complete_tx_timestamp() as soon as a
- * timestamp becomes available. One of the PTP_CLASS_ values
- * is passed in 'type'.
- */
- void (*txtstamp)(struct phy_device *dev, struct sk_buff *skb, int type);
-
/* Some devices (e.g. qnap TS-119P II) require PHY register changes to
* enable Wake on LAN, so set_wol is provided to be called in the
* ethernet driver's set_wol function. */
@@ -930,6 +943,66 @@ static inline bool phy_polling_mode(struct phy_device *phydev)
}
/**
+ * phy_has_hwtstamp - Tests whether a PHY provides a hardware time stamp configuration.
+ * @phydev: the phy_device struct
+ */
+static inline bool phy_has_hwtstamp(struct phy_device *phydev)
+{
+ return phydev && phydev->mii_ts && phydev->mii_ts->hwtstamp;
+}
+
+/**
+ * phy_has_rxtstamp - Tests whether a PHY supports receive time stamping.
+ * @phydev: the phy_device struct
+ */
+static inline bool phy_has_rxtstamp(struct phy_device *phydev)
+{
+ return phydev && phydev->mii_ts && phydev->mii_ts->rxtstamp;
+}
+
+/**
+ * phy_has_tsinfo - Tests whether a PHY reports time stamping and/or
+ * PTP hardware clock capabilities.
+ * @phydev: the phy_device struct
+ */
+static inline bool phy_has_tsinfo(struct phy_device *phydev)
+{
+ return phydev && phydev->mii_ts && phydev->mii_ts->ts_info;
+}
+
+/**
+ * phy_has_txtstamp - Tests whether a PHY supports transmit time stamping.
+ * @phydev: the phy_device struct
+ */
+static inline bool phy_has_txtstamp(struct phy_device *phydev)
+{
+ return phydev && phydev->mii_ts && phydev->mii_ts->txtstamp;
+}
+
+static inline int phy_hwtstamp(struct phy_device *phydev, struct ifreq *ifr)
+{
+ return phydev->mii_ts->hwtstamp(phydev->mii_ts, ifr);
+}
+
+static inline bool phy_rxtstamp(struct phy_device *phydev, struct sk_buff *skb,
+ int type)
+{
+ return phydev->mii_ts->rxtstamp(phydev->mii_ts, skb, type);
+}
+
+static inline int phy_ts_info(struct phy_device *phydev,
+ struct ethtool_ts_info *tsinfo)
+{
+ return phydev->mii_ts->ts_info(phydev->mii_ts, tsinfo);
+}
+
+static inline void phy_txtstamp(struct phy_device *phydev, struct sk_buff *skb,
+ int type)
+{
+ phydev->mii_ts->txtstamp(phydev->mii_ts, skb, type);
+}
+
+/**
* phy_is_internal - Convenience function for testing if a PHY is internal
* @phydev: the phy_device struct
*/
@@ -993,7 +1066,7 @@ int phy_modify_paged_changed(struct phy_device *phydev, int page, u32 regnum,
int phy_modify_paged(struct phy_device *phydev, int page, u32 regnum,
u16 mask, u16 set);
-struct phy_device *phy_device_create(struct mii_bus *bus, int addr, int phy_id,
+struct phy_device *phy_device_create(struct mii_bus *bus, int addr, u32 phy_id,
bool is_c45,
struct phy_c45_device_ids *c45_ids);
#if IS_ENABLED(CONFIG_PHYLIB)
@@ -1020,6 +1093,10 @@ int phy_suspend(struct phy_device *phydev);
int phy_resume(struct phy_device *phydev);
int __phy_resume(struct phy_device *phydev);
int phy_loopback(struct phy_device *phydev, bool enable);
+void phy_sfp_attach(void *upstream, struct sfp_bus *bus);
+void phy_sfp_detach(void *upstream, struct sfp_bus *bus);
+int phy_sfp_probe(struct phy_device *phydev,
+ const struct sfp_upstream_ops *ops);
struct phy_device *phy_attach(struct net_device *dev, const char *bus_id,
phy_interface_t interface);
struct phy_device *phy_find_first(struct mii_bus *bus);
@@ -1065,19 +1142,33 @@ static inline const char *phydev_name(const struct phy_device *phydev)
return dev_name(&phydev->mdio.dev);
}
+static inline void phy_lock_mdio_bus(struct phy_device *phydev)
+{
+ mutex_lock(&phydev->mdio.bus->mdio_lock);
+}
+
+static inline void phy_unlock_mdio_bus(struct phy_device *phydev)
+{
+ mutex_unlock(&phydev->mdio.bus->mdio_lock);
+}
+
void phy_attached_print(struct phy_device *phydev, const char *fmt, ...)
__printf(2, 3);
+char *phy_attached_info_irq(struct phy_device *phydev)
+ __malloc;
void phy_attached_info(struct phy_device *phydev);
/* Clause 22 PHY */
int genphy_read_abilities(struct phy_device *phydev);
int genphy_setup_forced(struct phy_device *phydev);
int genphy_restart_aneg(struct phy_device *phydev);
+int genphy_check_and_restart_aneg(struct phy_device *phydev, bool restart);
int genphy_config_eee_advert(struct phy_device *phydev);
int __genphy_config_aneg(struct phy_device *phydev, bool changed);
int genphy_aneg_done(struct phy_device *phydev);
int genphy_update_link(struct phy_device *phydev);
int genphy_read_lpa(struct phy_device *phydev);
+int genphy_read_status_fixed(struct phy_device *phydev);
int genphy_read_status(struct phy_device *phydev);
int genphy_suspend(struct phy_device *phydev);
int genphy_resume(struct phy_device *phydev);
@@ -1106,6 +1197,10 @@ int genphy_read_mmd_unsupported(struct phy_device *phdev, int devad,
int genphy_write_mmd_unsupported(struct phy_device *phdev, int devnum,
u16 regnum, u16 val);
+/* Clause 37 */
+int genphy_c37_config_aneg(struct phy_device *phydev);
+int genphy_c37_read_status(struct phy_device *phydev);
+
/* Clause 45 PHY */
int genphy_c45_restart_aneg(struct phy_device *phydev);
int genphy_c45_check_and_restart_aneg(struct phy_device *phydev, bool restart);
@@ -1145,12 +1240,13 @@ void phy_queue_state_machine(struct phy_device *phydev, unsigned long jiffies);
void phy_mac_interrupt(struct phy_device *phydev);
void phy_start_machine(struct phy_device *phydev);
void phy_stop_machine(struct phy_device *phydev);
-int phy_ethtool_sset(struct phy_device *phydev, struct ethtool_cmd *cmd);
void phy_ethtool_ksettings_get(struct phy_device *phydev,
struct ethtool_link_ksettings *cmd);
int phy_ethtool_ksettings_set(struct phy_device *phydev,
const struct ethtool_link_ksettings *cmd);
int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd);
+int phy_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd);
+int phy_do_ioctl_running(struct net_device *dev, struct ifreq *ifr, int cmd);
void phy_request_interrupt(struct phy_device *phydev);
void phy_free_interrupt(struct phy_device *phydev);
void phy_print_status(struct phy_device *phydev);
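As an illustrative sketch (the ndo callback and dispatch logic are assumptions), a MAC driver can now probe and forward hardware time stamping to the PHY through the mii_timestamper-based helpers added above:

static int example_ndo_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
        struct phy_device *phydev = dev->phydev;

        if (cmd == SIOCSHWTSTAMP && phy_has_hwtstamp(phydev))
                return phy_hwtstamp(phydev, ifr);       /* forwarded to phydev->mii_ts */

        return phy_mii_ioctl(phydev, ifr, cmd);
}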
diff --git a/include/linux/phy/phy-dp.h b/include/linux/phy/phy-dp.h
new file mode 100644
index 000000000000..18cad23642cd
--- /dev/null
+++ b/include/linux/phy/phy-dp.h
@@ -0,0 +1,95 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2019 Cadence Design Systems Inc.
+ */
+
+#ifndef __PHY_DP_H_
+#define __PHY_DP_H_
+
+#include <linux/types.h>
+
+/**
+ * struct phy_configure_opts_dp - DisplayPort PHY configuration set
+ *
+ * This structure is used to represent the configuration state of a
+ * DisplayPort phy.
+ */
+struct phy_configure_opts_dp {
+ /**
+ * @link_rate:
+ *
+ * Link Rate, in Mb/s, of the main link.
+ *
+ * Allowed values: 1620, 2160, 2430, 2700, 3240, 4320, 5400, 8100 Mb/s
+ */
+ unsigned int link_rate;
+
+ /**
+ * @lanes:
+ *
+ * Number of active, consecutive data lanes, starting from
+ * lane 0, used for transmission on the main link.
+ *
+ * Allowed values: 1, 2, 4
+ */
+ unsigned int lanes;
+
+ /**
+ * @voltage:
+ *
+ * Voltage swing levels, as specified by DisplayPort specification,
+ * to be used by particular lanes. One value per lane.
+ * voltage[0] is for lane 0, voltage[1] is for lane 1, etc.
+ *
+ * Maximum value: 3
+ */
+ unsigned int voltage[4];
+
+ /**
+ * @pre:
+ *
+ * Pre-emphasis levels, as specified by DisplayPort specification, to be
+ * used by particular lanes. One value per lane.
+ *
+ * Maximum value: 3
+ */
+ unsigned int pre[4];
+
+ /**
+ * @ssc:
+ *
+ * Flag indicating whether or not to enable spread-spectrum clocking.
+ *
+ */
+ u8 ssc : 1;
+
+ /**
+ * @set_rate:
+ *
+ * Flag indicating whether or not to reconfigure the link rate and
+ * SSC to the requested values.
+ *
+ */
+ u8 set_rate : 1;
+
+ /**
+ * @set_lanes:
+ *
+ * Flag indicating whether or not to reconfigure the lane count to
+ * the requested value.
+ *
+ */
+ u8 set_lanes : 1;
+
+ /**
+ * @set_voltages:
+ *
+ * Flag indicating whether or not to reconfigure the voltage swing
+ * and pre-emphasis to the requested values. Only the lanes specified
+ * by the "lanes" parameter will be affected.
+ *
+ */
+ u8 set_voltages : 1;
+};
+
+#endif /* __PHY_DP_H_ */
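A minimal sketch of how a DisplayPort controller driver might use this configuration set through the generic PHY API (phy_set_mode() and phy_configure() are the existing generic-PHY entry points; the function name and values below are illustrative):

#include <linux/phy/phy.h>

static int example_dp_link_setup(struct phy *phy)
{
	union phy_configure_opts opts = { };
	int ret;

	ret = phy_set_mode(phy, PHY_MODE_DP);
	if (ret)
		return ret;

	opts.dp.link_rate = 2700;	/* 2.7 Gb/s per lane */
	opts.dp.lanes = 2;
	opts.dp.ssc = 1;
	opts.dp.set_rate = 1;
	opts.dp.set_lanes = 1;

	return phy_configure(phy, &opts);
}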
diff --git a/include/linux/phy/phy.h b/include/linux/phy/phy.h
index 15032f145063..bcee8eba62b3 100644
--- a/include/linux/phy/phy.h
+++ b/include/linux/phy/phy.h
@@ -16,6 +16,7 @@
#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
+#include <linux/phy/phy-dp.h>
#include <linux/phy/phy-mipi-dphy.h>
struct phy;
@@ -38,7 +39,9 @@ enum phy_mode {
PHY_MODE_PCIE,
PHY_MODE_ETHERNET,
PHY_MODE_MIPI_DPHY,
- PHY_MODE_SATA
+ PHY_MODE_SATA,
+ PHY_MODE_LVDS,
+ PHY_MODE_DP
};
/**
@@ -46,9 +49,12 @@ enum phy_mode {
*
* @mipi_dphy: Configuration set applicable for phys supporting
* the MIPI_DPHY phy mode.
+ * @dp: Configuration set applicable for phys supporting
+ * the DisplayPort protocol.
*/
union phy_configure_opts {
struct phy_configure_opts_mipi_dphy mipi_dphy;
+ struct phy_configure_opts_dp dp;
};
/**
@@ -233,7 +239,8 @@ struct phy *devm_of_phy_get(struct device *dev, struct device_node *np,
const char *con_id);
struct phy *devm_of_phy_get_by_index(struct device *dev, struct device_node *np,
int index);
-void phy_put(struct phy *phy);
+void of_phy_put(struct phy *phy);
+void phy_put(struct device *dev, struct phy *phy);
void devm_phy_put(struct device *dev, struct phy *phy);
struct phy *of_phy_get(struct device_node *np, const char *con_id);
struct phy *of_phy_simple_xlate(struct device *dev,
@@ -418,7 +425,11 @@ static inline struct phy *devm_of_phy_get_by_index(struct device *dev,
return ERR_PTR(-ENOSYS);
}
-static inline void phy_put(struct phy *phy)
+static inline void of_phy_put(struct phy *phy)
+{
+}
+
+static inline void phy_put(struct device *dev, struct phy *phy)
{
}
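Sketch of the corresponding teardown change for non-devm consumers: phy_put() now takes the consumer device, while references obtained with of_phy_get() are dropped through the new of_phy_put() (the helper name is hypothetical):

#include <linux/phy/phy.h>

static void example_release_phys(struct device *dev, struct phy *phy,
				 struct phy *dt_phy)
{
	phy_put(dev, phy);	/* pairs with phy_get(dev, ...) */
	of_phy_put(dt_phy);	/* pairs with of_phy_get(np, ...) */
}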
diff --git a/include/linux/phy/tegra/xusb.h b/include/linux/phy/tegra/xusb.h
index ee59562c8354..71d956935405 100644
--- a/include/linux/phy/tegra/xusb.h
+++ b/include/linux/phy/tegra/xusb.h
@@ -18,5 +18,9 @@ int tegra_xusb_padctl_hsic_set_idle(struct tegra_xusb_padctl *padctl,
unsigned int port, bool idle);
int tegra_xusb_padctl_usb3_set_lfps_detect(struct tegra_xusb_padctl *padctl,
unsigned int port, bool enable);
-
+int tegra_xusb_padctl_set_vbus_override(struct tegra_xusb_padctl *padctl,
+ bool val);
+int tegra_phy_xusb_utmi_port_reset(struct phy *phy);
+int tegra_xusb_padctl_get_usb3_companion(struct tegra_xusb_padctl *padctl,
+ unsigned int port);
#endif /* PHY_TEGRA_XUSB_H */
diff --git a/include/linux/phy_led_triggers.h b/include/linux/phy_led_triggers.h
index 3d507a8a6989..5c4d7a755101 100644
--- a/include/linux/phy_led_triggers.h
+++ b/include/linux/phy_led_triggers.h
@@ -14,7 +14,7 @@ struct phy_device;
#define PHY_LED_TRIGGER_SPEED_SUFFIX_SIZE 11
#define PHY_LINK_LED_TRIGGER_NAME_SIZE (MII_BUS_ID_SIZE + \
- FIELD_SIZEOF(struct mdio_device, addr)+\
+ sizeof_field(struct mdio_device, addr)+\
PHY_LED_TRIGGER_SPEED_SUFFIX_SIZE)
struct phy_led_trigger {
diff --git a/include/linux/phylink.h b/include/linux/phylink.h
index 300ecdb6790a..523209e70947 100644
--- a/include/linux/phylink.h
+++ b/include/linux/phylink.h
@@ -63,16 +63,18 @@ enum phylink_op_type {
* struct phylink_config - PHYLINK configuration structure
* @dev: a pointer to a struct device associated with the MAC
* @type: operation type of PHYLINK instance
+ * @pcs_poll: MAC PCS cannot provide link change interrupt
*/
struct phylink_config {
struct device *dev;
enum phylink_op_type type;
+ bool pcs_poll;
};
/**
* struct phylink_mac_ops - MAC operations structure.
* @validate: Validate and update the link configuration.
- * @mac_link_state: Read the current link state from the hardware.
+ * @mac_pcs_get_state: Read the current link state from the hardware.
* @mac_config: configure the MAC for the selected mode and state.
* @mac_an_restart: restart 802.3z BaseX autonegotiation.
* @mac_link_down: take the link down.
@@ -84,8 +86,8 @@ struct phylink_mac_ops {
void (*validate)(struct phylink_config *config,
unsigned long *supported,
struct phylink_link_state *state);
- int (*mac_link_state)(struct phylink_config *config,
- struct phylink_link_state *state);
+ void (*mac_pcs_get_state)(struct phylink_config *config,
+ struct phylink_link_state *state);
void (*mac_config)(struct phylink_config *config, unsigned int mode,
const struct phylink_link_state *state);
void (*mac_an_restart)(struct phylink_config *config);
@@ -127,18 +129,19 @@ void validate(struct phylink_config *config, unsigned long *supported,
struct phylink_link_state *state);
/**
- * mac_link_state() - Read the current link state from the hardware
+ * mac_pcs_get_state() - Read the current inband link state from the hardware
* @config: a pointer to a &struct phylink_config.
* @state: a pointer to a &struct phylink_link_state.
*
- * Read the current link state from the MAC, reporting the current
- * speed in @state->speed, duplex mode in @state->duplex, pause mode
- * in @state->pause using the %MLO_PAUSE_RX and %MLO_PAUSE_TX bits,
- * negotiation completion state in @state->an_complete, and link
- * up state in @state->link.
+ * Read the current inband link state from the MAC PCS, reporting the
+ * current speed in @state->speed, duplex mode in @state->duplex, pause
+ * mode in @state->pause using the %MLO_PAUSE_RX and %MLO_PAUSE_TX bits,
+ * negotiation completion state in @state->an_complete, and link up state
+ * in @state->link. If possible, @state->lp_advertising should also be
+ * populated.
*/
-int mac_link_state(struct phylink_config *config,
- struct phylink_link_state *state);
+void mac_pcs_get_state(struct phylink_config *config,
+ struct phylink_link_state *state);
/**
* mac_config() - configure the MAC for the selected mode and state
@@ -166,7 +169,7 @@ int mac_link_state(struct phylink_config *config,
* 1000base-X or Cisco SGMII mode depending on the @state->interface
* mode). In both cases, link state management (whether the link
* is up or not) is performed by the MAC, and reported via the
- * mac_link_state() callback. Changes in link state must be made
+ * mac_pcs_get_state() callback. Changes in link state must be made
* by calling phylink_mac_change().
*
* If in 802.3z mode, the link speed is fixed, dependent on the
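A minimal sketch of the renamed callback: mac_pcs_get_state() returns void and just fills in the inband state read from the PCS; the example_priv container and status-register layout are hypothetical:

#include <linux/bits.h>
#include <linux/io.h>
#include <linux/phylink.h>

struct example_priv {
	struct phylink_config phylink_config;
	void __iomem *pcs_base;
};

static void example_mac_pcs_get_state(struct phylink_config *config,
				      struct phylink_link_state *state)
{
	struct example_priv *priv =
		container_of(config, struct example_priv, phylink_config);
	u32 status = readl(priv->pcs_base);	/* hypothetical status register */

	state->link = !!(status & BIT(0));
	state->an_complete = !!(status & BIT(1));
	state->speed = SPEED_1000;
	state->duplex = DUPLEX_FULL;
	state->pause = MLO_PAUSE_NONE;
}

static const struct phylink_mac_ops example_phylink_ops = {
	.mac_pcs_get_state = example_mac_pcs_get_state,
	/* .validate, .mac_config, .mac_an_restart, .mac_link_up/down elided */
};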
diff --git a/include/linux/pid.h b/include/linux/pid.h
index 9645b1194c98..998ae7d24450 100644
--- a/include/linux/pid.h
+++ b/include/linux/pid.h
@@ -85,6 +85,10 @@ static inline struct pid *get_pid(struct pid *pid)
extern void put_pid(struct pid *pid);
extern struct task_struct *pid_task(struct pid *pid, enum pid_type);
+static inline bool pid_has_task(struct pid *pid, enum pid_type type)
+{
+ return !hlist_empty(&pid->tasks[type]);
+}
extern struct task_struct *get_pid_task(struct pid *pid, enum pid_type);
extern struct pid *get_task_pid(struct task_struct *task, enum pid_type type);
@@ -120,7 +124,8 @@ extern struct pid *find_vpid(int nr);
extern struct pid *find_get_pid(int nr);
extern struct pid *find_ge_pid(int nr, struct pid_namespace *);
-extern struct pid *alloc_pid(struct pid_namespace *ns);
+extern struct pid *alloc_pid(struct pid_namespace *ns, pid_t *set_tid,
+ size_t set_tid_size);
extern void free_pid(struct pid *pid);
extern void disable_pid_allocation(struct pid_namespace *ns);
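Sketch of the new pid_has_task() helper: it tests whether a struct pid still has a task attached for a given type without taking a task reference (typically called under RCU or while holding a reference on the pid; the function name is hypothetical):

#include <linux/pid.h>
#include <linux/rcupdate.h>

static bool example_tgid_alive(struct pid *pid)
{
	bool alive;

	rcu_read_lock();
	alive = pid_has_task(pid, PIDTYPE_TGID);
	rcu_read_unlock();

	return alive;
}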
diff --git a/include/linux/pid_namespace.h b/include/linux/pid_namespace.h
index 49538b172483..2ed6af88794b 100644
--- a/include/linux/pid_namespace.h
+++ b/include/linux/pid_namespace.h
@@ -12,6 +12,8 @@
#include <linux/ns_common.h>
#include <linux/idr.h>
+/* MAX_PID_NS_LEVEL is needed for limiting size of 'struct pid' */
+#define MAX_PID_NS_LEVEL 32
struct fs_pin;
diff --git a/include/linux/pinctrl/consumer.h b/include/linux/pinctrl/consumer.h
index 7f8c7d9583d3..019fecd75d0c 100644
--- a/include/linux/pinctrl/consumer.h
+++ b/include/linux/pinctrl/consumer.h
@@ -40,6 +40,7 @@ extern int pinctrl_select_state(struct pinctrl *p, struct pinctrl_state *s);
extern struct pinctrl * __must_check devm_pinctrl_get(struct device *dev);
extern void devm_pinctrl_put(struct pinctrl *p);
+extern int pinctrl_select_default_state(struct device *dev);
#ifdef CONFIG_PM
extern int pinctrl_pm_select_default_state(struct device *dev);
@@ -122,6 +123,11 @@ static inline void devm_pinctrl_put(struct pinctrl *p)
{
}
+static inline int pinctrl_select_default_state(struct device *dev)
+{
+ return 0;
+}
+
static inline int pinctrl_pm_select_default_state(struct device *dev)
{
return 0;
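Sketch of the new consumer helper: pinctrl_select_default_state() (re)applies the device's default pin state and is a no-op for devices without a pinctrl handle, so paths like resume can call it unconditionally (the function name is hypothetical):

#include <linux/pinctrl/consumer.h>

static int example_resume(struct device *dev)
{
	/* Returns 0 when the device has no pinctrl state to apply. */
	return pinctrl_select_default_state(dev);
}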
diff --git a/include/linux/pinctrl/machine.h b/include/linux/pinctrl/machine.h
index ddd1b2773431..e987dc9fd2af 100644
--- a/include/linux/pinctrl/machine.h
+++ b/include/linux/pinctrl/machine.h
@@ -153,6 +153,7 @@ struct pinctrl_map {
extern int pinctrl_register_mappings(const struct pinctrl_map *map,
unsigned num_maps);
+extern void pinctrl_unregister_mappings(const struct pinctrl_map *map);
extern void pinctrl_provide_dummies(void);
#else
@@ -162,6 +163,10 @@ static inline int pinctrl_register_mappings(const struct pinctrl_map *map,
return 0;
}
+static inline void pinctrl_unregister_mappings(const struct pinctrl_map *map)
+{
+}
+
static inline void pinctrl_provide_dummies(void)
{
}
diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
index 5c626fdc10db..ae58fad7f1e0 100644
--- a/include/linux/pipe_fs_i.h
+++ b/include/linux/pipe_fs_i.h
@@ -29,15 +29,16 @@ struct pipe_buffer {
/**
* struct pipe_inode_info - a linux kernel pipe
* @mutex: mutex protecting the whole thing
- * @wait: reader/writer wait point in case of empty/full pipe
- * @nrbufs: the number of non-empty pipe buffers in this pipe
- * @buffers: total number of buffers (should be a power of 2)
- * @curbuf: the current pipe buffer entry
+ * @rd_wait: reader wait point in case of empty pipe
+ * @wr_wait: writer wait point in case of full pipe
+ * @head: The point of buffer production
+ * @tail: The point of buffer consumption
+ * @max_usage: The maximum number of slots that may be used in the ring
+ * @ring_size: total number of buffers (should be a power of 2)
* @tmp_page: cached released page
* @readers: number of current readers of this pipe
* @writers: number of current writers of this pipe
* @files: number of struct file referring this pipe (protected by ->i_lock)
- * @waiting_writers: number of writers blocked waiting for room
* @r_counter: reader counter
* @w_counter: writer counter
* @fasync_readers: reader side fasync
@@ -47,12 +48,14 @@ struct pipe_buffer {
**/
struct pipe_inode_info {
struct mutex mutex;
- wait_queue_head_t wait;
- unsigned int nrbufs, curbuf, buffers;
+ wait_queue_head_t rd_wait, wr_wait;
+ unsigned int head;
+ unsigned int tail;
+ unsigned int max_usage;
+ unsigned int ring_size;
unsigned int readers;
unsigned int writers;
unsigned int files;
- unsigned int waiting_writers;
unsigned int r_counter;
unsigned int w_counter;
struct page *tmp_page;
@@ -105,6 +108,58 @@ struct pipe_buf_operations {
};
/**
+ * pipe_empty - Return true if the pipe is empty
+ * @head: The pipe ring head pointer
+ * @tail: The pipe ring tail pointer
+ */
+static inline bool pipe_empty(unsigned int head, unsigned int tail)
+{
+ return head == tail;
+}
+
+/**
+ * pipe_occupancy - Return number of slots used in the pipe
+ * @head: The pipe ring head pointer
+ * @tail: The pipe ring tail pointer
+ */
+static inline unsigned int pipe_occupancy(unsigned int head, unsigned int tail)
+{
+ return head - tail;
+}
+
+/**
+ * pipe_full - Return true if the pipe is full
+ * @head: The pipe ring head pointer
+ * @tail: The pipe ring tail pointer
+ * @limit: The maximum amount of slots available.
+ */
+static inline bool pipe_full(unsigned int head, unsigned int tail,
+ unsigned int limit)
+{
+ return pipe_occupancy(head, tail) >= limit;
+}
+
+/**
+ * pipe_space_for_user - Return number of slots available to userspace
+ * @head: The pipe ring head pointer
+ * @tail: The pipe ring tail pointer
+ * @pipe: The pipe info structure
+ */
+static inline unsigned int pipe_space_for_user(unsigned int head, unsigned int tail,
+ struct pipe_inode_info *pipe)
+{
+ unsigned int p_occupancy, p_space;
+
+ p_occupancy = pipe_occupancy(head, tail);
+ if (p_occupancy >= pipe->max_usage)
+ return 0;
+ p_space = pipe->ring_size - p_occupancy;
+ if (p_space > pipe->max_usage)
+ p_space = pipe->max_usage;
+ return p_space;
+}
+
+/**
* pipe_buf_get - get a reference to a pipe_buffer
* @pipe: the pipe that the buffer belongs to
* @buf: the buffer to get a reference to
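A minimal sketch of the new head/tail ring accounting, assuming the usual pipe->bufs slot array: both indices only ever increment, occupancy is head - tail, and slots are addressed modulo ring_size (still a power of two):

#include <linux/pipe_fs_i.h>

static struct pipe_buffer *example_claim_slot(struct pipe_inode_info *pipe)
{
	unsigned int head = pipe->head;

	/* Producers compare occupancy against max_usage, not ring_size. */
	if (pipe_full(head, pipe->tail, pipe->max_usage))
		return NULL;	/* caller would sleep on pipe->wr_wait */

	pipe->head = head + 1;
	return &pipe->bufs[head & (pipe->ring_size - 1)];
}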
diff --git a/include/linux/platform_data/ad7266.h b/include/linux/platform_data/ad7266.h
index 7de6c16122df..f0652567afba 100644
--- a/include/linux/platform_data/ad7266.h
+++ b/include/linux/platform_data/ad7266.h
@@ -40,14 +40,11 @@ enum ad7266_mode {
* @range: Reference voltage range the device is configured for
* @mode: Sample mode the device is configured for
* @fixed_addr: Whether the address pins are hard-wired
- * @addr_gpios: GPIOs used for controlling the address pins, only used if
- * fixed_addr is set to false.
*/
struct ad7266_platform_data {
enum ad7266_range range;
enum ad7266_mode mode;
bool fixed_addr;
- unsigned int addr_gpios[3];
};
#endif
diff --git a/include/linux/platform_data/ads1015.h b/include/linux/platform_data/ads1015.h
deleted file mode 100644
index 4cc9ffcafcbf..000000000000
--- a/include/linux/platform_data/ads1015.h
+++ /dev/null
@@ -1,23 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Platform Data for ADS1015 12-bit 4-input ADC
- * (C) Copyright 2010
- * Dirk Eibach, Guntermann & Drunck GmbH <eibach@gdsys.de>
- */
-
-#ifndef LINUX_ADS1015_H
-#define LINUX_ADS1015_H
-
-#define ADS1015_CHANNELS 8
-
-struct ads1015_channel_data {
- bool enabled;
- unsigned int pga;
- unsigned int data_rate;
-};
-
-struct ads1015_platform_data {
- struct ads1015_channel_data channel_data[ADS1015_CHANNELS];
-};
-
-#endif /* LINUX_ADS1015_H */
diff --git a/include/linux/platform_data/b53.h b/include/linux/platform_data/b53.h
index c3b61ead41f2..6f6fed2b171d 100644
--- a/include/linux/platform_data/b53.h
+++ b/include/linux/platform_data/b53.h
@@ -19,7 +19,7 @@
#ifndef __B53_H
#define __B53_H
-#include <linux/kernel.h>
+#include <linux/types.h>
#include <linux/platform_data/dsa.h>
struct b53_platform_data {
diff --git a/include/linux/platform_data/bd6107.h b/include/linux/platform_data/bd6107.h
index 3bd019037eb3..54a06a4d2618 100644
--- a/include/linux/platform_data/bd6107.h
+++ b/include/linux/platform_data/bd6107.h
@@ -9,7 +9,6 @@ struct device;
struct bd6107_platform_data {
struct device *fbdev;
- int reset; /* Reset GPIO */
unsigned int def_value;
};
diff --git a/include/linux/platform_data/cros_ec_commands.h b/include/linux/platform_data/cros_ec_commands.h
index 98415686cbfa..69210881ebac 100644
--- a/include/linux/platform_data/cros_ec_commands.h
+++ b/include/linux/platform_data/cros_ec_commands.h
@@ -556,6 +556,9 @@ enum host_event_code {
/* Keyboard recovery combo with hardware reinitialization */
EC_HOST_EVENT_KEYBOARD_RECOVERY_HW_REINIT = 30,
+ /* WoV */
+ EC_HOST_EVENT_WOV = 31,
+
/*
* The high bit of the event mask is not used as a host event code. If
* it reads back as set, then the entire event mask should be
@@ -1277,8 +1280,6 @@ enum ec_feature_code {
* MOTIONSENSE_CMD_TABLET_MODE_LID_ANGLE.
*/
EC_FEATURE_REFINED_TABLET_MODE_HYSTERESIS = 37,
- /* EC supports audio codec. */
- EC_FEATURE_AUDIO_CODEC = 38,
/* The MCU is a System Companion Processor (SCP). */
EC_FEATURE_SCP = 39,
/* The MCU is an Integrated Sensor Hub */
@@ -4468,92 +4469,246 @@ enum mkbp_cec_event {
/*****************************************************************************/
-/* Commands for I2S recording on audio codec. */
+/* Commands for audio codec. */
+#define EC_CMD_EC_CODEC 0x00BC
-#define EC_CMD_CODEC_I2S 0x00BC
-#define EC_WOV_I2S_SAMPLE_RATE 48000
+enum ec_codec_subcmd {
+ EC_CODEC_GET_CAPABILITIES = 0x0,
+ EC_CODEC_GET_SHM_ADDR = 0x1,
+ EC_CODEC_SET_SHM_ADDR = 0x2,
+ EC_CODEC_SUBCMD_COUNT,
+};
-enum ec_codec_i2s_subcmd {
- EC_CODEC_SET_SAMPLE_DEPTH = 0x0,
- EC_CODEC_SET_GAIN = 0x1,
- EC_CODEC_GET_GAIN = 0x2,
- EC_CODEC_I2S_ENABLE = 0x3,
- EC_CODEC_I2S_SET_CONFIG = 0x4,
- EC_CODEC_I2S_SET_TDM_CONFIG = 0x5,
- EC_CODEC_I2S_SET_BCLK = 0x6,
- EC_CODEC_I2S_SUBCMD_COUNT = 0x7,
+enum ec_codec_cap {
+ EC_CODEC_CAP_WOV_AUDIO_SHM = 0,
+ EC_CODEC_CAP_WOV_LANG_SHM = 1,
+ EC_CODEC_CAP_LAST = 32,
};
-enum ec_sample_depth_value {
- EC_CODEC_SAMPLE_DEPTH_16 = 0,
- EC_CODEC_SAMPLE_DEPTH_24 = 1,
+enum ec_codec_shm_id {
+ EC_CODEC_SHM_ID_WOV_AUDIO = 0x0,
+ EC_CODEC_SHM_ID_WOV_LANG = 0x1,
+ EC_CODEC_SHM_ID_LAST,
};
-enum ec_i2s_config {
- EC_DAI_FMT_I2S = 0,
- EC_DAI_FMT_RIGHT_J = 1,
- EC_DAI_FMT_LEFT_J = 2,
- EC_DAI_FMT_PCM_A = 3,
- EC_DAI_FMT_PCM_B = 4,
- EC_DAI_FMT_PCM_TDM = 5,
+enum ec_codec_shm_type {
+ EC_CODEC_SHM_TYPE_EC_RAM = 0x0,
+ EC_CODEC_SHM_TYPE_SYSTEM_RAM = 0x1,
};
-/*
- * For subcommand EC_CODEC_GET_GAIN.
- */
-struct __ec_align1 ec_codec_i2s_gain {
+struct __ec_align1 ec_param_ec_codec_get_shm_addr {
+ uint8_t shm_id;
+ uint8_t reserved[3];
+};
+
+struct __ec_align4 ec_param_ec_codec_set_shm_addr {
+ uint64_t phys_addr;
+ uint32_t len;
+ uint8_t shm_id;
+ uint8_t reserved[3];
+};
+
+struct __ec_align4 ec_param_ec_codec {
+ uint8_t cmd; /* enum ec_codec_subcmd */
+ uint8_t reserved[3];
+
+ union {
+ struct ec_param_ec_codec_get_shm_addr
+ get_shm_addr_param;
+ struct ec_param_ec_codec_set_shm_addr
+ set_shm_addr_param;
+ };
+};
+
+struct __ec_align4 ec_response_ec_codec_get_capabilities {
+ uint32_t capabilities;
+};
+
+struct __ec_align4 ec_response_ec_codec_get_shm_addr {
+ uint64_t phys_addr;
+ uint32_t len;
+ uint8_t type;
+ uint8_t reserved[3];
+};
+
+/*****************************************************************************/
+
+/* Commands for DMIC on audio codec. */
+#define EC_CMD_EC_CODEC_DMIC 0x00BD
+
+enum ec_codec_dmic_subcmd {
+ EC_CODEC_DMIC_GET_MAX_GAIN = 0x0,
+ EC_CODEC_DMIC_SET_GAIN_IDX = 0x1,
+ EC_CODEC_DMIC_GET_GAIN_IDX = 0x2,
+ EC_CODEC_DMIC_SUBCMD_COUNT,
+};
+
+enum ec_codec_dmic_channel {
+ EC_CODEC_DMIC_CHANNEL_0 = 0x0,
+ EC_CODEC_DMIC_CHANNEL_1 = 0x1,
+ EC_CODEC_DMIC_CHANNEL_2 = 0x2,
+ EC_CODEC_DMIC_CHANNEL_3 = 0x3,
+ EC_CODEC_DMIC_CHANNEL_4 = 0x4,
+ EC_CODEC_DMIC_CHANNEL_5 = 0x5,
+ EC_CODEC_DMIC_CHANNEL_6 = 0x6,
+ EC_CODEC_DMIC_CHANNEL_7 = 0x7,
+ EC_CODEC_DMIC_CHANNEL_COUNT,
+};
+
+struct __ec_align1 ec_param_ec_codec_dmic_set_gain_idx {
+ uint8_t channel; /* enum ec_codec_dmic_channel */
+ uint8_t gain;
+ uint8_t reserved[2];
+};
+
+struct __ec_align1 ec_param_ec_codec_dmic_get_gain_idx {
+ uint8_t channel; /* enum ec_codec_dmic_channel */
+ uint8_t reserved[3];
+};
+
+struct __ec_align4 ec_param_ec_codec_dmic {
+ uint8_t cmd; /* enum ec_codec_dmic_subcmd */
+ uint8_t reserved[3];
+
+ union {
+ struct ec_param_ec_codec_dmic_set_gain_idx
+ set_gain_idx_param;
+ struct ec_param_ec_codec_dmic_get_gain_idx
+ get_gain_idx_param;
+ };
+};
+
+struct __ec_align1 ec_response_ec_codec_dmic_get_max_gain {
+ uint8_t max_gain;
+};
+
+struct __ec_align1 ec_response_ec_codec_dmic_get_gain_idx {
+ uint8_t gain;
+};
+
+/*****************************************************************************/
+
+/* Commands for I2S RX on audio codec. */
+
+#define EC_CMD_EC_CODEC_I2S_RX 0x00BE
+
+enum ec_codec_i2s_rx_subcmd {
+ EC_CODEC_I2S_RX_ENABLE = 0x0,
+ EC_CODEC_I2S_RX_DISABLE = 0x1,
+ EC_CODEC_I2S_RX_SET_SAMPLE_DEPTH = 0x2,
+ EC_CODEC_I2S_RX_SET_DAIFMT = 0x3,
+ EC_CODEC_I2S_RX_SET_BCLK = 0x4,
+ EC_CODEC_I2S_RX_SUBCMD_COUNT,
+};
+
+enum ec_codec_i2s_rx_sample_depth {
+ EC_CODEC_I2S_RX_SAMPLE_DEPTH_16 = 0x0,
+ EC_CODEC_I2S_RX_SAMPLE_DEPTH_24 = 0x1,
+ EC_CODEC_I2S_RX_SAMPLE_DEPTH_COUNT,
+};
+
+enum ec_codec_i2s_rx_daifmt {
+ EC_CODEC_I2S_RX_DAIFMT_I2S = 0x0,
+ EC_CODEC_I2S_RX_DAIFMT_RIGHT_J = 0x1,
+ EC_CODEC_I2S_RX_DAIFMT_LEFT_J = 0x2,
+ EC_CODEC_I2S_RX_DAIFMT_COUNT,
+};
+
+struct __ec_align1 ec_param_ec_codec_i2s_rx_set_sample_depth {
+ uint8_t depth;
+ uint8_t reserved[3];
+};
+
+struct __ec_align1 ec_param_ec_codec_i2s_rx_set_gain {
uint8_t left;
uint8_t right;
+ uint8_t reserved[2];
};
-struct __ec_todo_unpacked ec_param_codec_i2s_tdm {
- int16_t ch0_delay; /* 0 to 496 */
- int16_t ch1_delay; /* -1 to 496 */
- uint8_t adjacent_to_ch0;
- uint8_t adjacent_to_ch1;
+struct __ec_align1 ec_param_ec_codec_i2s_rx_set_daifmt {
+ uint8_t daifmt;
+ uint8_t reserved[3];
};
-struct __ec_todo_packed ec_param_codec_i2s {
- /* enum ec_codec_i2s_subcmd */
- uint8_t cmd;
+struct __ec_align4 ec_param_ec_codec_i2s_rx_set_bclk {
+ uint32_t bclk;
+};
+
+struct __ec_align4 ec_param_ec_codec_i2s_rx {
+ uint8_t cmd; /* enum ec_codec_i2s_rx_subcmd */
+ uint8_t reserved[3];
+
union {
- /*
- * EC_CODEC_SET_SAMPLE_DEPTH
- * Value should be one of ec_sample_depth_value.
- */
- uint8_t depth;
+ struct ec_param_ec_codec_i2s_rx_set_sample_depth
+ set_sample_depth_param;
+ struct ec_param_ec_codec_i2s_rx_set_daifmt
+ set_daifmt_param;
+ struct ec_param_ec_codec_i2s_rx_set_bclk
+ set_bclk_param;
+ };
+};
- /*
- * EC_CODEC_SET_GAIN
- * Value should be 0~43 for both channels.
- */
- struct ec_codec_i2s_gain gain;
+/*****************************************************************************/
+/* Commands for WoV on audio codec. */
+
+#define EC_CMD_EC_CODEC_WOV 0x00BF
+
+enum ec_codec_wov_subcmd {
+ EC_CODEC_WOV_SET_LANG = 0x0,
+ EC_CODEC_WOV_SET_LANG_SHM = 0x1,
+ EC_CODEC_WOV_GET_LANG = 0x2,
+ EC_CODEC_WOV_ENABLE = 0x3,
+ EC_CODEC_WOV_DISABLE = 0x4,
+ EC_CODEC_WOV_READ_AUDIO = 0x5,
+ EC_CODEC_WOV_READ_AUDIO_SHM = 0x6,
+ EC_CODEC_WOV_SUBCMD_COUNT,
+};
- /*
- * EC_CODEC_I2S_ENABLE
- * 1 to enable, 0 to disable.
- */
- uint8_t i2s_enable;
+/*
+ * @hash is the SHA256 of the whole language model.
+ * @total_len indicates the length of the whole language model.
+ * @offset is the cursor from the beginning of the model.
+ * @buf is the packet buffer.
+ * @len denotes how many valid bytes are in @buf.
+ */
+struct __ec_align4 ec_param_ec_codec_wov_set_lang {
+ uint8_t hash[32];
+ uint32_t total_len;
+ uint32_t offset;
+ uint8_t buf[128];
+ uint32_t len;
+};
- /*
- * EC_CODEC_I2S_SET_CONFIG
- * Value should be one of ec_i2s_config.
- */
- uint8_t i2s_config;
+struct __ec_align4 ec_param_ec_codec_wov_set_lang_shm {
+ uint8_t hash[32];
+ uint32_t total_len;
+};
- /*
- * EC_CODEC_I2S_SET_TDM_CONFIG
- * Value should be one of ec_i2s_config.
- */
- struct ec_param_codec_i2s_tdm tdm_param;
+struct __ec_align4 ec_param_ec_codec_wov {
+ uint8_t cmd; /* enum ec_codec_wov_subcmd */
+ uint8_t reserved[3];
- /*
- * EC_CODEC_I2S_SET_BCLK
- */
- uint32_t bclk;
+ union {
+ struct ec_param_ec_codec_wov_set_lang
+ set_lang_param;
+ struct ec_param_ec_codec_wov_set_lang_shm
+ set_lang_shm_param;
};
};
+struct __ec_align4 ec_response_ec_codec_wov_get_lang {
+ uint8_t hash[32];
+};
+
+struct __ec_align4 ec_response_ec_codec_wov_read_audio {
+ uint8_t buf[128];
+ uint32_t len;
+};
+
+struct __ec_align4 ec_response_ec_codec_wov_read_audio_shm {
+ uint32_t offset;
+ uint32_t len;
+};
/*****************************************************************************/
/* System commands */
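A hedged sketch of driving one of the new split codec commands, here EC_CMD_EC_CODEC_DMIC with the EC_CODEC_DMIC_SET_GAIN_IDX sub-command; the helper name is hypothetical and the message handling follows the usual cros_ec_cmd_xfer_status() pattern:

#include <linux/platform_data/cros_ec_commands.h>
#include <linux/platform_data/cros_ec_proto.h>
#include <linux/slab.h>

static int example_dmic_set_gain(struct cros_ec_device *ec_dev, u8 gain)
{
	struct cros_ec_command *msg;
	struct ec_param_ec_codec_dmic *p;
	int ret;

	msg = kzalloc(sizeof(*msg) + sizeof(*p), GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	msg->version = 0;
	msg->command = EC_CMD_EC_CODEC_DMIC;
	msg->outsize = sizeof(*p);

	p = (struct ec_param_ec_codec_dmic *)msg->data;
	p->cmd = EC_CODEC_DMIC_SET_GAIN_IDX;
	p->set_gain_idx_param.channel = EC_CODEC_DMIC_CHANNEL_0;
	p->set_gain_idx_param.gain = gain;

	ret = cros_ec_cmd_xfer_status(ec_dev, msg);
	kfree(msg);
	return ret < 0 ? ret : 0;
}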
diff --git a/include/linux/platform_data/cros_ec_proto.h b/include/linux/platform_data/cros_ec_proto.h
index eab7036cda09..ba5914770191 100644
--- a/include/linux/platform_data/cros_ec_proto.h
+++ b/include/linux/platform_data/cros_ec_proto.h
@@ -115,12 +115,16 @@ struct cros_ec_command {
* code.
* @pkt_xfer: Send packet to EC and get response.
* @lock: One transaction at a time.
- * @mkbp_event_supported: True if this EC supports the MKBP event protocol.
+ * @mkbp_event_supported: 0 if MKBP not supported. Otherwise its value is
+ * the maximum supported version of the MKBP host event
+ * command + 1.
* @host_sleep_v1: True if this EC supports the sleep v1 command.
* @event_notifier: Interrupt event notifier for transport devices.
* @event_data: Raw payload transferred with the MKBP event.
* @event_size: Size in bytes of the event data.
* @host_event_wake_mask: Mask of host events that cause wake from suspend.
+ * @last_event_time: exact time from the hard irq when we got notified of
+ * a new event.
* @ec: The platform_device used by the mfd driver to interface with the
* main EC.
* @pd: The platform_device used by the mfd driver to interface with the
@@ -153,7 +157,7 @@ struct cros_ec_device {
int (*pkt_xfer)(struct cros_ec_device *ec,
struct cros_ec_command *msg);
struct mutex lock;
- bool mkbp_event_supported;
+ u8 mkbp_event_supported;
bool host_sleep_v1;
struct blocking_notifier_head event_notifier;
@@ -161,6 +165,7 @@ struct cros_ec_device {
int event_size;
u32 host_event_wake_mask;
u32 last_resume_result;
+ ktime_t last_event_time;
/* The platform devices used by the mfd driver */
struct platform_device *ec;
@@ -168,14 +173,6 @@ struct cros_ec_device {
};
/**
- * struct cros_ec_sensor_platform - ChromeOS EC sensor platform information.
- * @sensor_num: Id of the sensor, as reported by the EC.
- */
-struct cros_ec_sensor_platform {
- u8 sensor_num;
-};
-
-/**
* struct cros_ec_platform - ChromeOS EC platform information.
* @ec_name: Name of EC device (e.g. 'cros-ec', 'cros-pd', ...)
* used in /dev/ and sysfs.
@@ -188,132 +185,62 @@ struct cros_ec_platform {
};
/**
- * cros_ec_suspend() - Handle a suspend operation for the ChromeOS EC device.
- * @ec_dev: Device to suspend.
- *
- * This can be called by drivers to handle a suspend event.
- *
- * Return: 0 on success or negative error code.
- */
-int cros_ec_suspend(struct cros_ec_device *ec_dev);
+ * struct cros_ec_dev - ChromeOS EC device entry point.
+ * @class_dev: Device structure used in sysfs.
+ * @ec_dev: cros_ec_device structure to talk to the physical device.
+ * @dev: Pointer to the platform device.
+ * @debug_info: cros_ec_debugfs structure for debugging information.
+ * @has_kb_wake_angle: True if at least 2 accelerometers are connected to the EC.
+ * @cmd_offset: Offset to apply for each command.
+ * @features: Features supported by the EC.
+ */
+struct cros_ec_dev {
+ struct device class_dev;
+ struct cros_ec_device *ec_dev;
+ struct device *dev;
+ struct cros_ec_debugfs *debug_info;
+ bool has_kb_wake_angle;
+ u16 cmd_offset;
+ u32 features[2];
+};
-/**
- * cros_ec_resume() - Handle a resume operation for the ChromeOS EC device.
- * @ec_dev: Device to resume.
- *
- * This can be called by drivers to handle a resume event.
- *
- * Return: 0 on success or negative error code.
- */
-int cros_ec_resume(struct cros_ec_device *ec_dev);
+#define to_cros_ec_dev(dev) container_of(dev, struct cros_ec_dev, class_dev)
-/**
- * cros_ec_prepare_tx() - Prepare an outgoing message in the output buffer.
- * @ec_dev: Device to register.
- * @msg: Message to write.
- *
- * This is intended to be used by all ChromeOS EC drivers, but at present
- * only SPI uses it. Once LPC uses the same protocol it can start using it.
- * I2C could use it now, with a refactor of the existing code.
- *
- * Return: 0 on success or negative error code.
- */
int cros_ec_prepare_tx(struct cros_ec_device *ec_dev,
struct cros_ec_command *msg);
-/**
- * cros_ec_check_result() - Check ec_msg->result.
- * @ec_dev: EC device.
- * @msg: Message to check.
- *
- * This is used by ChromeOS EC drivers to check the ec_msg->result for
- * errors and to warn about them.
- *
- * Return: 0 on success or negative error code.
- */
int cros_ec_check_result(struct cros_ec_device *ec_dev,
struct cros_ec_command *msg);
-/**
- * cros_ec_cmd_xfer() - Send a command to the ChromeOS EC.
- * @ec_dev: EC device.
- * @msg: Message to write.
- *
- * Call this to send a command to the ChromeOS EC. This should be used
- * instead of calling the EC's cmd_xfer() callback directly.
- *
- * Return: 0 on success or negative error code.
- */
int cros_ec_cmd_xfer(struct cros_ec_device *ec_dev,
struct cros_ec_command *msg);
-/**
- * cros_ec_cmd_xfer_status() - Send a command to the ChromeOS EC.
- * @ec_dev: EC device.
- * @msg: Message to write.
- *
- * This function is identical to cros_ec_cmd_xfer, except it returns success
- * status only if both the command was transmitted successfully and the EC
- * replied with success status. It's not necessary to check msg->result when
- * using this function.
- *
- * Return: The number of bytes transferred on success or negative error code.
- */
int cros_ec_cmd_xfer_status(struct cros_ec_device *ec_dev,
struct cros_ec_command *msg);
-/**
- * cros_ec_register() - Register a new ChromeOS EC, using the provided info.
- * @ec_dev: Device to register.
- *
- * Before calling this, allocate a pointer to a new device and then fill
- * in all the fields up to the --private-- marker.
- *
- * Return: 0 on success or negative error code.
- */
-int cros_ec_register(struct cros_ec_device *ec_dev);
+int cros_ec_query_all(struct cros_ec_device *ec_dev);
-/**
- * cros_ec_unregister() - Remove a ChromeOS EC.
- * @ec_dev: Device to unregister.
- *
- * Call this to deregister a ChromeOS EC, then clean up any private data.
- *
- * Return: 0 on success or negative error code.
- */
-int cros_ec_unregister(struct cros_ec_device *ec_dev);
+int cros_ec_get_next_event(struct cros_ec_device *ec_dev,
+ bool *wake_event,
+ bool *has_more_events);
-/**
- * cros_ec_query_all() - Query the protocol version supported by the
- * ChromeOS EC.
- * @ec_dev: Device to register.
- *
- * Return: 0 on success or negative error code.
- */
-int cros_ec_query_all(struct cros_ec_device *ec_dev);
+u32 cros_ec_get_host_event(struct cros_ec_device *ec_dev);
-/**
- * cros_ec_get_next_event() - Fetch next event from the ChromeOS EC.
- * @ec_dev: Device to fetch event from.
- * @wake_event: Pointer to a bool set to true upon return if the event might be
- * treated as a wake event. Ignored if null.
- *
- * Return: negative error code on errors; 0 for no data; or else number of
- * bytes received (i.e., an event was retrieved successfully). Event types are
- * written out to @ec_dev->event_data.event_type on success.
- */
-int cros_ec_get_next_event(struct cros_ec_device *ec_dev, bool *wake_event);
+int cros_ec_check_features(struct cros_ec_dev *ec, int feature);
+
+int cros_ec_get_sensor_count(struct cros_ec_dev *ec);
/**
- * cros_ec_get_host_event() - Return a mask of event set by the ChromeOS EC.
- * @ec_dev: Device to fetch event from.
+ * cros_ec_get_time_ns() - Return time in ns.
*
- * When MKBP is supported, when the EC raises an interrupt, we collect the
- * events raised and call the functions in the ec notifier. This function
- * is a helper to know which events are raised.
+ * This is the function used to record the time for last_event_time in struct
+ * cros_ec_device during the hard irq.
*
- * Return: 0 on error or non-zero bitmask of one or more EC_HOST_EVENT_*.
+ * Return: ktime_t format since boot.
*/
-u32 cros_ec_get_host_event(struct cros_ec_device *ec_dev);
+static inline ktime_t cros_ec_get_time_ns(void)
+{
+ return ktime_get_boottime_ns();
+}
#endif /* __LINUX_CROS_EC_PROTO_H */
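Sketch of the intended pairing of @last_event_time and cros_ec_get_time_ns(): the transport driver stamps the time in its hard-IRQ handler so MKBP sensor events can later be corrected for wakeup latency (the handler name is hypothetical):

#include <linux/interrupt.h>
#include <linux/platform_data/cros_ec_proto.h>

static irqreturn_t example_ec_irq(int irq, void *data)
{
	struct cros_ec_device *ec_dev = data;

	ec_dev->last_event_time = cros_ec_get_time_ns();

	return IRQ_WAKE_THREAD;	/* threaded handler fetches the event */
}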
diff --git a/include/linux/platform_data/cros_ec_sensorhub.h b/include/linux/platform_data/cros_ec_sensorhub.h
new file mode 100644
index 000000000000..bef7ffc7fce1
--- /dev/null
+++ b/include/linux/platform_data/cros_ec_sensorhub.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Chrome OS EC MEMS Sensor Hub driver.
+ *
+ * Copyright 2019 Google LLC
+ */
+
+#ifndef __LINUX_PLATFORM_DATA_CROS_EC_SENSORHUB_H
+#define __LINUX_PLATFORM_DATA_CROS_EC_SENSORHUB_H
+
+#include <linux/platform_data/cros_ec_commands.h>
+
+/**
+ * struct cros_ec_sensor_platform - ChromeOS EC sensor platform information.
+ * @sensor_num: Id of the sensor, as reported by the EC.
+ */
+struct cros_ec_sensor_platform {
+ u8 sensor_num;
+};
+
+/**
+ * struct cros_ec_sensorhub - Sensor Hub device data.
+ *
+ * @ec: Embedded Controller where the hub is located.
+ */
+struct cros_ec_sensorhub {
+ struct cros_ec_dev *ec;
+};
+
+#endif /* __LINUX_PLATFORM_DATA_CROS_EC_SENSORHUB_H */
diff --git a/include/linux/platform_data/crypto-atmel.h b/include/linux/platform_data/crypto-atmel.h
deleted file mode 100644
index 0471aaf6999b..000000000000
--- a/include/linux/platform_data/crypto-atmel.h
+++ /dev/null
@@ -1,23 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __LINUX_CRYPTO_ATMEL_H
-#define __LINUX_CRYPTO_ATMEL_H
-
-#include <linux/platform_data/dma-atmel.h>
-
-/**
- * struct crypto_dma_data - DMA data for AES/TDES/SHA
- */
-struct crypto_dma_data {
- struct at_dma_slave txdata;
- struct at_dma_slave rxdata;
-};
-
-/**
- * struct crypto_platform_data - board-specific AES/TDES/SHA configuration
- * @dma_slave: DMA slave interface to use in data transfers.
- */
-struct crypto_platform_data {
- struct crypto_dma_data *dma_slave;
-};
-
-#endif /* __LINUX_CRYPTO_ATMEL_H */
diff --git a/include/linux/platform_data/dmtimer-omap.h b/include/linux/platform_data/dmtimer-omap.h
index bdaaf537604a..95d852aef130 100644
--- a/include/linux/platform_data/dmtimer-omap.h
+++ b/include/linux/platform_data/dmtimer-omap.h
@@ -30,12 +30,12 @@ struct omap_dm_timer_ops {
int (*stop)(struct omap_dm_timer *timer);
int (*set_source)(struct omap_dm_timer *timer, int source);
- int (*set_load)(struct omap_dm_timer *timer, int autoreload,
- unsigned int value);
+ int (*set_load)(struct omap_dm_timer *timer, unsigned int value);
int (*set_match)(struct omap_dm_timer *timer, int enable,
unsigned int match);
int (*set_pwm)(struct omap_dm_timer *timer, int def_on,
- int toggle, int trigger);
+ int toggle, int trigger, int autoreload);
+ int (*get_pwm_status)(struct omap_dm_timer *timer);
int (*set_prescaler)(struct omap_dm_timer *timer, int prescaler);
unsigned int (*read_counter)(struct omap_dm_timer *timer);
diff --git a/include/linux/platform_data/ehci-sh.h b/include/linux/platform_data/ehci-sh.h
deleted file mode 100644
index 219bd79dabfc..000000000000
--- a/include/linux/platform_data/ehci-sh.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0
- *
- * EHCI SuperH driver platform data
- *
- * Copyright (C) 2012 Nobuhiro Iwamatsu <nobuhiro.iwamatsu.yj@renesas.com>
- * Copyright (C) 2012 Renesas Solutions Corp.
- */
-
-#ifndef __USB_EHCI_SH_H
-#define __USB_EHCI_SH_H
-
-struct ehci_sh_platdata {
- void (*phy_init)(void); /* Phy init function */
-};
-
-#endif /* __USB_EHCI_SH_H */
diff --git a/include/linux/platform_data/eth_ixp4xx.h b/include/linux/platform_data/eth_ixp4xx.h
new file mode 100644
index 000000000000..6f652ea0c6ae
--- /dev/null
+++ b/include/linux/platform_data/eth_ixp4xx.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __PLATFORM_DATA_ETH_IXP4XX
+#define __PLATFORM_DATA_ETH_IXP4XX
+
+#include <linux/types.h>
+
+#define IXP4XX_ETH_NPEA 0x00
+#define IXP4XX_ETH_NPEB 0x10
+#define IXP4XX_ETH_NPEC 0x20
+
+/* Information about built-in Ethernet MAC interfaces */
+struct eth_plat_info {
+ u8 phy; /* MII PHY ID, 0 - 31 */
+ u8 rxq; /* configurable, currently 0 - 31 only */
+ u8 txreadyq;
+ u8 hwaddr[6];
+};
+
+#endif
diff --git a/include/linux/platform_data/gpio_backlight.h b/include/linux/platform_data/gpio_backlight.h
index 34179d600360..1a8b5b1946fe 100644
--- a/include/linux/platform_data/gpio_backlight.h
+++ b/include/linux/platform_data/gpio_backlight.h
@@ -9,9 +9,6 @@ struct device;
struct gpio_backlight_platform_data {
struct device *fbdev;
- int gpio;
- int def_value;
- const char *name;
};
#endif
diff --git a/include/linux/platform_data/hsmmc-omap.h b/include/linux/platform_data/hsmmc-omap.h
index e79d238ff18f..7124a5f4bf06 100644
--- a/include/linux/platform_data/hsmmc-omap.h
+++ b/include/linux/platform_data/hsmmc-omap.h
@@ -67,9 +67,6 @@ struct omap_hsmmc_platform_data {
/* string specifying a particular variant of hardware */
char *version;
- /* if we have special card, init it using this callback */
- void (*init_card)(struct mmc_card *card);
-
const char *name;
u32 ocr_mask;
};
diff --git a/include/linux/platform_data/i2c-pxa.h b/include/linux/platform_data/i2c-pxa.h
index cb290092599c..6a9b28399b39 100644
--- a/include/linux/platform_data/i2c-pxa.h
+++ b/include/linux/platform_data/i2c-pxa.h
@@ -55,11 +55,7 @@
*/
#define I2C_ISR_INIT 0x7FF /* status register init */
-struct i2c_slave_client;
-
struct i2c_pxa_platform_data {
- unsigned int slave_addr;
- struct i2c_slave_client *slave;
unsigned int class;
unsigned int use_pio :1;
unsigned int fast_mode :1;
diff --git a/include/linux/platform_data/intel-spi.h b/include/linux/platform_data/intel-spi.h
index ebb4f332588b..7f53a5c6f35e 100644
--- a/include/linux/platform_data/intel-spi.h
+++ b/include/linux/platform_data/intel-spi.h
@@ -13,6 +13,7 @@ enum intel_spi_type {
INTEL_SPI_BYT = 1,
INTEL_SPI_LPT,
INTEL_SPI_BXT,
+ INTEL_SPI_CNL,
};
/**
diff --git a/include/linux/platform_data/microchip-ksz.h b/include/linux/platform_data/microchip-ksz.h
index 84789ca634aa..ea1cc6d829e9 100644
--- a/include/linux/platform_data/microchip-ksz.h
+++ b/include/linux/platform_data/microchip-ksz.h
@@ -19,7 +19,7 @@
#ifndef __MICROCHIP_KSZ_H
#define __MICROCHIP_KSZ_H
-#include <linux/kernel.h>
+#include <linux/types.h>
struct ksz_platform_data {
u32 chip_id;
diff --git a/include/linux/platform_data/mlxreg.h b/include/linux/platform_data/mlxreg.h
index 6d54fe3bcac9..b8da8aef2446 100644
--- a/include/linux/platform_data/mlxreg.h
+++ b/include/linux/platform_data/mlxreg.h
@@ -101,6 +101,7 @@ struct mlxreg_core_data {
* @aggr_mask: group aggregation mask;
* @reg: group interrupt status register;
* @mask: group interrupt mask;
+ * @capability: group capability register;
* @cache: last status value for elements from the same group;
* @count: number of available elements in the group;
* @ind: element's index inside the group;
@@ -112,6 +113,7 @@ struct mlxreg_core_item {
u32 aggr_mask;
u32 reg;
u32 mask;
+ u32 capability;
u32 cache;
u8 count;
u8 ind;
diff --git a/include/linux/platform_data/mv_usb.h b/include/linux/platform_data/mv_usb.h
index 5376b6d799d5..20d239c02bf3 100644
--- a/include/linux/platform_data/mv_usb.h
+++ b/include/linux/platform_data/mv_usb.h
@@ -6,14 +6,6 @@
#ifndef __MV_PLATFORM_USB_H
#define __MV_PLATFORM_USB_H
-enum pxa_ehci_type {
- EHCI_UNDEFINED = 0,
- PXA_U2OEHCI, /* pxa 168, 9xx */
- PXA_SPH, /* pxa 168, 9xx SPH */
- MMP3_HSIC, /* mmp3 hsic */
- MMP3_FSIC, /* mmp3 fsic */
-};
-
enum {
MV_USB_MODE_OTG,
MV_USB_MODE_HOST,
diff --git a/include/linux/platform_data/pixcir_i2c_ts.h b/include/linux/platform_data/pixcir_i2c_ts.h
deleted file mode 100644
index 4ab3cd6f1cc2..000000000000
--- a/include/linux/platform_data/pixcir_i2c_ts.h
+++ /dev/null
@@ -1,64 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _PIXCIR_I2C_TS_H
-#define _PIXCIR_I2C_TS_H
-
-/*
- * Register map
- */
-#define PIXCIR_REG_POWER_MODE 51
-#define PIXCIR_REG_INT_MODE 52
-
-/*
- * Power modes:
- * active: max scan speed
- * idle: lower scan speed with automatic transition to active on touch
- * halt: datasheet says sleep but this is more like halt as the chip
- * clocks are cut and it can only be brought out of this mode
- * using the RESET pin.
- */
-enum pixcir_power_mode {
- PIXCIR_POWER_ACTIVE,
- PIXCIR_POWER_IDLE,
- PIXCIR_POWER_HALT,
-};
-
-#define PIXCIR_POWER_MODE_MASK 0x03
-#define PIXCIR_POWER_ALLOW_IDLE (1UL << 2)
-
-/*
- * Interrupt modes:
- * periodical: interrupt is asserted periodicaly
- * diff coordinates: interrupt is asserted when coordinates change
- * level on touch: interrupt level asserted during touch
- * pulse on touch: interrupt pulse asserted druing touch
- *
- */
-enum pixcir_int_mode {
- PIXCIR_INT_PERIODICAL,
- PIXCIR_INT_DIFF_COORD,
- PIXCIR_INT_LEVEL_TOUCH,
- PIXCIR_INT_PULSE_TOUCH,
-};
-
-#define PIXCIR_INT_MODE_MASK 0x03
-#define PIXCIR_INT_ENABLE (1UL << 3)
-#define PIXCIR_INT_POL_HIGH (1UL << 2)
-
-/**
- * struct pixcir_irc_chip_data - chip related data
- * @max_fingers: Max number of fingers reported simultaneously by h/w
- * @has_hw_ids: Hardware supports finger tracking IDs
- *
- */
-struct pixcir_i2c_chip_data {
- u8 max_fingers;
- bool has_hw_ids;
-};
-
-struct pixcir_ts_platform_data {
- int x_max;
- int y_max;
- struct pixcir_i2c_chip_data chip;
-};
-
-#endif
diff --git a/include/linux/platform_data/spi-mt65xx.h b/include/linux/platform_data/spi-mt65xx.h
index f0e6d6483e62..65fd5ffd257c 100644
--- a/include/linux/platform_data/spi-mt65xx.h
+++ b/include/linux/platform_data/spi-mt65xx.h
@@ -11,7 +11,6 @@
/* Board specific platform_data */
struct mtk_chip_config {
- u32 cs_pol;
u32 sample_sel;
};
#endif
diff --git a/include/linux/platform_data/spi-omap2-mcspi.h b/include/linux/platform_data/spi-omap2-mcspi.h
index 0bf9fddb8306..3b400b1919a9 100644
--- a/include/linux/platform_data/spi-omap2-mcspi.h
+++ b/include/linux/platform_data/spi-omap2-mcspi.h
@@ -11,6 +11,7 @@ struct omap2_mcspi_platform_config {
unsigned short num_cs;
unsigned int regs_offset;
unsigned int pin_dir:1;
+ size_t max_xfer_len;
};
struct omap2_mcspi_device_config {
diff --git a/include/linux/platform_data/st_sensors_pdata.h b/include/linux/platform_data/st_sensors_pdata.h
index 30929c22227d..e40b28ca892e 100644
--- a/include/linux/platform_data/st_sensors_pdata.h
+++ b/include/linux/platform_data/st_sensors_pdata.h
@@ -18,12 +18,14 @@
* @open_drain: set the interrupt line to be open drain if possible.
* @spi_3wire: enable spi-3wire mode.
* @pullups: enable/disable i2c controller pullup resistors.
+ * @wakeup_source: enable/disable device as wakeup generator.
*/
struct st_sensors_platform_data {
u8 drdy_int_pin;
bool open_drain;
bool spi_3wire;
bool pullups;
+ bool wakeup_source;
};
#endif /* ST_SENSORS_PDATA_H */
diff --git a/include/linux/platform_data/tc35876x.h b/include/linux/platform_data/tc35876x.h
deleted file mode 100644
index cd6a51c71e7e..000000000000
--- a/include/linux/platform_data/tc35876x.h
+++ /dev/null
@@ -1,11 +0,0 @@
-
-#ifndef _TC35876X_H
-#define _TC35876X_H
-
-struct tc35876x_platform_data {
- int gpio_bridge_reset;
- int gpio_panel_bl_en;
- int gpio_panel_vadd;
-};
-
-#endif /* _TC35876X_H */
diff --git a/include/linux/platform_data/ti-prm.h b/include/linux/platform_data/ti-prm.h
new file mode 100644
index 000000000000..28154c3226c2
--- /dev/null
+++ b/include/linux/platform_data/ti-prm.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * TI PRM (Power & Reset Manager) platform data
+ *
+ * Copyright (C) 2019 Texas Instruments, Inc.
+ *
+ * Tero Kristo <t-kristo@ti.com>
+ */
+
+#ifndef _LINUX_PLATFORM_DATA_TI_PRM_H
+#define _LINUX_PLATFORM_DATA_TI_PRM_H
+
+struct clockdomain;
+
+struct ti_prm_platform_data {
+ void (*clkdm_deny_idle)(struct clockdomain *clkdm);
+ void (*clkdm_allow_idle)(struct clockdomain *clkdm);
+ struct clockdomain * (*clkdm_lookup)(const char *name);
+};
+
+#endif /* _LINUX_PLATFORM_DATA_TI_PRM_H */
diff --git a/include/linux/platform_data/ti-sysc.h b/include/linux/platform_data/ti-sysc.h
index b5b7a3423ca8..2cbde6542849 100644
--- a/include/linux/platform_data/ti-sysc.h
+++ b/include/linux/platform_data/ti-sysc.h
@@ -49,6 +49,9 @@ struct sysc_regbits {
s8 emufree_shift;
};
+#define SYSC_QUIRK_CLKDM_NOAUTO BIT(21)
+#define SYSC_QUIRK_FORCE_MSTANDBY BIT(20)
+#define SYSC_MODULE_QUIRK_AESS BIT(19)
#define SYSC_MODULE_QUIRK_SGX BIT(18)
#define SYSC_MODULE_QUIRK_HDQ1W BIT(17)
#define SYSC_MODULE_QUIRK_I2C BIT(16)
diff --git a/include/linux/platform_data/usb3503.h b/include/linux/platform_data/usb3503.h
index e049d51c1353..d01ef97ddf36 100644
--- a/include/linux/platform_data/usb3503.h
+++ b/include/linux/platform_data/usb3503.h
@@ -17,9 +17,6 @@ enum usb3503_mode {
struct usb3503_platform_data {
enum usb3503_mode initial_mode;
u8 port_off_mask;
- int gpio_intn;
- int gpio_connect;
- int gpio_reset;
};
#endif
diff --git a/include/linux/platform_data/wan_ixp4xx_hss.h b/include/linux/platform_data/wan_ixp4xx_hss.h
new file mode 100644
index 000000000000..d525a0feb9e1
--- /dev/null
+++ b/include/linux/platform_data/wan_ixp4xx_hss.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __PLATFORM_DATA_WAN_IXP4XX_HSS_H
+#define __PLATFORM_DATA_WAN_IXP4XX_HSS_H
+
+#include <linux/types.h>
+
+/* Information about built-in HSS (synchronous serial) interfaces */
+struct hss_plat_info {
+ int (*set_clock)(int port, unsigned int clock_type);
+ int (*open)(int port, void *pdev,
+ void (*set_carrier_cb)(void *pdev, int carrier));
+ void (*close)(int port, void *pdev);
+ u8 txreadyq;
+ u32 timer_freq;
+};
+
+#endif
diff --git a/include/linux/platform_data/wilco-ec.h b/include/linux/platform_data/wilco-ec.h
index ad03b586a095..afede15a95bf 100644
--- a/include/linux/platform_data/wilco-ec.h
+++ b/include/linux/platform_data/wilco-ec.h
@@ -29,6 +29,7 @@
* @data_size: Size of the data buffer used for EC communication.
* @debugfs_pdev: The child platform_device used by the debugfs sub-driver.
* @rtc_pdev: The child platform_device used by the RTC sub-driver.
+ * @charger_pdev: Child platform_device used by the charger config sub-driver.
* @telem_pdev: The child platform_device used by the telemetry sub-driver.
*/
struct wilco_ec_device {
@@ -41,6 +42,7 @@ struct wilco_ec_device {
size_t data_size;
struct platform_device *debugfs_pdev;
struct platform_device *rtc_pdev;
+ struct platform_device *charger_pdev;
struct platform_device *telem_pdev;
};
@@ -120,6 +122,19 @@ struct wilco_ec_message {
*/
int wilco_ec_mailbox(struct wilco_ec_device *ec, struct wilco_ec_message *msg);
+/**
+ * wilco_keyboard_leds_init() - Set up the keyboard backlight LEDs.
+ * @ec: EC device to query.
+ *
+ * After this call, the keyboard backlight will be exposed through an LED
+ * device at /sys/class/leds.
+ *
+ * This may sleep because it uses wilco_ec_mailbox().
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+int wilco_keyboard_leds_init(struct wilco_ec_device *ec);
+
/*
* A Property is typically a data item that is stored to NVRAM
* by the EC. Each of these data items has an index associated
diff --git a/include/linux/platform_data/x86/asus-wmi.h b/include/linux/platform_data/x86/asus-wmi.h
index 60249e22e844..d39fc658c320 100644
--- a/include/linux/platform_data/x86/asus-wmi.h
+++ b/include/linux/platform_data/x86/asus-wmi.h
@@ -58,6 +58,7 @@
#define ASUS_WMI_DEVID_LIGHT_SENSOR 0x00050022 /* ?? */
#define ASUS_WMI_DEVID_LIGHTBAR 0x00050025
#define ASUS_WMI_DEVID_FAN_BOOST_MODE 0x00110018
+#define ASUS_WMI_DEVID_THROTTLE_THERMAL_POLICY 0x00120075
/* Misc */
#define ASUS_WMI_DEVID_CAMERA 0x00060013
diff --git a/include/linux/platform_device.h b/include/linux/platform_device.h
index f2688404d1cd..bdc35753ef7c 100644
--- a/include/linux/platform_device.h
+++ b/include/linux/platform_device.h
@@ -24,7 +24,7 @@ struct platform_device {
int id;
bool id_auto;
struct device dev;
- u64 dma_mask;
+ u64 platform_dma_mask;
u32 num_resources;
struct resource *resource;
@@ -55,8 +55,17 @@ extern struct device *
platform_find_device_by_driver(struct device *start,
const struct device_driver *drv);
extern void __iomem *
+devm_platform_get_and_ioremap_resource(struct platform_device *pdev,
+ unsigned int index, struct resource **res);
+extern void __iomem *
devm_platform_ioremap_resource(struct platform_device *pdev,
unsigned int index);
+extern void __iomem *
+devm_platform_ioremap_resource_wc(struct platform_device *pdev,
+ unsigned int index);
+extern void __iomem *
+devm_platform_ioremap_resource_byname(struct platform_device *pdev,
+ const char *name);
extern int platform_get_irq(struct platform_device *, unsigned int);
extern int platform_get_irq_optional(struct platform_device *, unsigned int);
extern int platform_irq_count(struct platform_device *);
@@ -83,7 +92,7 @@ struct platform_device_info {
size_t size_data;
u64 dma_mask;
- struct property_entry *properties;
+ const struct property_entry *properties;
};
extern struct platform_device *platform_device_register_full(
const struct platform_device_info *pdevinfo);
@@ -294,58 +303,6 @@ void platform_unregister_drivers(struct platform_driver * const *drivers,
#define platform_register_drivers(drivers, count) \
__platform_register_drivers(drivers, count, THIS_MODULE)
-/* early platform driver interface */
-struct early_platform_driver {
- const char *class_str;
- struct platform_driver *pdrv;
- struct list_head list;
- int requested_id;
- char *buffer;
- int bufsize;
-};
-
-#define EARLY_PLATFORM_ID_UNSET -2
-#define EARLY_PLATFORM_ID_ERROR -3
-
-extern int early_platform_driver_register(struct early_platform_driver *epdrv,
- char *buf);
-extern void early_platform_add_devices(struct platform_device **devs, int num);
-
-static inline int is_early_platform_device(struct platform_device *pdev)
-{
- return !pdev->dev.driver;
-}
-
-extern void early_platform_driver_register_all(char *class_str);
-extern int early_platform_driver_probe(char *class_str,
- int nr_probe, int user_only);
-extern void early_platform_cleanup(void);
-
-#define early_platform_init(class_string, platdrv) \
- early_platform_init_buffer(class_string, platdrv, NULL, 0)
-
-#ifndef MODULE
-#define early_platform_init_buffer(class_string, platdrv, buf, bufsiz) \
-static __initdata struct early_platform_driver early_driver = { \
- .class_str = class_string, \
- .buffer = buf, \
- .bufsize = bufsiz, \
- .pdrv = platdrv, \
- .requested_id = EARLY_PLATFORM_ID_UNSET, \
-}; \
-static int __init early_platform_driver_setup_func(char *buffer) \
-{ \
- return early_platform_driver_register(&early_driver, buffer); \
-} \
-early_param(class_string, early_platform_driver_setup_func)
-#else /* MODULE */
-#define early_platform_init_buffer(class_string, platdrv, buf, bufsiz) \
-static inline char *early_platform_driver_setup_func(void) \
-{ \
- return bufsiz ? buf : NULL; \
-}
-#endif /* MODULE */
-
#ifdef CONFIG_SUSPEND
extern int platform_pm_suspend(struct device *dev);
extern int platform_pm_resume(struct device *dev);
@@ -380,4 +337,16 @@ extern int platform_dma_configure(struct device *dev);
#define USE_PLATFORM_PM_SLEEP_OPS
#endif
+#ifndef CONFIG_SUPERH
+/*
+ * REVISIT: This stub is needed for all non-SuperH users of early platform
+ * drivers. It should go away once we introduce the new platform_device-based
+ * early driver framework.
+ */
+static inline int is_sh_early_platform_device(struct platform_device *pdev)
+{
+ return 0;
+}
+#endif /* CONFIG_SUPERH */
+
#endif /* _PLATFORM_DEVICE_H_ */
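Sketch of the new combined helper in a probe path: devm_platform_get_and_ioremap_resource() maps resource @index and, when the last argument is non-NULL, also hands back the struct resource so the caller can query its size (the probe function name is hypothetical):

#include <linux/err.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	struct resource *res;
	void __iomem *base;

	base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	dev_dbg(&pdev->dev, "mapped a %llu byte region\n",
		(unsigned long long)resource_size(res));
	return 0;
}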
diff --git a/include/linux/pm.h b/include/linux/pm.h
index 4c441be03079..e057d1fa2469 100644
--- a/include/linux/pm.h
+++ b/include/linux/pm.h
@@ -637,6 +637,7 @@ extern void dev_pm_put_subsys_data(struct device *dev);
* struct dev_pm_domain - power management domain representation.
*
* @ops: Power management operations associated with this domain.
+ * @start: Called when a user needs to start the device via the domain.
* @detach: Called when removing a device from the domain.
* @activate: Called before executing probe routines for bus types and drivers.
* @sync: Called after successful driver probe.
@@ -648,6 +649,7 @@ extern void dev_pm_put_subsys_data(struct device *dev);
*/
struct dev_pm_domain {
struct dev_pm_ops ops;
+ int (*start)(struct device *dev);
void (*detach)(struct device *dev, bool power_off);
int (*activate)(struct device *dev);
void (*sync)(struct device *dev);
diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
index baf02ff91a31..9ec78ee53652 100644
--- a/include/linux/pm_domain.h
+++ b/include/linux/pm_domain.h
@@ -284,6 +284,8 @@ void of_genpd_del_provider(struct device_node *np);
int of_genpd_add_device(struct of_phandle_args *args, struct device *dev);
int of_genpd_add_subdomain(struct of_phandle_args *parent_spec,
struct of_phandle_args *subdomain_spec);
+int of_genpd_remove_subdomain(struct of_phandle_args *parent_spec,
+ struct of_phandle_args *subdomain_spec);
struct generic_pm_domain *of_genpd_remove_last(struct device_node *np);
int of_genpd_parse_idle_states(struct device_node *dn,
struct genpd_power_state **states, int *n);
@@ -322,6 +324,12 @@ static inline int of_genpd_add_subdomain(struct of_phandle_args *parent_spec,
return -ENODEV;
}
+static inline int of_genpd_remove_subdomain(struct of_phandle_args *parent_spec,
+ struct of_phandle_args *subdomain_spec)
+{
+ return -ENODEV;
+}
+
static inline int of_genpd_parse_idle_states(struct device_node *dn,
struct genpd_power_state **states, int *n)
{
@@ -366,6 +374,7 @@ struct device *dev_pm_domain_attach_by_id(struct device *dev,
struct device *dev_pm_domain_attach_by_name(struct device *dev,
const char *name);
void dev_pm_domain_detach(struct device *dev, bool power_off);
+int dev_pm_domain_start(struct device *dev);
void dev_pm_domain_set(struct device *dev, struct dev_pm_domain *pd);
#else
static inline int dev_pm_domain_attach(struct device *dev, bool power_on)
@@ -383,6 +392,10 @@ static inline struct device *dev_pm_domain_attach_by_name(struct device *dev,
return NULL;
}
static inline void dev_pm_domain_detach(struct device *dev, bool power_off) {}
+static inline int dev_pm_domain_start(struct device *dev)
+{
+ return 0;
+}
static inline void dev_pm_domain_set(struct device *dev,
struct dev_pm_domain *pd) {}
#endif
diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h
index b8197ab014f2..747861816f4f 100644
--- a/include/linux/pm_opp.h
+++ b/include/linux/pm_opp.h
@@ -22,6 +22,7 @@ struct opp_table;
enum dev_pm_opp_event {
OPP_EVENT_ADD, OPP_EVENT_REMOVE, OPP_EVENT_ENABLE, OPP_EVENT_DISABLE,
+ OPP_EVENT_ADJUST_VOLTAGE,
};
/**
@@ -113,6 +114,10 @@ int dev_pm_opp_add(struct device *dev, unsigned long freq,
void dev_pm_opp_remove(struct device *dev, unsigned long freq);
void dev_pm_opp_remove_all_dynamic(struct device *dev);
+int dev_pm_opp_adjust_voltage(struct device *dev, unsigned long freq,
+ unsigned long u_volt, unsigned long u_volt_min,
+ unsigned long u_volt_max);
+
int dev_pm_opp_enable(struct device *dev, unsigned long freq);
int dev_pm_opp_disable(struct device *dev, unsigned long freq);
@@ -242,6 +247,14 @@ static inline void dev_pm_opp_remove_all_dynamic(struct device *dev)
{
}
+static inline int
+dev_pm_opp_adjust_voltage(struct device *dev, unsigned long freq,
+ unsigned long u_volt, unsigned long u_volt_min,
+ unsigned long u_volt_max)
+{
+ return 0;
+}
+
static inline int dev_pm_opp_enable(struct device *dev, unsigned long freq)
{
return 0;
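Sketch of the new dev_pm_opp_adjust_voltage() call, which lets a driver retune the voltage of an already-registered OPP at runtime, for instance after reading silicon calibration data; the frequency and voltages below are illustrative only:

#include <linux/pm_opp.h>

static int example_trim_800mhz_opp(struct device *dev)
{
	/* New target 1.10 V, permitted range 1.05 V .. 1.15 V (in uV). */
	return dev_pm_opp_adjust_voltage(dev, 800000000,
					 1100000, 1050000, 1150000);
}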
diff --git a/include/linux/pm_qos.h b/include/linux/pm_qos.h
index ebf5ef17cc2a..4a69d4af3ff8 100644
--- a/include/linux/pm_qos.h
+++ b/include/linux/pm_qos.h
@@ -1,22 +1,20 @@
/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LINUX_PM_QOS_H
-#define _LINUX_PM_QOS_H
-/* interface for the pm_qos_power infrastructure of the linux kernel.
+/*
+ * Definitions related to Power Management Quality of Service (PM QoS).
+ *
+ * Copyright (C) 2020 Intel Corporation
*
- * Mark Gross <mgross@linux.intel.com>
+ * Authors:
+ * Mark Gross <mgross@linux.intel.com>
+ * Rafael J. Wysocki <rafael.j.wysocki@intel.com>
*/
+
+#ifndef _LINUX_PM_QOS_H
+#define _LINUX_PM_QOS_H
+
#include <linux/plist.h>
#include <linux/notifier.h>
#include <linux/device.h>
-#include <linux/workqueue.h>
-
-enum {
- PM_QOS_RESERVED = 0,
- PM_QOS_CPU_DMA_LATENCY,
-
- /* insert new class ID */
- PM_QOS_NUM_CLASSES,
-};
enum pm_qos_flags_status {
PM_QOS_FLAGS_UNDEFINED = -1,
@@ -29,46 +27,21 @@ enum pm_qos_flags_status {
#define PM_QOS_LATENCY_ANY S32_MAX
#define PM_QOS_LATENCY_ANY_NS ((s64)PM_QOS_LATENCY_ANY * NSEC_PER_USEC)
-#define PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE (2000 * USEC_PER_SEC)
+#define PM_QOS_CPU_LATENCY_DEFAULT_VALUE (2000 * USEC_PER_SEC)
#define PM_QOS_RESUME_LATENCY_DEFAULT_VALUE PM_QOS_LATENCY_ANY
#define PM_QOS_RESUME_LATENCY_NO_CONSTRAINT PM_QOS_LATENCY_ANY
#define PM_QOS_RESUME_LATENCY_NO_CONSTRAINT_NS PM_QOS_LATENCY_ANY_NS
#define PM_QOS_LATENCY_TOLERANCE_DEFAULT_VALUE 0
+#define PM_QOS_MIN_FREQUENCY_DEFAULT_VALUE 0
+#define PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE FREQ_QOS_MAX_DEFAULT_VALUE
#define PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT (-1)
#define PM_QOS_FLAG_NO_POWER_OFF (1 << 0)
-struct pm_qos_request {
- struct plist_node node;
- int pm_qos_class;
- struct delayed_work work; /* for pm_qos_update_request_timeout */
-};
-
-struct pm_qos_flags_request {
- struct list_head node;
- s32 flags; /* Do not change to 64 bit */
-};
-
-enum dev_pm_qos_req_type {
- DEV_PM_QOS_RESUME_LATENCY = 1,
- DEV_PM_QOS_LATENCY_TOLERANCE,
- DEV_PM_QOS_FLAGS,
-};
-
-struct dev_pm_qos_request {
- enum dev_pm_qos_req_type type;
- union {
- struct plist_node pnode;
- struct pm_qos_flags_request flr;
- } data;
- struct device *dev;
-};
-
enum pm_qos_type {
PM_QOS_UNITIALIZED,
PM_QOS_MAX, /* return the largest value */
PM_QOS_MIN, /* return the smallest value */
- PM_QOS_SUM /* return the sum */
};
/*
@@ -85,14 +58,66 @@ struct pm_qos_constraints {
struct blocking_notifier_head *notifiers;
};
+struct pm_qos_request {
+ struct plist_node node;
+ struct pm_qos_constraints *qos;
+};
+
+struct pm_qos_flags_request {
+ struct list_head node;
+ s32 flags; /* Do not change to 64 bit */
+};
+
struct pm_qos_flags {
struct list_head list;
s32 effective_flags; /* Do not change to 64 bit */
};
+
+#define FREQ_QOS_MIN_DEFAULT_VALUE 0
+#define FREQ_QOS_MAX_DEFAULT_VALUE S32_MAX
+
+enum freq_qos_req_type {
+ FREQ_QOS_MIN = 1,
+ FREQ_QOS_MAX,
+};
+
+struct freq_constraints {
+ struct pm_qos_constraints min_freq;
+ struct blocking_notifier_head min_freq_notifiers;
+ struct pm_qos_constraints max_freq;
+ struct blocking_notifier_head max_freq_notifiers;
+};
+
+struct freq_qos_request {
+ enum freq_qos_req_type type;
+ struct plist_node pnode;
+ struct freq_constraints *qos;
+};
+
+
+enum dev_pm_qos_req_type {
+ DEV_PM_QOS_RESUME_LATENCY = 1,
+ DEV_PM_QOS_LATENCY_TOLERANCE,
+ DEV_PM_QOS_MIN_FREQUENCY,
+ DEV_PM_QOS_MAX_FREQUENCY,
+ DEV_PM_QOS_FLAGS,
+};
+
+struct dev_pm_qos_request {
+ enum dev_pm_qos_req_type type;
+ union {
+ struct plist_node pnode;
+ struct pm_qos_flags_request flr;
+ struct freq_qos_request freq;
+ } data;
+ struct device *dev;
+};
+
struct dev_pm_qos {
struct pm_qos_constraints resume_latency;
struct pm_qos_constraints latency_tolerance;
+ struct freq_constraints freq;
struct pm_qos_flags flags;
struct dev_pm_qos_request *resume_latency_req;
struct dev_pm_qos_request *latency_tolerance_req;
@@ -111,24 +136,31 @@ static inline int dev_pm_qos_request_active(struct dev_pm_qos_request *req)
return req->dev != NULL;
}
+s32 pm_qos_read_value(struct pm_qos_constraints *c);
int pm_qos_update_target(struct pm_qos_constraints *c, struct plist_node *node,
enum pm_qos_req_action action, int value);
bool pm_qos_update_flags(struct pm_qos_flags *pqf,
struct pm_qos_flags_request *req,
enum pm_qos_req_action action, s32 val);
-void pm_qos_add_request(struct pm_qos_request *req, int pm_qos_class,
- s32 value);
-void pm_qos_update_request(struct pm_qos_request *req,
- s32 new_value);
-void pm_qos_update_request_timeout(struct pm_qos_request *req,
- s32 new_value, unsigned long timeout_us);
-void pm_qos_remove_request(struct pm_qos_request *req);
-
-int pm_qos_request(int pm_qos_class);
-int pm_qos_add_notifier(int pm_qos_class, struct notifier_block *notifier);
-int pm_qos_remove_notifier(int pm_qos_class, struct notifier_block *notifier);
-int pm_qos_request_active(struct pm_qos_request *req);
-s32 pm_qos_read_value(struct pm_qos_constraints *c);
+
+#ifdef CONFIG_CPU_IDLE
+s32 cpu_latency_qos_limit(void);
+bool cpu_latency_qos_request_active(struct pm_qos_request *req);
+void cpu_latency_qos_add_request(struct pm_qos_request *req, s32 value);
+void cpu_latency_qos_update_request(struct pm_qos_request *req, s32 new_value);
+void cpu_latency_qos_remove_request(struct pm_qos_request *req);
+#else
+static inline s32 cpu_latency_qos_limit(void) { return INT_MAX; }
+static inline bool cpu_latency_qos_request_active(struct pm_qos_request *req)
+{
+ return false;
+}
+static inline void cpu_latency_qos_add_request(struct pm_qos_request *req,
+ s32 value) {}
+static inline void cpu_latency_qos_update_request(struct pm_qos_request *req,
+ s32 new_value) {}
+static inline void cpu_latency_qos_remove_request(struct pm_qos_request *req) {}
+#endif
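
A minimal sketch of the CPU latency QoS interface declared in the block above, which replaces the removed pm_qos_add_request()-style calls; the 20 us bound and request placement are illustrative:

#include <linux/pm_qos.h>

static struct pm_qos_request example_latency_req;

static void example_enter_low_latency(void)
{
	/* Keep cpuidle out of states with more than 20 us exit latency. */
	cpu_latency_qos_add_request(&example_latency_req, 20);
}

static void example_exit_low_latency(void)
{
	if (cpu_latency_qos_request_active(&example_latency_req))
		cpu_latency_qos_remove_request(&example_latency_req);
}
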
#ifdef CONFIG_PM
enum pm_qos_flags_status __dev_pm_qos_flags(struct device *dev, s32 mask);
@@ -191,6 +223,10 @@ static inline s32 dev_pm_qos_read_value(struct device *dev,
switch (type) {
case DEV_PM_QOS_RESUME_LATENCY:
return PM_QOS_RESUME_LATENCY_NO_CONSTRAINT;
+ case DEV_PM_QOS_MIN_FREQUENCY:
+ return PM_QOS_MIN_FREQUENCY_DEFAULT_VALUE;
+ case DEV_PM_QOS_MAX_FREQUENCY:
+ return PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE;
default:
WARN_ON(1);
return 0;
@@ -255,27 +291,6 @@ static inline s32 dev_pm_qos_raw_resume_latency(struct device *dev)
}
#endif
-#define FREQ_QOS_MIN_DEFAULT_VALUE 0
-#define FREQ_QOS_MAX_DEFAULT_VALUE (-1)
-
-enum freq_qos_req_type {
- FREQ_QOS_MIN = 1,
- FREQ_QOS_MAX,
-};
-
-struct freq_constraints {
- struct pm_qos_constraints min_freq;
- struct blocking_notifier_head min_freq_notifiers;
- struct pm_qos_constraints max_freq;
- struct blocking_notifier_head max_freq_notifiers;
-};
-
-struct freq_qos_request {
- enum freq_qos_req_type type;
- struct plist_node pnode;
- struct freq_constraints *qos;
-};
-
static inline int freq_qos_request_active(struct freq_qos_request *req)
{
return !IS_ERR_OR_NULL(req->qos);
@@ -291,6 +306,8 @@ int freq_qos_add_request(struct freq_constraints *qos,
enum freq_qos_req_type type, s32 value);
int freq_qos_update_request(struct freq_qos_request *req, s32 new_value);
int freq_qos_remove_request(struct freq_qos_request *req);
+int freq_qos_apply(struct freq_qos_request *req,
+ enum pm_qos_req_action action, s32 value);
int freq_qos_add_notifier(struct freq_constraints *qos,
enum freq_qos_req_type type,
diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
index 22af69d237a6..3bdcbce8141a 100644
--- a/include/linux/pm_runtime.h
+++ b/include/linux/pm_runtime.h
@@ -38,7 +38,7 @@ extern int pm_runtime_force_resume(struct device *dev);
extern int __pm_runtime_idle(struct device *dev, int rpmflags);
extern int __pm_runtime_suspend(struct device *dev, int rpmflags);
extern int __pm_runtime_resume(struct device *dev, int rpmflags);
-extern int pm_runtime_get_if_in_use(struct device *dev);
+extern int pm_runtime_get_if_active(struct device *dev, bool ign_usage_count);
extern int pm_schedule_suspend(struct device *dev, unsigned int delay);
extern int __pm_runtime_set_status(struct device *dev, unsigned int status);
extern int pm_runtime_barrier(struct device *dev);
@@ -60,6 +60,11 @@ extern void pm_runtime_put_suppliers(struct device *dev);
extern void pm_runtime_new_link(struct device *dev);
extern void pm_runtime_drop_link(struct device *dev);
+static inline int pm_runtime_get_if_in_use(struct device *dev)
+{
+ return pm_runtime_get_if_active(dev, false);
+}
+
static inline void pm_suspend_ignore_children(struct device *dev, bool enable)
{
dev->power.ignore_children = enable;
@@ -143,6 +148,11 @@ static inline int pm_runtime_get_if_in_use(struct device *dev)
{
return -EINVAL;
}
+static inline int pm_runtime_get_if_active(struct device *dev,
+ bool ign_usage_count)
+{
+ return -EINVAL;
+}
static inline int __pm_runtime_set_status(struct device *dev,
unsigned int status) { return 0; }
static inline int pm_runtime_barrier(struct device *dev) { return 0; }
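
A hedged sketch of the pm_runtime_get_if_in_use() -> pm_runtime_get_if_active() split above: passing true for ign_usage_count, as the name suggests, takes a reference whenever the device is runtime-active even if its usage count is zero. The register access is a placeholder:

#include <linux/pm_runtime.h>

static void example_touch_hw_if_powered(struct device *dev)
{
	/* > 0 means the device was active and a usage count was taken. */
	if (pm_runtime_get_if_active(dev, true) > 0) {
		/* ... safe to access the hardware here ... */
		pm_runtime_put(dev);
	}
}
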
diff --git a/include/linux/pm_wakeup.h b/include/linux/pm_wakeup.h
index 661efa029c96..aa3da6611533 100644
--- a/include/linux/pm_wakeup.h
+++ b/include/linux/pm_wakeup.h
@@ -63,6 +63,11 @@ struct wakeup_source {
bool autosleep_enabled:1;
};
+#define for_each_wakeup_source(ws) \
+ for ((ws) = wakeup_sources_walk_start(); \
+ (ws); \
+ (ws) = wakeup_sources_walk_next((ws)))
+
#ifdef CONFIG_PM_SLEEP
/*
@@ -92,6 +97,10 @@ extern void wakeup_source_remove(struct wakeup_source *ws);
extern struct wakeup_source *wakeup_source_register(struct device *dev,
const char *name);
extern void wakeup_source_unregister(struct wakeup_source *ws);
+extern int wakeup_sources_read_lock(void);
+extern void wakeup_sources_read_unlock(int idx);
+extern struct wakeup_source *wakeup_sources_walk_start(void);
+extern struct wakeup_source *wakeup_sources_walk_next(struct wakeup_source *ws);
extern int device_wakeup_enable(struct device *dev);
extern int device_wakeup_disable(struct device *dev);
extern void device_set_wakeup_capable(struct device *dev, bool capable);
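
A hedged sketch of walking the wakeup source list with the new iterator and read-lock helpers declared above; the pr_info() output is illustrative:

#include <linux/pm_wakeup.h>

static void example_dump_wakeup_sources(void)
{
	struct wakeup_source *ws;
	int idx;

	idx = wakeup_sources_read_lock();
	for_each_wakeup_source(ws)
		pr_info("wakeup source: %s\n", ws->name);
	wakeup_sources_read_unlock(idx);
}
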
diff --git a/include/linux/pmbus.h b/include/linux/pmbus.h
index 08468fca5ea2..1ea5bae708a1 100644
--- a/include/linux/pmbus.h
+++ b/include/linux/pmbus.h
@@ -8,6 +8,8 @@
#ifndef _PMBUS_H_
#define _PMBUS_H_
+#include <linux/bits.h>
+
/* flags */
/*
@@ -23,7 +25,14 @@
* communication errors for no explicable reason. For such chips, checking
* the status register must be disabled.
*/
-#define PMBUS_SKIP_STATUS_CHECK (1 << 0)
+#define PMBUS_SKIP_STATUS_CHECK BIT(0)
+
+/*
+ * PMBUS_WRITE_PROTECTED
+ * Set if the chip is write protected and write protection is not determined
+ * by the standard WRITE_PROTECT command.
+ */
+#define PMBUS_WRITE_PROTECTED BIT(1)
struct pmbus_platform_data {
u32 flags; /* Device specific flags */
diff --git a/include/linux/pnp.h b/include/linux/pnp.h
index 3b12fd28af78..b18dca67253d 100644
--- a/include/linux/pnp.h
+++ b/include/linux/pnp.h
@@ -379,7 +379,7 @@ struct pnp_id {
};
struct pnp_driver {
- char *name;
+ const char *name;
const struct pnp_device_id *id_table;
unsigned int flags;
int (*probe) (struct pnp_dev *dev, const struct pnp_device_id *dev_id);
diff --git a/include/linux/posix-clock.h b/include/linux/posix-clock.h
index fe6cfdcfbc26..468328b1e1dd 100644
--- a/include/linux/posix-clock.h
+++ b/include/linux/posix-clock.h
@@ -69,29 +69,32 @@ struct posix_clock_operations {
*
* @ops: Functional interface to the clock
* @cdev: Character device instance for this clock
- * @kref: Reference count.
+ * @dev: Pointer to the clock's device.
* @rwsem: Protects the 'zombie' field from concurrent access.
* @zombie: If 'zombie' is true, then the hardware has disappeared.
- * @release: A function to free the structure when the reference count reaches
- * zero. May be NULL if structure is statically allocated.
*
* Drivers should embed their struct posix_clock within a private
* structure, obtaining a reference to it during callbacks using
* container_of().
+ *
+ * Drivers should supply an initialized but not exposed struct device
+ * to posix_clock_register(). It is used to manage the lifetime of the
+ * driver's private structure. Its 'release' field should be set to
+ * a release function for this private structure.
*/
struct posix_clock {
struct posix_clock_operations ops;
struct cdev cdev;
- struct kref kref;
+ struct device *dev;
struct rw_semaphore rwsem;
bool zombie;
- void (*release)(struct posix_clock *clk);
};
/**
* posix_clock_register() - register a new clock
- * @clk: Pointer to the clock. Caller must provide 'ops' and 'release'
- * @devid: Allocated device id
+ * @clk: Pointer to the clock. Caller must provide 'ops' field
+ * @dev: Pointer to the initialized device. Caller must provide
+ * 'release' field
*
* A clock driver calls this function to register itself with the
* clock device subsystem. If 'clk' points to dynamically allocated
@@ -100,7 +103,7 @@ struct posix_clock {
*
* Returns zero on success, non-zero otherwise.
*/
-int posix_clock_register(struct posix_clock *clk, dev_t devid);
+int posix_clock_register(struct posix_clock *clk, struct device *dev);
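
A hedged sketch of the lifetime scheme described above: the driver-private structure embeds both the posix_clock and the struct device whose ->release frees it. Names and the exact initialization sequence are illustrative:

#include <linux/posix-clock.h>
#include <linux/device.h>
#include <linux/slab.h>

struct example_clock {
	struct posix_clock clock;	/* embedded, per the comment above */
	struct device dev;		/* owns the lifetime of this struct */
};

static void example_clock_release(struct device *dev)
{
	struct example_clock *ec = container_of(dev, struct example_clock, dev);

	kfree(ec);
}

/* During registration (sketch only):
 *	device_initialize(&ec->dev);
 *	ec->dev.release = example_clock_release;
 *	err = posix_clock_register(&ec->clock, &ec->dev);
 */
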
/**
* posix_clock_unregister() - unregister a clock
diff --git a/include/linux/posix-timers.h b/include/linux/posix-timers.h
index 3d10c84a97a9..e3f0f8585da4 100644
--- a/include/linux/posix-timers.h
+++ b/include/linux/posix-timers.h
@@ -69,7 +69,7 @@ static inline int clockid_to_fd(const clockid_t clk)
struct cpu_timer {
struct timerqueue_node node;
struct timerqueue_head *head;
- struct task_struct *task;
+ struct pid *pid;
struct list_head elist;
int firing;
};
diff --git a/include/linux/power/max17042_battery.h b/include/linux/power/max17042_battery.h
index 4badd5322949..d55c746ac56e 100644
--- a/include/linux/power/max17042_battery.h
+++ b/include/linux/power/max17042_battery.h
@@ -105,11 +105,56 @@ enum max17042_register {
MAX17042_OCV = 0xEE,
- MAX17042_OCVInternal = 0xFB,
+ MAX17042_OCVInternal = 0xFB, /* MAX17055 VFOCV */
MAX17042_VFSOC = 0xFF,
};
+enum max17055_register {
+ MAX17055_QRes = 0x0C,
+ MAX17055_TTF = 0x20,
+ MAX17055_V_empty = 0x3A,
+ MAX17055_TIMER = 0x3E,
+ MAX17055_USER_MEM = 0x40,
+ MAX17055_RGAIN = 0x42,
+
+ MAX17055_ConvgCfg = 0x49,
+ MAX17055_VFRemCap = 0x4A,
+
+ MAX17055_STATUS2 = 0xB0,
+ MAX17055_POWER = 0xB1,
+ MAX17055_ID = 0xB2,
+ MAX17055_AvgPower = 0xB3,
+ MAX17055_IAlrtTh = 0xB4,
+ MAX17055_TTFCfg = 0xB5,
+ MAX17055_CVMixCap = 0xB6,
+ MAX17055_CVHalfTime = 0xB7,
+ MAX17055_CGTempCo = 0xB8,
+ MAX17055_Curve = 0xB9,
+ MAX17055_HibCfg = 0xBA,
+ MAX17055_Config2 = 0xBB,
+ MAX17055_VRipple = 0xBC,
+ MAX17055_RippleCfg = 0xBD,
+ MAX17055_TimerH = 0xBE,
+
+ MAX17055_RSense = 0xD0,
+ MAX17055_ScOcvLim = 0xD1,
+
+ MAX17055_SOCHold = 0xD3,
+ MAX17055_MaxPeakPwr = 0xD4,
+ MAX17055_SusPeakPwr = 0xD5,
+ MAX17055_PackResistance = 0xD6,
+ MAX17055_SysResistance = 0xD7,
+ MAX17055_MinSysV = 0xD8,
+ MAX17055_MPPCurrent = 0xD9,
+ MAX17055_SPPCurrent = 0xDA,
+ MAX17055_ModelCfg = 0xDB,
+ MAX17055_AtQResidual = 0xDC,
+ MAX17055_AtTTE = 0xDD,
+ MAX17055_AtAvSOC = 0xDE,
+ MAX17055_AtAvCap = 0xDF,
+};
+
/* Registers specific to max17047/50 */
enum max17047_register {
MAX17047_QRTbl00 = 0x12,
@@ -125,6 +170,7 @@ enum max170xx_chip_type {
MAXIM_DEVICE_TYPE_MAX17042,
MAXIM_DEVICE_TYPE_MAX17047,
MAXIM_DEVICE_TYPE_MAX17050,
+ MAXIM_DEVICE_TYPE_MAX17055,
MAXIM_DEVICE_TYPE_NUM
};
diff --git a/include/linux/power/smartreflex.h b/include/linux/power/smartreflex.h
index d0b37e937037..971c9264179e 100644
--- a/include/linux/power/smartreflex.h
+++ b/include/linux/power/smartreflex.h
@@ -293,6 +293,9 @@ struct omap_sr_data {
struct voltagedomain *voltdm;
};
+
+extern struct omap_sr_data omap_sr_pdata[OMAP_SR_NR];
+
#ifdef CONFIG_POWER_AVS_OMAP
/* Smartreflex module enable/disable interface */
diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h
index 28413f737e7d..dcd5a71e6c67 100644
--- a/include/linux/power_supply.h
+++ b/include/linux/power_supply.h
@@ -325,6 +325,11 @@ struct power_supply_battery_ocv_table {
int capacity; /* percent */
};
+struct power_supply_resistance_temp_table {
+ int temp; /* celsius */
+ int resistance; /* internal resistance percent */
+};
+
#define POWER_SUPPLY_OCV_TEMP_MAX 20
/*
@@ -349,6 +354,8 @@ struct power_supply_battery_info {
int ocv_temp[POWER_SUPPLY_OCV_TEMP_MAX];/* celsius */
struct power_supply_battery_ocv_table *ocv_table[POWER_SUPPLY_OCV_TEMP_MAX];
int ocv_table_size[POWER_SUPPLY_OCV_TEMP_MAX];
+ struct power_supply_resistance_temp_table *resist_table;
+ int resist_table_size;
};
extern struct atomic_notifier_head power_supply_notifier;
@@ -381,6 +388,9 @@ power_supply_find_ocv2cap_table(struct power_supply_battery_info *info,
int temp, int *table_len);
extern int power_supply_batinfo_ocv2cap(struct power_supply_battery_info *info,
int ocv, int temp);
+extern int
+power_supply_temp2resist_simple(struct power_supply_resistance_temp_table *table,
+ int table_len, int temp);
extern void power_supply_changed(struct power_supply *psy);
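
A hedged sketch of the new power_supply_temp2resist_simple() lookup declared above: the table gives internal resistance as a percentage of the nominal value at a few temperatures, and the helper returns the percentage for the requested temperature. Table ordering and all values are illustrative assumptions:

#include <linux/power_supply.h>

static struct power_supply_resistance_temp_table example_resist_table[] = {
	{ .temp = 50,  .resistance = 90 },
	{ .temp = 25,  .resistance = 100 },
	{ .temp = 0,   .resistance = 130 },
	{ .temp = -10, .resistance = 160 },
};

static int example_resistance_percent(int temp)
{
	return power_supply_temp2resist_simple(example_resist_table,
					       ARRAY_SIZE(example_resist_table),
					       temp);
}
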
extern int power_supply_am_i_supplied(struct power_supply *psy);
extern int power_supply_set_input_current_limit_from_supplier(
diff --git a/include/linux/preempt.h b/include/linux/preempt.h
index bbb68dba37cc..bc3f1aecaa19 100644
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -322,4 +322,34 @@ static inline void preempt_notifier_init(struct preempt_notifier *notifier,
#endif
+/**
+ * migrate_disable - Prevent migration of the current task
+ *
+ * Maps to preempt_disable() which also disables preemption. Use
+ * migrate_disable() to annotate that the intent is to prevent migration,
+ * but not necessarily preemption.
+ *
+ * Can be invoked nested like preempt_disable() and needs the corresponding
+ * number of migrate_enable() invocations.
+ */
+static __always_inline void migrate_disable(void)
+{
+ preempt_disable();
+}
+
+/**
+ * migrate_enable - Allow migration of the current task
+ *
+ * Counterpart to migrate_disable().
+ *
+ * As migrate_disable() can be invoked nested, only the outermost invocation
+ * reenables migration.
+ *
+ * Currently mapped to preempt_enable().
+ */
+static __always_inline void migrate_enable(void)
+{
+ preempt_enable();
+}
+
#endif /* __LINUX_PREEMPT_H */
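
A short illustration of the nesting rule documented above: only the outermost migrate_enable() re-enables migration (and, in this mapping, preemption). Function names are illustrative:

#include <linux/preempt.h>

static void example_inner(void)
{
	migrate_disable();
	/* per-CPU state can be used here without the task moving CPUs */
	migrate_enable();
}

static void example_outer(void)
{
	migrate_disable();
	example_inner();	/* nested disable/enable pairs are fine */
	migrate_enable();	/* outermost call re-enables migration */
}
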
diff --git a/include/linux/printk.h b/include/linux/printk.h
index c09d67edda3a..1e6108b8d15f 100644
--- a/include/linux/printk.h
+++ b/include/linux/printk.h
@@ -302,9 +302,8 @@ extern int kptr_restrict;
printk(KERN_CRIT pr_fmt(fmt), ##__VA_ARGS__)
#define pr_err(fmt, ...) \
printk(KERN_ERR pr_fmt(fmt), ##__VA_ARGS__)
-#define pr_warning(fmt, ...) \
+#define pr_warn(fmt, ...) \
printk(KERN_WARNING pr_fmt(fmt), ##__VA_ARGS__)
-#define pr_warn pr_warning
#define pr_notice(fmt, ...) \
printk(KERN_NOTICE pr_fmt(fmt), ##__VA_ARGS__)
#define pr_info(fmt, ...) \
diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
index a705aa2d03f9..3dfa92633af3 100644
--- a/include/linux/proc_fs.h
+++ b/include/linux/proc_fs.h
@@ -12,6 +12,21 @@ struct proc_dir_entry;
struct seq_file;
struct seq_operations;
+struct proc_ops {
+ int (*proc_open)(struct inode *, struct file *);
+ ssize_t (*proc_read)(struct file *, char __user *, size_t, loff_t *);
+ ssize_t (*proc_write)(struct file *, const char __user *, size_t, loff_t *);
+ loff_t (*proc_lseek)(struct file *, loff_t, int);
+ int (*proc_release)(struct inode *, struct file *);
+ __poll_t (*proc_poll)(struct file *, struct poll_table_struct *);
+ long (*proc_ioctl)(struct file *, unsigned int, unsigned long);
+#ifdef CONFIG_COMPAT
+ long (*proc_compat_ioctl)(struct file *, unsigned int, unsigned long);
+#endif
+ int (*proc_mmap)(struct file *, struct vm_area_struct *);
+ unsigned long (*proc_get_unmapped_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
+};
+
#ifdef CONFIG_PROC_FS
typedef int (*proc_write_t)(struct file *, char *, size_t);
@@ -43,10 +58,10 @@ struct proc_dir_entry *proc_create_single_data(const char *name, umode_t mode,
extern struct proc_dir_entry *proc_create_data(const char *, umode_t,
struct proc_dir_entry *,
- const struct file_operations *,
+ const struct proc_ops *,
void *);
-struct proc_dir_entry *proc_create(const char *name, umode_t mode, struct proc_dir_entry *parent, const struct file_operations *proc_fops);
+struct proc_dir_entry *proc_create(const char *name, umode_t mode, struct proc_dir_entry *parent, const struct proc_ops *proc_ops);
extern void proc_set_size(struct proc_dir_entry *, loff_t);
extern void proc_set_user(struct proc_dir_entry *, kuid_t, kgid_t);
extern void *PDE_DATA(const struct inode *);
@@ -58,8 +73,8 @@ extern int remove_proc_subtree(const char *, struct proc_dir_entry *);
struct proc_dir_entry *proc_create_net_data(const char *name, umode_t mode,
struct proc_dir_entry *parent, const struct seq_operations *ops,
unsigned int state_size, void *data);
-#define proc_create_net(name, mode, parent, state_size, ops) \
- proc_create_net_data(name, mode, parent, state_size, ops, NULL)
+#define proc_create_net(name, mode, parent, ops, state_size) \
+ proc_create_net_data(name, mode, parent, ops, state_size, NULL)
struct proc_dir_entry *proc_create_net_single(const char *name, umode_t mode,
struct proc_dir_entry *parent,
int (*show)(struct seq_file *, void *), void *data);
@@ -108,8 +123,8 @@ static inline struct proc_dir_entry *proc_mkdir_mode(const char *name,
#define proc_create_seq(name, mode, parent, ops) ({NULL;})
#define proc_create_single(name, mode, parent, show) ({NULL;})
#define proc_create_single_data(name, mode, parent, show, data) ({NULL;})
-#define proc_create(name, mode, parent, proc_fops) ({NULL;})
-#define proc_create_data(name, mode, parent, proc_fops, data) ({NULL;})
+#define proc_create(name, mode, parent, proc_ops) ({NULL;})
+#define proc_create_data(name, mode, parent, proc_ops, data) ({NULL;})
static inline void proc_set_size(struct proc_dir_entry *de, loff_t size) {}
static inline void proc_set_user(struct proc_dir_entry *de, kuid_t uid, kgid_t gid) {}
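
A hedged sketch of converting a proc entry to the new struct proc_ops: the seq_file helpers keep their existing roles, only the callback slots change names. The "example" entry and show routine are illustrative:

#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static int example_show(struct seq_file *m, void *v)
{
	seq_puts(m, "hello\n");
	return 0;
}

static int example_open(struct inode *inode, struct file *file)
{
	return single_open(file, example_show, NULL);
}

static const struct proc_ops example_proc_ops = {
	.proc_open	= example_open,
	.proc_read	= seq_read,
	.proc_lseek	= seq_lseek,
	.proc_release	= single_release,
};

/* proc_create("example", 0444, NULL, &example_proc_ops); */
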
diff --git a/include/linux/proc_ns.h b/include/linux/proc_ns.h
index d31cb6215905..4626b1ac3b6c 100644
--- a/include/linux/proc_ns.h
+++ b/include/linux/proc_ns.h
@@ -32,6 +32,8 @@ extern const struct proc_ns_operations pidns_for_children_operations;
extern const struct proc_ns_operations userns_operations;
extern const struct proc_ns_operations mntns_operations;
extern const struct proc_ns_operations cgroupns_operations;
+extern const struct proc_ns_operations timens_operations;
+extern const struct proc_ns_operations timens_for_children_operations;
/*
* We always define these enumerators
@@ -43,6 +45,7 @@ enum {
PROC_USER_INIT_INO = 0xEFFFFFFDU,
PROC_PID_INIT_INO = 0xEFFFFFFCU,
PROC_CGROUP_INIT_INO = 0xEFFFFFFBU,
+ PROC_TIME_INIT_INO = 0xEFFFFFFAU,
};
#ifdef CONFIG_PROC_FS
@@ -76,10 +79,10 @@ static inline int ns_alloc_inum(struct ns_common *ns)
extern struct file *proc_ns_fget(int fd);
#define get_proc_ns(inode) ((struct ns_common *)(inode)->i_private)
-extern void *ns_get_path(struct path *path, struct task_struct *task,
+extern int ns_get_path(struct path *path, struct task_struct *task,
const struct proc_ns_operations *ns_ops);
typedef struct ns_common *ns_get_path_helper_t(void *);
-extern void *ns_get_path_cb(struct path *path, ns_get_path_helper_t ns_get_cb,
+extern int ns_get_path_cb(struct path *path, ns_get_path_helper_t ns_get_cb,
void *private_data);
extern int ns_get_name(char *buf, size_t size, struct task_struct *task,
diff --git a/include/linux/property.h b/include/linux/property.h
index 9b3d4ca3a73a..d86de017c689 100644
--- a/include/linux/property.h
+++ b/include/linux/property.h
@@ -22,7 +22,7 @@ enum dev_prop_type {
DEV_PROP_U32,
DEV_PROP_U64,
DEV_PROP_STRING,
- DEV_PROP_MAX,
+ DEV_PROP_REF,
};
enum dev_dma_attr {
@@ -80,9 +80,14 @@ struct fwnode_handle *fwnode_find_reference(const struct fwnode_handle *fwnode,
const char *name,
unsigned int index);
+const char *fwnode_get_name(const struct fwnode_handle *fwnode);
+const char *fwnode_get_name_prefix(const struct fwnode_handle *fwnode);
struct fwnode_handle *fwnode_get_parent(const struct fwnode_handle *fwnode);
struct fwnode_handle *fwnode_get_next_parent(
struct fwnode_handle *fwnode);
+unsigned int fwnode_count_parents(const struct fwnode_handle *fwn);
+struct fwnode_handle *fwnode_get_nth_parent(struct fwnode_handle *fwn,
+ unsigned int depth);
struct fwnode_handle *fwnode_get_next_child_node(
const struct fwnode_handle *fwnode, struct fwnode_handle *child);
struct fwnode_handle *fwnode_get_next_available_child_node(
@@ -219,99 +224,133 @@ static inline int fwnode_property_count_u64(const struct fwnode_handle *fwnode,
return fwnode_property_read_u64_array(fwnode, propname, NULL, 0);
}
+struct software_node;
+
+/**
+ * struct software_node_ref_args - Reference property with additional arguments
+ * @node: Reference to a software node
+ * @nargs: Number of elements in @args array
+ * @args: Integer arguments
+ */
+struct software_node_ref_args {
+ const struct software_node *node;
+ unsigned int nargs;
+ u64 args[NR_FWNODE_REFERENCE_ARGS];
+};
+
/**
* struct property_entry - "Built-in" device property representation.
* @name: Name of the property.
* @length: Length of data making up the value.
- * @is_array: True when the property is an array.
+ * @is_inline: True when the property value is stored inline.
* @type: Type of the data in unions.
- * @pointer: Pointer to the property (an array of items of the given type).
- * @value: Value of the property (when it is a single item of the given type).
+ * @pointer: Pointer to the property when it is not stored inline.
+ * @value: Value of the property when it is stored inline.
*/
struct property_entry {
const char *name;
size_t length;
- bool is_array;
+ bool is_inline;
enum dev_prop_type type;
union {
+ const void *pointer;
union {
- const u8 *u8_data;
- const u16 *u16_data;
- const u32 *u32_data;
- const u64 *u64_data;
- const char * const *str;
- } pointer;
- union {
- u8 u8_data;
- u16 u16_data;
- u32 u32_data;
- u64 u64_data;
- const char *str;
+ u8 u8_data[sizeof(u64) / sizeof(u8)];
+ u16 u16_data[sizeof(u64) / sizeof(u16)];
+ u32 u32_data[sizeof(u64) / sizeof(u32)];
+ u64 u64_data[sizeof(u64) / sizeof(u64)];
+ const char *str[sizeof(u64) / sizeof(char *)];
} value;
};
};
/*
- * Note: the below four initializers for the anonymous union are carefully
+ * Note: the below initializers for the anonymous union are carefully
* crafted to avoid gcc-4.4.4's problems with initialization of anon unions
* and structs.
*/
-#define PROPERTY_ENTRY_INTEGER_ARRAY(_name_, _type_, _Type_, _val_) \
+#define __PROPERTY_ENTRY_ELEMENT_SIZE(_elem_) \
+ sizeof(((struct property_entry *)NULL)->value._elem_[0])
+
+#define __PROPERTY_ENTRY_ARRAY_ELSIZE_LEN(_name_, _elsize_, _Type_, \
+ _val_, _len_) \
(struct property_entry) { \
.name = _name_, \
- .length = ARRAY_SIZE(_val_) * sizeof(_type_), \
- .is_array = true, \
+ .length = (_len_) * (_elsize_), \
.type = DEV_PROP_##_Type_, \
- { .pointer = { ._type_##_data = _val_ } }, \
-}
-
-#define PROPERTY_ENTRY_U8_ARRAY(_name_, _val_) \
- PROPERTY_ENTRY_INTEGER_ARRAY(_name_, u8, U8, _val_)
-#define PROPERTY_ENTRY_U16_ARRAY(_name_, _val_) \
- PROPERTY_ENTRY_INTEGER_ARRAY(_name_, u16, U16, _val_)
-#define PROPERTY_ENTRY_U32_ARRAY(_name_, _val_) \
- PROPERTY_ENTRY_INTEGER_ARRAY(_name_, u32, U32, _val_)
-#define PROPERTY_ENTRY_U64_ARRAY(_name_, _val_) \
- PROPERTY_ENTRY_INTEGER_ARRAY(_name_, u64, U64, _val_)
-
-#define PROPERTY_ENTRY_STRING_ARRAY(_name_, _val_) \
-(struct property_entry) { \
- .name = _name_, \
- .length = ARRAY_SIZE(_val_) * sizeof(const char *), \
- .is_array = true, \
- .type = DEV_PROP_STRING, \
- { .pointer = { .str = _val_ } }, \
+ { .pointer = _val_ }, \
}
-#define PROPERTY_ENTRY_INTEGER(_name_, _type_, _Type_, _val_) \
-(struct property_entry) { \
- .name = _name_, \
- .length = sizeof(_type_), \
- .type = DEV_PROP_##_Type_, \
- { .value = { ._type_##_data = _val_ } }, \
+#define __PROPERTY_ENTRY_ARRAY_LEN(_name_, _elem_, _Type_, _val_, _len_)\
+ __PROPERTY_ENTRY_ARRAY_ELSIZE_LEN(_name_, \
+ __PROPERTY_ENTRY_ELEMENT_SIZE(_elem_), \
+ _Type_, _val_, _len_)
+
+#define PROPERTY_ENTRY_U8_ARRAY_LEN(_name_, _val_, _len_) \
+ __PROPERTY_ENTRY_ARRAY_LEN(_name_, u8_data, U8, _val_, _len_)
+#define PROPERTY_ENTRY_U16_ARRAY_LEN(_name_, _val_, _len_) \
+ __PROPERTY_ENTRY_ARRAY_LEN(_name_, u16_data, U16, _val_, _len_)
+#define PROPERTY_ENTRY_U32_ARRAY_LEN(_name_, _val_, _len_) \
+ __PROPERTY_ENTRY_ARRAY_LEN(_name_, u32_data, U32, _val_, _len_)
+#define PROPERTY_ENTRY_U64_ARRAY_LEN(_name_, _val_, _len_) \
+ __PROPERTY_ENTRY_ARRAY_LEN(_name_, u64_data, U64, _val_, _len_)
+#define PROPERTY_ENTRY_STRING_ARRAY_LEN(_name_, _val_, _len_) \
+ __PROPERTY_ENTRY_ARRAY_LEN(_name_, str, STRING, _val_, _len_)
+#define PROPERTY_ENTRY_REF_ARRAY_LEN(_name_, _val_, _len_) \
+ __PROPERTY_ENTRY_ARRAY_ELSIZE_LEN(_name_, \
+ sizeof(struct software_node_ref_args), \
+ REF, _val_, _len_)
+
+#define PROPERTY_ENTRY_U8_ARRAY(_name_, _val_) \
+ PROPERTY_ENTRY_U8_ARRAY_LEN(_name_, _val_, ARRAY_SIZE(_val_))
+#define PROPERTY_ENTRY_U16_ARRAY(_name_, _val_) \
+ PROPERTY_ENTRY_U16_ARRAY_LEN(_name_, _val_, ARRAY_SIZE(_val_))
+#define PROPERTY_ENTRY_U32_ARRAY(_name_, _val_) \
+ PROPERTY_ENTRY_U32_ARRAY_LEN(_name_, _val_, ARRAY_SIZE(_val_))
+#define PROPERTY_ENTRY_U64_ARRAY(_name_, _val_) \
+ PROPERTY_ENTRY_U64_ARRAY_LEN(_name_, _val_, ARRAY_SIZE(_val_))
+#define PROPERTY_ENTRY_STRING_ARRAY(_name_, _val_) \
+ PROPERTY_ENTRY_STRING_ARRAY_LEN(_name_, _val_, ARRAY_SIZE(_val_))
+#define PROPERTY_ENTRY_REF_ARRAY(_name_, _val_) \
+ PROPERTY_ENTRY_REF_ARRAY_LEN(_name_, _val_, ARRAY_SIZE(_val_))
+
+#define __PROPERTY_ENTRY_ELEMENT(_name_, _elem_, _Type_, _val_) \
+(struct property_entry) { \
+ .name = _name_, \
+ .length = __PROPERTY_ENTRY_ELEMENT_SIZE(_elem_), \
+ .is_inline = true, \
+ .type = DEV_PROP_##_Type_, \
+ { .value = { ._elem_[0] = _val_ } }, \
}
-#define PROPERTY_ENTRY_U8(_name_, _val_) \
- PROPERTY_ENTRY_INTEGER(_name_, u8, U8, _val_)
-#define PROPERTY_ENTRY_U16(_name_, _val_) \
- PROPERTY_ENTRY_INTEGER(_name_, u16, U16, _val_)
-#define PROPERTY_ENTRY_U32(_name_, _val_) \
- PROPERTY_ENTRY_INTEGER(_name_, u32, U32, _val_)
-#define PROPERTY_ENTRY_U64(_name_, _val_) \
- PROPERTY_ENTRY_INTEGER(_name_, u64, U64, _val_)
-
-#define PROPERTY_ENTRY_STRING(_name_, _val_) \
-(struct property_entry) { \
- .name = _name_, \
- .length = sizeof(const char *), \
- .type = DEV_PROP_STRING, \
- { .value = { .str = _val_ } }, \
-}
+#define PROPERTY_ENTRY_U8(_name_, _val_) \
+ __PROPERTY_ENTRY_ELEMENT(_name_, u8_data, U8, _val_)
+#define PROPERTY_ENTRY_U16(_name_, _val_) \
+ __PROPERTY_ENTRY_ELEMENT(_name_, u16_data, U16, _val_)
+#define PROPERTY_ENTRY_U32(_name_, _val_) \
+ __PROPERTY_ENTRY_ELEMENT(_name_, u32_data, U32, _val_)
+#define PROPERTY_ENTRY_U64(_name_, _val_) \
+ __PROPERTY_ENTRY_ELEMENT(_name_, u64_data, U64, _val_)
+#define PROPERTY_ENTRY_STRING(_name_, _val_) \
+ __PROPERTY_ENTRY_ELEMENT(_name_, str, STRING, _val_)
#define PROPERTY_ENTRY_BOOL(_name_) \
(struct property_entry) { \
.name = _name_, \
+ .is_inline = true, \
+}
+
+#define PROPERTY_ENTRY_REF(_name_, _ref_, ...) \
+(struct property_entry) { \
+ .name = _name_, \
+ .length = sizeof(struct software_node_ref_args), \
+ .type = DEV_PROP_REF, \
+ { .pointer = &(const struct software_node_ref_args) { \
+ .node = _ref_, \
+ .nargs = ARRAY_SIZE(((u64[]){ 0, ##__VA_ARGS__ })) - 1, \
+ .args = { __VA_ARGS__ }, \
+ } }, \
}
struct property_entry *
@@ -377,48 +416,21 @@ int fwnode_graph_parse_endpoint(const struct fwnode_handle *fwnode,
/* -------------------------------------------------------------------------- */
/* Software fwnode support - when HW description is incomplete or missing */
-struct software_node;
-
-/**
- * struct software_node_ref_args - Reference with additional arguments
- * @node: Reference to a software node
- * @nargs: Number of elements in @args array
- * @args: Integer arguments
- */
-struct software_node_ref_args {
- const struct software_node *node;
- unsigned int nargs;
- u64 args[NR_FWNODE_REFERENCE_ARGS];
-};
-
-/**
- * struct software_node_reference - Named software node reference property
- * @name: Name of the property
- * @nrefs: Number of elements in @refs array
- * @refs: Array of references with optional arguments
- */
-struct software_node_reference {
- const char *name;
- unsigned int nrefs;
- const struct software_node_ref_args *refs;
-};
-
/**
* struct software_node - Software node description
* @name: Name of the software node
* @parent: Parent of the software node
* @properties: Array of device properties
- * @references: Array of software node reference properties
*/
struct software_node {
const char *name;
const struct software_node *parent;
const struct property_entry *properties;
- const struct software_node_reference *references;
};
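
A hedged sketch tying the reworked property_entry macros to a software node; names and values are illustrative:

#include <linux/property.h>

static const u32 example_rates[] = { 100000, 400000 };

static const struct property_entry example_props[] = {
	PROPERTY_ENTRY_STRING("label", "demo"),
	PROPERTY_ENTRY_U32("clock-frequency", 400000),
	PROPERTY_ENTRY_U32_ARRAY("supported-rates", example_rates),
	PROPERTY_ENTRY_BOOL("wakeup-source"),
	{ }
};

static const struct software_node example_node = {
	.name		= "example",
	.properties	= example_props,
};
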
bool is_software_node(const struct fwnode_handle *fwnode);
-const struct software_node *to_software_node(struct fwnode_handle *fwnode);
+const struct software_node *
+to_software_node(const struct fwnode_handle *fwnode);
struct fwnode_handle *software_node_fwnode(const struct software_node *node);
const struct software_node *
diff --git a/include/linux/psci.h b/include/linux/psci.h
index e2bacc6fd2f2..a67712b73b6c 100644
--- a/include/linux/psci.h
+++ b/include/linux/psci.h
@@ -7,6 +7,7 @@
#ifndef __LINUX_PSCI_H
#define __LINUX_PSCI_H
+#include <linux/arm-smccc.h>
#include <linux/init.h>
#include <linux/types.h>
@@ -17,12 +18,8 @@ bool psci_tos_resident_on(int cpu);
int psci_cpu_suspend_enter(u32 state);
bool psci_power_state_is_valid(u32 state);
-
-enum psci_conduit {
- PSCI_CONDUIT_NONE,
- PSCI_CONDUIT_SMC,
- PSCI_CONDUIT_HVC,
-};
+int psci_set_osi_mode(void);
+bool psci_has_osi_support(void);
enum smccc_version {
SMCCC_VERSION_1_0,
@@ -38,7 +35,7 @@ struct psci_operations {
int (*affinity_info)(unsigned long target_affinity,
unsigned long lowest_affinity_level);
int (*migrate_info_type)(void);
- enum psci_conduit conduit;
+ enum arm_smccc_conduit conduit;
enum smccc_version smccc_version;
};
diff --git a/include/linux/psi.h b/include/linux/psi.h
index 7b3de7321219..7361023f3fdd 100644
--- a/include/linux/psi.h
+++ b/include/linux/psi.h
@@ -17,6 +17,8 @@ extern struct psi_group psi_system;
void psi_init(void);
void psi_task_change(struct task_struct *task, int clear, int set);
+void psi_task_switch(struct task_struct *prev, struct task_struct *next,
+ bool sleep);
void psi_memstall_tick(struct task_struct *task, int cpu);
void psi_memstall_enter(unsigned long *flags);
diff --git a/include/linux/psi_types.h b/include/linux/psi_types.h
index 07aaf9b82241..4b7258495a04 100644
--- a/include/linux/psi_types.h
+++ b/include/linux/psi_types.h
@@ -14,13 +14,21 @@ enum psi_task_count {
NR_IOWAIT,
NR_MEMSTALL,
NR_RUNNING,
- NR_PSI_TASK_COUNTS = 3,
+ /*
+ * This can't have values other than 0 or 1 and could be
+ * implemented as a bit flag. But for now we still have room
+ * in the first cacheline of psi_group_cpu, and this way we
+ * don't have to special case any state tracking for it.
+ */
+ NR_ONCPU,
+ NR_PSI_TASK_COUNTS = 4,
};
/* Task state bitmasks */
#define TSK_IOWAIT (1 << NR_IOWAIT)
#define TSK_MEMSTALL (1 << NR_MEMSTALL)
#define TSK_RUNNING (1 << NR_RUNNING)
+#define TSK_ONCPU (1 << NR_ONCPU)
/* Resources that workloads could be stalled on */
enum psi_res {
diff --git a/include/linux/psp-tee.h b/include/linux/psp-tee.h
new file mode 100644
index 000000000000..cb0c95d6d76b
--- /dev/null
+++ b/include/linux/psp-tee.h
@@ -0,0 +1,91 @@
+/* SPDX-License-Identifier: MIT */
+/*
+ * AMD Trusted Execution Environment (TEE) interface
+ *
+ * Author: Rijo Thomas <Rijo-john.Thomas@amd.com>
+ *
+ * Copyright 2019 Advanced Micro Devices, Inc.
+ *
+ */
+
+#ifndef __PSP_TEE_H_
+#define __PSP_TEE_H_
+
+#include <linux/types.h>
+#include <linux/errno.h>
+
+/* This file defines the Trusted Execution Environment (TEE) interface commands
+ * and the API exported by the AMD Secure Processor driver to communicate with
+ * the AMD-TEE Trusted OS.
+ */
+
+/**
+ * enum tee_cmd_id - TEE Interface Command IDs
+ * @TEE_CMD_ID_LOAD_TA: Load Trusted Application (TA) binary into
+ * TEE environment
+ * @TEE_CMD_ID_UNLOAD_TA: Unload TA binary from TEE environment
+ * @TEE_CMD_ID_OPEN_SESSION: Open session with loaded TA
+ * @TEE_CMD_ID_CLOSE_SESSION: Close session with loaded TA
+ * @TEE_CMD_ID_INVOKE_CMD: Invoke a command with loaded TA
+ * @TEE_CMD_ID_MAP_SHARED_MEM: Map shared memory
+ * @TEE_CMD_ID_UNMAP_SHARED_MEM: Unmap shared memory
+ */
+enum tee_cmd_id {
+ TEE_CMD_ID_LOAD_TA = 1,
+ TEE_CMD_ID_UNLOAD_TA,
+ TEE_CMD_ID_OPEN_SESSION,
+ TEE_CMD_ID_CLOSE_SESSION,
+ TEE_CMD_ID_INVOKE_CMD,
+ TEE_CMD_ID_MAP_SHARED_MEM,
+ TEE_CMD_ID_UNMAP_SHARED_MEM,
+};
+
+#ifdef CONFIG_CRYPTO_DEV_SP_PSP
+/**
+ * psp_tee_process_cmd() - Process command in Trusted Execution Environment
+ * @cmd_id: TEE command ID (&enum tee_cmd_id)
+ * @buf: Command buffer for TEE processing. On success, is updated
+ * with the response
+ * @len: Length of command buffer in bytes
+ * @status: On success, holds the TEE command execution status
+ *
+ * This function submits a command to the Trusted OS for processing in the
+ * TEE environment and waits for a response or until the command times out.
+ *
+ * Returns:
+ * 0 if TEE successfully processed the command
+ * -%ENODEV if PSP device not available
+ * -%EINVAL if invalid input
+ * -%ETIMEDOUT if TEE command timed out
+ * -%EBUSY if PSP device is not responsive
+ */
+int psp_tee_process_cmd(enum tee_cmd_id cmd_id, void *buf, size_t len,
+ u32 *status);
+
+/**
+ * psp_check_tee_status() - Checks whether there is a TEE which a driver can
+ * talk to.
+ *
+ * This function can be used by the AMD-TEE driver to query whether there is
+ * a TEE with which it can communicate.
+ *
+ * Returns:
+ * 0 if the device has TEE
+ * -%ENODEV if there is no TEE available
+ */
+int psp_check_tee_status(void);
+
+#else /* !CONFIG_CRYPTO_DEV_SP_PSP */
+
+static inline int psp_tee_process_cmd(enum tee_cmd_id cmd_id, void *buf,
+ size_t len, u32 *status)
+{
+ return -ENODEV;
+}
+
+static inline int psp_check_tee_status(void)
+{
+ return -ENODEV;
+}
+#endif /* CONFIG_CRYPTO_DEV_SP_PSP */
+#endif /* __PSP_TEE_H_ */
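
A hedged sketch of submitting a command through the interface documented above; the command buffer contents for TEE_CMD_ID_LOAD_TA are defined by the caller and only hinted at here:

#include <linux/psp-tee.h>

static int example_submit_tee_cmd(void *cmd_buf, size_t len)
{
	u32 status;
	int ret;

	if (psp_check_tee_status())
		return -ENODEV;		/* no TEE to talk to */

	ret = psp_tee_process_cmd(TEE_CMD_ID_LOAD_TA, cmd_buf, len, &status);
	if (ret)
		return ret;		/* transport-level failure */

	/* 'status' carries the TEE-level result from the response buffer. */
	return status ? -EIO : 0;
}
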
diff --git a/include/linux/ptdump.h b/include/linux/ptdump.h
new file mode 100644
index 000000000000..a67065c403c3
--- /dev/null
+++ b/include/linux/ptdump.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _LINUX_PTDUMP_H
+#define _LINUX_PTDUMP_H
+
+#include <linux/mm_types.h>
+
+struct ptdump_range {
+ unsigned long start;
+ unsigned long end;
+};
+
+struct ptdump_state {
+ /* level is 0:PGD to 4:PTE, or -1 if unknown */
+ void (*note_page)(struct ptdump_state *st, unsigned long addr,
+ int level, unsigned long val);
+ const struct ptdump_range *range;
+};
+
+void ptdump_walk_pgd(struct ptdump_state *st, struct mm_struct *mm, pgd_t *pgd);
+
+#endif /* _LINUX_PTDUMP_H */
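
A hedged sketch of a ptdump_state user that counts populated leaf entries. The level convention follows the comment above (0:PGD .. 4:PTE); the range values and the {0, 0} terminator are assumptions for illustration:

#include <linux/ptdump.h>

struct example_dump {
	struct ptdump_state st;
	unsigned long present_ptes;
};

static void example_note_page(struct ptdump_state *st, unsigned long addr,
			      int level, unsigned long val)
{
	struct example_dump *d = container_of(st, struct example_dump, st);

	if (level == 4 && val)		/* a populated PTE */
		d->present_ptes++;
}

static const struct ptdump_range example_range[] = {
	{ PAGE_OFFSET, ~0UL },		/* illustrative kernel-space range */
	{ 0, 0 },			/* assumed terminator */
};

/* The walk itself is driven by ptdump_walk_pgd(), declared above, after
 * setting .note_page = example_note_page and .range = example_range.
 */
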
diff --git a/include/linux/ptp_clock_kernel.h b/include/linux/ptp_clock_kernel.h
index 93cc4f1d444a..c64a1ef87240 100644
--- a/include/linux/ptp_clock_kernel.h
+++ b/include/linux/ptp_clock_kernel.h
@@ -243,6 +243,13 @@ int ptp_find_pin(struct ptp_clock *ptp,
int ptp_schedule_worker(struct ptp_clock *ptp, unsigned long delay);
+/**
+ * ptp_cancel_worker_sync() - cancel the PTP clock's auxiliary worker
+ *
+ * @ptp: The clock obtained from ptp_clock_register().
+ */
+void ptp_cancel_worker_sync(struct ptp_clock *ptp);
+
#else
static inline struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
struct device *parent)
@@ -260,6 +267,8 @@ static inline int ptp_find_pin(struct ptp_clock *ptp,
static inline int ptp_schedule_worker(struct ptp_clock *ptp,
unsigned long delay)
{ return -EOPNOTSUPP; }
+static inline void ptp_cancel_worker_sync(struct ptp_clock *ptp)
+{ }
#endif
diff --git a/include/linux/ptr_ring.h b/include/linux/ptr_ring.h
index 0abe9a4fc842..417db0a79a62 100644
--- a/include/linux/ptr_ring.h
+++ b/include/linux/ptr_ring.h
@@ -23,6 +23,7 @@
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
+#include <linux/mm.h>
#include <asm/errno.h>
#endif
diff --git a/include/linux/pwm.h b/include/linux/pwm.h
index b2c9c460947d..0ef808d925bb 100644
--- a/include/linux/pwm.h
+++ b/include/linux/pwm.h
@@ -243,10 +243,7 @@ pwm_set_relative_duty_cycle(struct pwm_state *state, unsigned int duty_cycle,
* @request: optional hook for requesting a PWM
* @free: optional hook for freeing a PWM
* @capture: capture and report PWM signal
- * @apply: atomically apply a new PWM config. The state argument
- * should be adjusted with the real hardware config (if the
- * approximate the period or duty_cycle value, state should
- * reflect it)
+ * @apply: atomically apply a new PWM config
* @get_state: get the current PWM state. This function is only
* called once per PWM device when the PWM chip is
* registered.
diff --git a/include/linux/pxa2xx_ssp.h b/include/linux/pxa2xx_ssp.h
index a5d1837e4f35..6facf27865f9 100644
--- a/include/linux/pxa2xx_ssp.h
+++ b/include/linux/pxa2xx_ssp.h
@@ -206,7 +206,7 @@ enum pxa_ssp_type {
};
struct ssp_device {
- struct platform_device *pdev;
+ struct device *dev;
struct list_head node;
struct clk *clk;
diff --git a/include/linux/qcom_scm.h b/include/linux/qcom_scm.h
index 2d5eff506e13..3d6a24697761 100644
--- a/include/linux/qcom_scm.h
+++ b/include/linux/qcom_scm.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-only */
-/* Copyright (c) 2010-2015, 2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2010-2015, 2018-2019 The Linux Foundation. All rights reserved.
* Copyright (C) 2015 Linaro Ltd.
*/
#ifndef __QCOM_SCM_H
@@ -24,6 +24,26 @@ struct qcom_scm_vmperm {
int perm;
};
+enum qcom_scm_ocmem_client {
+ QCOM_SCM_OCMEM_UNUSED_ID = 0x0,
+ QCOM_SCM_OCMEM_GRAPHICS_ID,
+ QCOM_SCM_OCMEM_VIDEO_ID,
+ QCOM_SCM_OCMEM_LP_AUDIO_ID,
+ QCOM_SCM_OCMEM_SENSORS_ID,
+ QCOM_SCM_OCMEM_OTHER_OS_ID,
+ QCOM_SCM_OCMEM_DEBUG_ID,
+};
+
+enum qcom_scm_sec_dev_id {
+ QCOM_SCM_MDSS_DEV_ID = 1,
+ QCOM_SCM_OCMEM_DEV_ID = 5,
+ QCOM_SCM_PCIE0_DEV_ID = 11,
+ QCOM_SCM_PCIE1_DEV_ID = 12,
+ QCOM_SCM_GFX_DEV_ID = 18,
+ QCOM_SCM_UFS_DEV_ID = 19,
+ QCOM_SCM_ICE_DEV_ID = 20,
+};
+
#define QCOM_SCM_VMID_HLOS 0x3
#define QCOM_SCM_VMID_MSS_MSA 0xF
#define QCOM_SCM_VMID_WLAN 0x18
@@ -35,69 +55,94 @@ struct qcom_scm_vmperm {
#define QCOM_SCM_PERM_RWX (QCOM_SCM_PERM_RW | QCOM_SCM_PERM_EXEC)
#if IS_ENABLED(CONFIG_QCOM_SCM)
+extern bool qcom_scm_is_available(void);
+
extern int qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus);
extern int qcom_scm_set_warm_boot_addr(void *entry, const cpumask_t *cpus);
-extern bool qcom_scm_is_available(void);
-extern bool qcom_scm_hdcp_available(void);
-extern int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt,
- u32 *resp);
-extern bool qcom_scm_pas_supported(u32 peripheral);
+extern void qcom_scm_cpu_power_down(u32 flags);
+extern int qcom_scm_set_remote_state(u32 state, u32 id);
+
extern int qcom_scm_pas_init_image(u32 peripheral, const void *metadata,
size_t size);
extern int qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr,
phys_addr_t size);
extern int qcom_scm_pas_auth_and_reset(u32 peripheral);
extern int qcom_scm_pas_shutdown(u32 peripheral);
+extern bool qcom_scm_pas_supported(u32 peripheral);
+
+extern int qcom_scm_io_readl(phys_addr_t addr, unsigned int *val);
+extern int qcom_scm_io_writel(phys_addr_t addr, unsigned int val);
+
+extern bool qcom_scm_restore_sec_cfg_available(void);
+extern int qcom_scm_restore_sec_cfg(u32 device_id, u32 spare);
+extern int qcom_scm_iommu_secure_ptbl_size(u32 spare, size_t *size);
+extern int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare);
extern int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz,
unsigned int *src,
const struct qcom_scm_vmperm *newvm,
unsigned int dest_cnt);
-extern void qcom_scm_cpu_power_down(u32 flags);
-extern u32 qcom_scm_get_version(void);
-extern int qcom_scm_set_remote_state(u32 state, u32 id);
-extern int qcom_scm_restore_sec_cfg(u32 device_id, u32 spare);
-extern int qcom_scm_iommu_secure_ptbl_size(u32 spare, size_t *size);
-extern int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare);
-extern int qcom_scm_io_readl(phys_addr_t addr, unsigned int *val);
-extern int qcom_scm_io_writel(phys_addr_t addr, unsigned int val);
+
+extern bool qcom_scm_ocmem_lock_available(void);
+extern int qcom_scm_ocmem_lock(enum qcom_scm_ocmem_client id, u32 offset,
+ u32 size, u32 mode);
+extern int qcom_scm_ocmem_unlock(enum qcom_scm_ocmem_client id, u32 offset,
+ u32 size);
+
+extern bool qcom_scm_hdcp_available(void);
+extern int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt,
+ u32 *resp);
+
+extern int qcom_scm_qsmmu500_wait_safe_toggle(bool en);
#else
#include <linux/errno.h>
-static inline
-int qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus)
-{
- return -ENODEV;
-}
-static inline
-int qcom_scm_set_warm_boot_addr(void *entry, const cpumask_t *cpus)
-{
- return -ENODEV;
-}
static inline bool qcom_scm_is_available(void) { return false; }
-static inline bool qcom_scm_hdcp_available(void) { return false; }
-static inline int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt,
- u32 *resp) { return -ENODEV; }
-static inline bool qcom_scm_pas_supported(u32 peripheral) { return false; }
+
+static inline int qcom_scm_set_cold_boot_addr(void *entry,
+ const cpumask_t *cpus) { return -ENODEV; }
+static inline int qcom_scm_set_warm_boot_addr(void *entry,
+ const cpumask_t *cpus) { return -ENODEV; }
+static inline void qcom_scm_cpu_power_down(u32 flags) {}
+static inline int qcom_scm_set_remote_state(u32 state, u32 id)
+ { return -ENODEV; }
+
static inline int qcom_scm_pas_init_image(u32 peripheral, const void *metadata,
- size_t size) { return -ENODEV; }
+ size_t size) { return -ENODEV; }
static inline int qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr,
- phys_addr_t size) { return -ENODEV; }
-static inline int
-qcom_scm_pas_auth_and_reset(u32 peripheral) { return -ENODEV; }
+ phys_addr_t size) { return -ENODEV; }
+static inline int qcom_scm_pas_auth_and_reset(u32 peripheral)
+ { return -ENODEV; }
static inline int qcom_scm_pas_shutdown(u32 peripheral) { return -ENODEV; }
+static inline bool qcom_scm_pas_supported(u32 peripheral) { return false; }
+
+static inline int qcom_scm_io_readl(phys_addr_t addr, unsigned int *val)
+ { return -ENODEV; }
+static inline int qcom_scm_io_writel(phys_addr_t addr, unsigned int val)
+ { return -ENODEV; }
+
+static inline bool qcom_scm_restore_sec_cfg_available(void) { return false; }
+static inline int qcom_scm_restore_sec_cfg(u32 device_id, u32 spare)
+ { return -ENODEV; }
+static inline int qcom_scm_iommu_secure_ptbl_size(u32 spare, size_t *size)
+ { return -ENODEV; }
+static inline int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare)
+ { return -ENODEV; }
static inline int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz,
- unsigned int *src,
- const struct qcom_scm_vmperm *newvm,
- unsigned int dest_cnt) { return -ENODEV; }
-static inline void qcom_scm_cpu_power_down(u32 flags) {}
-static inline u32 qcom_scm_get_version(void) { return 0; }
-static inline u32
-qcom_scm_set_remote_state(u32 state,u32 id) { return -ENODEV; }
-static inline int qcom_scm_restore_sec_cfg(u32 device_id, u32 spare) { return -ENODEV; }
-static inline int qcom_scm_iommu_secure_ptbl_size(u32 spare, size_t *size) { return -ENODEV; }
-static inline int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare) { return -ENODEV; }
-static inline int qcom_scm_io_readl(phys_addr_t addr, unsigned int *val) { return -ENODEV; }
-static inline int qcom_scm_io_writel(phys_addr_t addr, unsigned int val) { return -ENODEV; }
+ unsigned int *src, const struct qcom_scm_vmperm *newvm,
+ unsigned int dest_cnt) { return -ENODEV; }
+
+static inline bool qcom_scm_ocmem_lock_available(void) { return false; }
+static inline int qcom_scm_ocmem_lock(enum qcom_scm_ocmem_client id, u32 offset,
+ u32 size, u32 mode) { return -ENODEV; }
+static inline int qcom_scm_ocmem_unlock(enum qcom_scm_ocmem_client id,
+ u32 offset, u32 size) { return -ENODEV; }
+
+static inline bool qcom_scm_hdcp_available(void) { return false; }
+static inline int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt,
+ u32 *resp) { return -ENODEV; }
+
+static inline int qcom_scm_qsmmu500_wait_safe_toggle(bool en)
+ { return -ENODEV; }
#endif
#endif
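
A hedged sketch of the new OCMEM lock call, guarded by its availability helper; the offset, size and mode arguments are illustrative:

#include <linux/qcom_scm.h>

static int example_lock_gfx_ocmem(void)
{
	if (!qcom_scm_ocmem_lock_available())
		return -ENODEV;

	/* Lock 256 KiB at offset 0 for the graphics client, mode 0. */
	return qcom_scm_ocmem_lock(QCOM_SCM_OCMEM_GRAPHICS_ID, 0, 0x40000, 0);
}
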
diff --git a/include/linux/qed/common_hsi.h b/include/linux/qed/common_hsi.h
index 03f59a28fefd..2c4737e6694a 100644
--- a/include/linux/qed/common_hsi.h
+++ b/include/linux/qed/common_hsi.h
@@ -76,7 +76,6 @@
#define FW_ASSERT_GENERAL_ATTN_IDX 32
-#define MAX_PINNED_CCFC 32
/* Queue Zone sizes in bytes */
#define TSTORM_QZONE_SIZE 8
@@ -105,12 +104,19 @@
#define CORE_SPQE_PAGE_SIZE_BYTES 4096
-#define MAX_NUM_LL2_RX_QUEUES 48
-#define MAX_NUM_LL2_TX_STATS_COUNTERS 48
+/* Number of LL2 RAM based queues */
+#define MAX_NUM_LL2_RX_RAM_QUEUES 32
+
+/* Number of LL2 context based queues */
+#define MAX_NUM_LL2_RX_CTX_QUEUES 208
+#define MAX_NUM_LL2_RX_QUEUES \
+ (MAX_NUM_LL2_RX_RAM_QUEUES + MAX_NUM_LL2_RX_CTX_QUEUES)
+
+#define MAX_NUM_LL2_TX_STATS_COUNTERS 48
#define FW_MAJOR_VERSION 8
-#define FW_MINOR_VERSION 37
-#define FW_REVISION_VERSION 7
+#define FW_MINOR_VERSION 42
+#define FW_REVISION_VERSION 2
#define FW_ENGINEERING_VERSION 0
/***********************/
@@ -132,10 +138,10 @@
#define MAX_NUM_VFS (MAX_NUM_VFS_K2)
#define MAX_NUM_FUNCTIONS_BB (MAX_NUM_PFS_BB + MAX_NUM_VFS_BB)
-#define MAX_NUM_FUNCTIONS (MAX_NUM_PFS + MAX_NUM_VFS)
#define MAX_FUNCTION_NUMBER_BB (MAX_NUM_PFS + MAX_NUM_VFS_BB)
-#define MAX_FUNCTION_NUMBER (MAX_NUM_PFS + MAX_NUM_VFS)
+#define MAX_FUNCTION_NUMBER_K2 (MAX_NUM_PFS + MAX_NUM_VFS_K2)
+#define MAX_NUM_FUNCTIONS (MAX_FUNCTION_NUMBER_K2)
#define MAX_NUM_VPORTS_K2 (208)
#define MAX_NUM_VPORTS_BB (160)
@@ -222,6 +228,7 @@
#define DQ_XCM_TOE_TX_BD_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4
#define DQ_XCM_TOE_MORE_TO_SEND_SEQ_CMD DQ_XCM_AGG_VAL_SEL_REG3
#define DQ_XCM_TOE_LOCAL_ADV_WND_SEQ_CMD DQ_XCM_AGG_VAL_SEL_REG4
+#define DQ_XCM_ROCE_ACK_EDPM_DORQ_SEQ_CMD DQ_XCM_AGG_VAL_SEL_WORD5
/* UCM agg val selection (HW) */
#define DQ_UCM_AGG_VAL_SEL_WORD0 0
@@ -340,6 +347,10 @@
#define DQ_PWM_OFFSET_TCM_ROCE_RQ_PROD (DQ_PWM_OFFSET_TCM16_BASE + 1)
#define DQ_PWM_OFFSET_TCM_IWARP_RQ_PROD (DQ_PWM_OFFSET_TCM16_BASE + 3)
+/* DQ_DEMS_AGG_VAL_BASE */
+#define DQ_PWM_OFFSET_TCM_LL2_PROD_UPDATE \
+ (DQ_PWM_OFFSET_TCM32_BASE + DQ_TCM_AGG_VAL_SEL_REG9 - 4)
+
#define DQ_REGION_SHIFT (12)
/* DPM */
@@ -395,6 +406,7 @@
/* Number of Protocol Indices per Status Block */
#define PIS_PER_SB_E4 12
+#define MAX_PIS_PER_SB PIS_PER_SB
#define CAU_HC_STOPPED_STATE 3
#define CAU_HC_DISABLE_STATE 4
@@ -425,8 +437,6 @@
#define IGU_MEM_PBA_MSIX_RESERVED_UPPER 0x03ff
#define IGU_CMD_INT_ACK_BASE 0x0400
-#define IGU_CMD_INT_ACK_UPPER (IGU_CMD_INT_ACK_BASE + \
- MAX_TOT_SB_PER_PATH - 1)
#define IGU_CMD_INT_ACK_RESERVED_UPPER 0x05ff
#define IGU_CMD_ATTN_BIT_UPD_UPPER 0x05f0
@@ -439,8 +449,6 @@
#define IGU_REG_SISR_MDPC_WOMASK_UPPER 0x05f6
#define IGU_CMD_PROD_UPD_BASE 0x0600
-#define IGU_CMD_PROD_UPD_UPPER (IGU_CMD_PROD_UPD_BASE +\
- MAX_TOT_SB_PER_PATH - 1)
#define IGU_CMD_PROD_UPD_RESERVED_UPPER 0x07ff
/*****************/
@@ -652,8 +660,8 @@
#define PBF_MAX_CMD_LINES 3328
/* Number of BTB blocks. Each block is 256B. */
-#define BTB_MAX_BLOCKS 1440
-
+#define BTB_MAX_BLOCKS_BB 1440
+#define BTB_MAX_BLOCKS_K2 1840
/*****************/
/* PRS CONSTANTS */
/*****************/
@@ -730,6 +738,8 @@ enum protocol_type {
PROTOCOLID_PREROCE,
PROTOCOLID_COMMON,
PROTOCOLID_RESERVED1,
+ PROTOCOLID_RDMA,
+ PROTOCOLID_SCSI,
MAX_PROTOCOL_TYPE
};
@@ -750,6 +760,10 @@ union rdma_eqe_data {
struct rdma_eqe_destroy_qp rdma_destroy_qp_data;
};
+struct tstorm_queue_zone {
+ __le32 reserved[2];
+};
+
/* Ustorm Queue Zone */
struct ustorm_eth_queue_zone {
struct coalescing_timeset int_coalescing_timeset;
@@ -872,8 +886,8 @@ struct db_l2_dpm_data {
#define DB_L2_DPM_DATA_RESERVED0_SHIFT 27
#define DB_L2_DPM_DATA_SGE_NUM_MASK 0x7
#define DB_L2_DPM_DATA_SGE_NUM_SHIFT 28
-#define DB_L2_DPM_DATA_GFS_SRC_EN_MASK 0x1
-#define DB_L2_DPM_DATA_GFS_SRC_EN_SHIFT 31
+#define DB_L2_DPM_DATA_TGFS_SRC_EN_MASK 0x1
+#define DB_L2_DPM_DATA_TGFS_SRC_EN_SHIFT 31
};
/* Structure for SGE in a DPM doorbell of type DPM_L2_BD */
diff --git a/include/linux/qed/eth_common.h b/include/linux/qed/eth_common.h
index d9416ad5ef59..95f5fd615852 100644
--- a/include/linux/qed/eth_common.h
+++ b/include/linux/qed/eth_common.h
@@ -38,9 +38,11 @@
/********************/
#define ETH_HSI_VER_MAJOR 3
-#define ETH_HSI_VER_MINOR 10
+#define ETH_HSI_VER_MINOR 11
-#define ETH_HSI_VER_NO_PKT_LEN_TUNN 5
+#define ETH_HSI_VER_NO_PKT_LEN_TUNN 5
+/* Maximum number of pinned L2 connections (CIDs) */
+#define ETH_PINNED_CONN_MAX_NUM 32
#define ETH_CACHE_LINE_SIZE 64
#define ETH_RX_CQE_GAP 32
@@ -61,6 +63,7 @@
#define ETH_TX_MIN_BDS_PER_TUNN_IPV6_WITH_EXT_PKT 3
#define ETH_TX_MIN_BDS_PER_IPV6_WITH_EXT_PKT 2
#define ETH_TX_MIN_BDS_PER_PKT_W_LOOPBACK_MODE 2
+#define ETH_TX_MIN_BDS_PER_PKT_W_VPORT_FORWARDING 4
#define ETH_TX_MAX_NON_LSO_PKT_LEN (9700 - (4 + 4 + 12 + 8))
#define ETH_TX_MAX_LSO_HDR_BYTES 510
#define ETH_TX_LSO_WINDOW_BDS_NUM (18 - 1)
@@ -75,9 +78,8 @@
#define ETH_NUM_STATISTIC_COUNTERS_QUAD_VF_ZONE \
(ETH_NUM_STATISTIC_COUNTERS - 3 * MAX_NUM_VFS / 4)
-/* Maximum number of buffers, used for RX packet placement */
#define ETH_RX_MAX_BUFF_PER_PKT 5
-#define ETH_RX_BD_THRESHOLD 12
+#define ETH_RX_BD_THRESHOLD 16
/* Num of MAC/VLAN filters */
#define ETH_NUM_MAC_FILTERS 512
@@ -96,24 +98,24 @@
#define ETH_RSS_ENGINE_NUM_BB 127
/* TPA constants */
-#define ETH_TPA_MAX_AGGS_NUM 64
-#define ETH_TPA_CQE_START_LEN_LIST_SIZE ETH_RX_MAX_BUFF_PER_PKT
-#define ETH_TPA_CQE_CONT_LEN_LIST_SIZE 6
-#define ETH_TPA_CQE_END_LEN_LIST_SIZE 4
+#define ETH_TPA_MAX_AGGS_NUM 64
+#define ETH_TPA_CQE_START_BW_LEN_LIST_SIZE 2
+#define ETH_TPA_CQE_CONT_LEN_LIST_SIZE 6
+#define ETH_TPA_CQE_END_LEN_LIST_SIZE 4
/* Control frame check constants */
-#define ETH_CTL_FRAME_ETH_TYPE_NUM 4
+#define ETH_CTL_FRAME_ETH_TYPE_NUM 4
/* GFS constants */
#define ETH_GFT_TRASHCAN_VPORT 0x1FF /* GFT drop flow vport number */
/* Destination port mode */
-enum dest_port_mode {
- DEST_PORT_PHY,
- DEST_PORT_LOOPBACK,
- DEST_PORT_PHY_LOOPBACK,
- DEST_PORT_DROP,
- MAX_DEST_PORT_MODE
+enum dst_port_mode {
+ DST_PORT_PHY,
+ DST_PORT_LOOPBACK,
+ DST_PORT_PHY_LOOPBACK,
+ DST_PORT_DROP,
+ MAX_DST_PORT_MODE
};
/* Ethernet address type */
@@ -167,8 +169,8 @@ struct eth_tx_data_2nd_bd {
#define ETH_TX_DATA_2ND_BD_TUNN_INNER_L2_HDR_SIZE_W_SHIFT 0
#define ETH_TX_DATA_2ND_BD_TUNN_INNER_ETH_TYPE_MASK 0x3
#define ETH_TX_DATA_2ND_BD_TUNN_INNER_ETH_TYPE_SHIFT 4
-#define ETH_TX_DATA_2ND_BD_DEST_PORT_MODE_MASK 0x3
-#define ETH_TX_DATA_2ND_BD_DEST_PORT_MODE_SHIFT 6
+#define ETH_TX_DATA_2ND_BD_DST_PORT_MODE_MASK 0x3
+#define ETH_TX_DATA_2ND_BD_DST_PORT_MODE_SHIFT 6
#define ETH_TX_DATA_2ND_BD_START_BD_MASK 0x1
#define ETH_TX_DATA_2ND_BD_START_BD_SHIFT 8
#define ETH_TX_DATA_2ND_BD_TUNN_TYPE_MASK 0x3
@@ -244,8 +246,9 @@ struct eth_fast_path_rx_reg_cqe {
struct eth_tunnel_parsing_flags tunnel_pars_flags;
u8 bd_num;
u8 reserved;
- __le16 flow_id;
- u8 reserved1[11];
+ __le16 reserved2;
+ __le32 flow_id_or_resource_id;
+ u8 reserved1[7];
struct eth_pmd_flow_flags pmd_flags;
};
@@ -296,9 +299,10 @@ struct eth_fast_path_rx_tpa_start_cqe {
struct eth_tunnel_parsing_flags tunnel_pars_flags;
u8 tpa_agg_index;
u8 header_len;
- __le16 ext_bd_len_list[ETH_TPA_CQE_START_LEN_LIST_SIZE];
- __le16 flow_id;
- u8 reserved;
+ __le16 bw_ext_bd_len_list[ETH_TPA_CQE_START_BW_LEN_LIST_SIZE];
+ __le16 reserved2;
+ __le32 flow_id_or_resource_id;
+ u8 reserved[3];
struct eth_pmd_flow_flags pmd_flags;
};
@@ -407,6 +411,29 @@ struct eth_tx_3rd_bd {
struct eth_tx_data_3rd_bd data;
};
+/* The parsing information data for the fourth tx bd of a given packet. */
+struct eth_tx_data_4th_bd {
+ u8 dst_vport_id;
+ u8 reserved4;
+ __le16 bitfields;
+#define ETH_TX_DATA_4TH_BD_DST_VPORT_ID_VALID_MASK 0x1
+#define ETH_TX_DATA_4TH_BD_DST_VPORT_ID_VALID_SHIFT 0
+#define ETH_TX_DATA_4TH_BD_RESERVED1_MASK 0x7F
+#define ETH_TX_DATA_4TH_BD_RESERVED1_SHIFT 1
+#define ETH_TX_DATA_4TH_BD_START_BD_MASK 0x1
+#define ETH_TX_DATA_4TH_BD_START_BD_SHIFT 8
+#define ETH_TX_DATA_4TH_BD_RESERVED2_MASK 0x7F
+#define ETH_TX_DATA_4TH_BD_RESERVED2_SHIFT 9
+ __le16 reserved3;
+};
+
+/* The fourth tx bd of a given packet */
+struct eth_tx_4th_bd {
+ struct regpair addr; /* Single continuous buffer */
+ __le16 nbytes; /* Number of bytes in this BD */
+ struct eth_tx_data_4th_bd data; /* Parsing information data */
+};
+
/* Complementary information for the regular tx bd of a given packet */
struct eth_tx_data_bd {
__le16 reserved0;
@@ -431,6 +458,7 @@ union eth_tx_bd_types {
struct eth_tx_1st_bd first_bd;
struct eth_tx_2nd_bd second_bd;
struct eth_tx_3rd_bd third_bd;
+ struct eth_tx_4th_bd fourth_bd;
struct eth_tx_bd reg_bd;
};
@@ -443,6 +471,12 @@ enum eth_tx_tunn_type {
MAX_ETH_TX_TUNN_TYPE
};
+/* Mstorm Queue Zone */
+struct mstorm_eth_queue_zone {
+ struct eth_rx_prod_data rx_producers;
+ __le32 reserved[3];
+};
+
/* Ystorm Queue Zone */
struct xstorm_eth_queue_zone {
struct coalescing_timeset int_coalescing_timeset;
diff --git a/include/linux/qed/iscsi_common.h b/include/linux/qed/iscsi_common.h
index 66aba505ec56..2f0a771a9176 100644
--- a/include/linux/qed/iscsi_common.h
+++ b/include/linux/qed/iscsi_common.h
@@ -999,7 +999,6 @@ struct iscsi_conn_offload_params {
struct regpair r2tq_pbl_addr;
struct regpair xhq_pbl_addr;
struct regpair uhq_pbl_addr;
- __le32 initial_ack;
__le16 physical_q0;
__le16 physical_q1;
u8 flags;
@@ -1011,10 +1010,10 @@ struct iscsi_conn_offload_params {
#define ISCSI_CONN_OFFLOAD_PARAMS_RESTRICTED_MODE_SHIFT 2
#define ISCSI_CONN_OFFLOAD_PARAMS_RESERVED1_MASK 0x1F
#define ISCSI_CONN_OFFLOAD_PARAMS_RESERVED1_SHIFT 3
- u8 pbl_page_size_log;
- u8 pbe_page_size_log;
u8 default_cq;
+ __le16 reserved0;
__le32 stat_sn;
+ __le32 initial_ack;
};
/* iSCSI connection statistics */
@@ -1029,25 +1028,14 @@ struct iscsi_conn_stats_params {
__le32 reserved;
};
-/* spe message header */
-struct iscsi_slow_path_hdr {
- u8 op_code;
- u8 flags;
-#define ISCSI_SLOW_PATH_HDR_RESERVED0_MASK 0xF
-#define ISCSI_SLOW_PATH_HDR_RESERVED0_SHIFT 0
-#define ISCSI_SLOW_PATH_HDR_LAYER_CODE_MASK 0x7
-#define ISCSI_SLOW_PATH_HDR_LAYER_CODE_SHIFT 4
-#define ISCSI_SLOW_PATH_HDR_RESERVED1_MASK 0x1
-#define ISCSI_SLOW_PATH_HDR_RESERVED1_SHIFT 7
-};
/* iSCSI connection update params passed by driver to FW in ISCSI update
*ramrod.
*/
struct iscsi_conn_update_ramrod_params {
- struct iscsi_slow_path_hdr hdr;
+ __le16 reserved0;
__le16 conn_id;
- __le32 fw_cid;
+ __le32 reserved1;
u8 flags;
#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_HD_EN_MASK 0x1
#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_HD_EN_SHIFT 0
@@ -1065,7 +1053,7 @@ struct iscsi_conn_update_ramrod_params {
#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_ON_IMM_EN_SHIFT 6
#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_LUN_MAPPER_EN_MASK 0x1
#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_LUN_MAPPER_EN_SHIFT 7
- u8 reserved0[3];
+ u8 reserved3[3];
__le32 max_seq_size;
__le32 max_send_pdu_length;
__le32 max_recv_pdu_length;
@@ -1251,22 +1239,22 @@ enum iscsi_ramrod_cmd_id {
/* iSCSI connection termination request */
struct iscsi_spe_conn_mac_update {
- struct iscsi_slow_path_hdr hdr;
+ __le16 reserved0;
__le16 conn_id;
- __le32 fw_cid;
+ __le32 reserved1;
__le16 remote_mac_addr_lo;
__le16 remote_mac_addr_mid;
__le16 remote_mac_addr_hi;
- u8 reserved0[2];
+ u8 reserved2[2];
};
/* iSCSI and TCP connection (Option 1) offload params passed by driver to FW in
* iSCSI offload ramrod.
*/
struct iscsi_spe_conn_offload {
- struct iscsi_slow_path_hdr hdr;
+ __le16 reserved0;
__le16 conn_id;
- __le32 fw_cid;
+ __le32 reserved1;
struct iscsi_conn_offload_params iscsi;
struct tcp_offload_params tcp;
};
@@ -1275,44 +1263,36 @@ struct iscsi_spe_conn_offload {
* iSCSI offload ramrod.
*/
struct iscsi_spe_conn_offload_option2 {
- struct iscsi_slow_path_hdr hdr;
+ __le16 reserved0;
__le16 conn_id;
- __le32 fw_cid;
+ __le32 reserved1;
struct iscsi_conn_offload_params iscsi;
struct tcp_offload_params_opt2 tcp;
};
/* iSCSI collect connection statistics request */
struct iscsi_spe_conn_statistics {
- struct iscsi_slow_path_hdr hdr;
+ __le16 reserved0;
__le16 conn_id;
- __le32 fw_cid;
+ __le32 reserved1;
u8 reset_stats;
- u8 reserved0[7];
+ u8 reserved2[7];
struct regpair stats_cnts_addr;
};
/* iSCSI connection termination request */
struct iscsi_spe_conn_termination {
- struct iscsi_slow_path_hdr hdr;
+ __le16 reserved0;
__le16 conn_id;
- __le32 fw_cid;
+ __le32 reserved1;
u8 abortive;
- u8 reserved0[7];
+ u8 reserved2[7];
struct regpair queue_cnts_addr;
struct regpair query_params_addr;
};
-/* iSCSI firmware function destroy parameters */
-struct iscsi_spe_func_dstry {
- struct iscsi_slow_path_hdr hdr;
- __le16 reserved0;
- __le32 reserved1;
-};
-
/* iSCSI firmware function init parameters */
struct iscsi_spe_func_init {
- struct iscsi_slow_path_hdr hdr;
__le16 half_way_close_timeout;
u8 num_sq_pages_in_ring;
u8 num_r2tq_pages_in_ring;
@@ -1324,8 +1304,12 @@ struct iscsi_spe_func_init {
#define ISCSI_SPE_FUNC_INIT_RESERVED0_MASK 0x7F
#define ISCSI_SPE_FUNC_INIT_RESERVED0_SHIFT 1
struct iscsi_debug_modes debug_mode;
- __le16 reserved1;
- __le32 reserved2;
+ u8 params;
+#define ISCSI_SPE_FUNC_INIT_MAX_SYN_RT_MASK 0xF
+#define ISCSI_SPE_FUNC_INIT_MAX_SYN_RT_SHIFT 0
+#define ISCSI_SPE_FUNC_INIT_RESERVED1_MASK 0xF
+#define ISCSI_SPE_FUNC_INIT_RESERVED1_SHIFT 4
+ u8 reserved2[7];
struct scsi_init_func_params func_params;
struct scsi_init_func_queues q_params;
};
diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h
index b5db1ee96d78..8f29e0d8a7b3 100644
--- a/include/linux/qed/qed_if.h
+++ b/include/linux/qed/qed_if.h
@@ -159,6 +159,7 @@ struct qed_dcbx_get {
enum qed_nvm_images {
QED_NVM_IMAGE_ISCSI_CFG,
QED_NVM_IMAGE_FCOE_CFG,
+ QED_NVM_IMAGE_MDUMP,
QED_NVM_IMAGE_NVM_CFG1,
QED_NVM_IMAGE_DEFAULT_CFG,
QED_NVM_IMAGE_NVM_META,
@@ -463,7 +464,7 @@ enum qed_db_rec_space {
#define DIRECT_REG_RD(reg_addr) readl((void __iomem *)(reg_addr))
-#define DIRECT_REG_WR64(reg_addr, val) writeq((u32)val, \
+#define DIRECT_REG_WR64(reg_addr, val) writeq((u64)val, \
(void __iomem *)(reg_addr))
#define QED_COALESCE_MAX 0x1FF
@@ -1177,6 +1178,17 @@ struct qed_common_ops {
#define GET_FIELD(value, name) \
(((value) >> (name ## _SHIFT)) & name ## _MASK)
+#define GET_MFW_FIELD(name, field) \
+ (((name) & (field ## _MASK)) >> (field ## _OFFSET))
+
+#define SET_MFW_FIELD(name, field, value) \
+ do { \
+ (name) &= ~(field ## _MASK); \
+ (name) |= (((value) << (field ## _OFFSET)) & (field ## _MASK));\
+ } while (0)
+
+#define DB_ADDR_SHIFT(addr) ((addr) << DB_PWM_ADDR_OFFSET_SHIFT)
+
/* Debug print definitions */
#define DP_ERR(cdev, fmt, ...) \
do { \
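Unlike GET_FIELD()/SET_FIELD() above, the new MFW accessors pair an already-shifted _MASK with an _OFFSET. A small sketch with made-up field definitions (DRV_STATUS_LED_MODE_* are placeholders, not real MFW registers):

#include <linux/qed/qed_if.h>

/* Hypothetical MFW-style field occupying bits 4..7 of a status word. */
#define DRV_STATUS_LED_MODE_MASK        0x000000f0
#define DRV_STATUS_LED_MODE_OFFSET      4

static u32 set_led_mode(u32 status, u32 mode)
{
        SET_MFW_FIELD(status, DRV_STATUS_LED_MODE, mode);
        return status;
}

static u32 get_led_mode(u32 status)
{
        return GET_MFW_FIELD(status, DRV_STATUS_LED_MODE);
}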
diff --git a/include/linux/qed/qed_ll2_if.h b/include/linux/qed/qed_ll2_if.h
index 5eb022953aca..1313c34d9a68 100644
--- a/include/linux/qed/qed_ll2_if.h
+++ b/include/linux/qed/qed_ll2_if.h
@@ -52,6 +52,12 @@ enum qed_ll2_conn_type {
QED_LL2_TYPE_ROCE,
QED_LL2_TYPE_IWARP,
QED_LL2_TYPE_RESERVED3,
+ MAX_QED_LL2_CONN_TYPE
+};
+
+enum qed_ll2_rx_conn_type {
+ QED_LL2_RX_TYPE_LEGACY,
+ QED_LL2_RX_TYPE_CTX,
MAX_QED_LL2_RX_CONN_TYPE
};
@@ -165,6 +171,7 @@ struct qed_ll2_cbs {
};
struct qed_ll2_acquire_data_inputs {
+ enum qed_ll2_rx_conn_type rx_conn_type;
enum qed_ll2_conn_type conn_type;
u16 mtu;
u16 rx_num_desc;
diff --git a/include/linux/qed/storage_common.h b/include/linux/qed/storage_common.h
index 505c0b48a761..9a973ffbbff5 100644
--- a/include/linux/qed/storage_common.h
+++ b/include/linux/qed/storage_common.h
@@ -107,8 +107,9 @@ struct scsi_drv_cmdq {
struct scsi_init_func_params {
__le16 num_tasks;
u8 log_page_size;
+ u8 log_page_size_conn;
u8 debug_mode;
- u8 reserved2[12];
+ u8 reserved2[11];
};
/* SCSI RQ/CQ/CMDQ firmware function init parameters */
diff --git a/include/linux/quota.h b/include/linux/quota.h
index f32dd270b8e3..27aab84fcbaa 100644
--- a/include/linux/quota.h
+++ b/include/linux/quota.h
@@ -263,7 +263,7 @@ enum {
};
struct dqstats {
- int stat[_DQST_DQSTAT_LAST];
+ unsigned long stat[_DQST_DQSTAT_LAST];
struct percpu_counter counter[_DQST_DQSTAT_LAST];
};
diff --git a/include/linux/quotaops.h b/include/linux/quotaops.h
index 185d94829701..9cf0cd3dc88c 100644
--- a/include/linux/quotaops.h
+++ b/include/linux/quotaops.h
@@ -54,6 +54,16 @@ static inline struct dquot *dqgrab(struct dquot *dquot)
atomic_inc(&dquot->dq_count);
return dquot;
}
+
+static inline bool dquot_is_busy(struct dquot *dquot)
+{
+ if (test_bit(DQ_MOD_B, &dquot->dq_flags))
+ return true;
+ if (atomic_read(&dquot->dq_count) > 1)
+ return true;
+ return false;
+}
+
void dqput(struct dquot *dquot);
int dquot_scan_active(struct super_block *sb,
int (*fn)(struct dquot *dquot, unsigned long priv),
@@ -87,7 +97,9 @@ int dquot_mark_dquot_dirty(struct dquot *dquot);
int dquot_file_open(struct inode *inode, struct file *file);
-int dquot_enable(struct inode *inode, int type, int format_id,
+int dquot_load_quota_sb(struct super_block *sb, int type, int format_id,
+ unsigned int flags);
+int dquot_load_quota_inode(struct inode *inode, int type, int format_id,
unsigned int flags);
int dquot_quota_on(struct super_block *sb, int type, int format_id,
const struct path *path);
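dquot_is_busy() gives filesystems a cheap way to skip dquots that are dirty or still referenced before attempting to release them. A hedged sketch of the intended call pattern; try_release_dquot() is illustrative and the caller is assumed to hold whatever locking serializes dq_flags/dq_count for its purposes:

#include <linux/quotaops.h>

static bool try_release_dquot(struct dquot *dquot)
{
        if (dquot_is_busy(dquot))
                return false;   /* dirty, or someone else still holds it */
        /* ... filesystem-specific release work would go here ... */
        return true;
}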
diff --git a/include/linux/raid/detect.h b/include/linux/raid/detect.h
new file mode 100644
index 000000000000..37dd3f40cd31
--- /dev/null
+++ b/include/linux/raid/detect.h
@@ -0,0 +1,3 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+void md_autodetect_dev(dev_t dev);
diff --git a/include/linux/raid/pq.h b/include/linux/raid/pq.h
index 0832c9b66852..154e954b711d 100644
--- a/include/linux/raid/pq.h
+++ b/include/linux/raid/pq.h
@@ -27,8 +27,8 @@ extern const char raid6_empty_zero_page[PAGE_SIZE];
#include <errno.h>
#include <inttypes.h>
-#include <limits.h>
#include <stddef.h>
+#include <string.h>
#include <sys/mman.h>
#include <sys/time.h>
#include <sys/types.h>
@@ -44,6 +44,9 @@ typedef uint64_t u64;
#ifndef PAGE_SIZE
# define PAGE_SIZE 4096
#endif
+#ifndef PAGE_SHIFT
+# define PAGE_SHIFT 12
+#endif
extern const char raid6_empty_zero_page[PAGE_SIZE];
#define __init
@@ -59,7 +62,9 @@ extern const char raid6_empty_zero_page[PAGE_SIZE];
#define enable_kernel_altivec()
#define disable_kernel_altivec()
+#undef EXPORT_SYMBOL
#define EXPORT_SYMBOL(sym)
+#undef EXPORT_SYMBOL_GPL
#define EXPORT_SYMBOL_GPL(sym)
#define MODULE_LICENSE(licence)
#define MODULE_DESCRIPTION(desc)
diff --git a/include/linux/ramfs.h b/include/linux/ramfs.h
index b806a0ff6554..917528d102c4 100644
--- a/include/linux/ramfs.h
+++ b/include/linux/ramfs.h
@@ -2,6 +2,8 @@
#ifndef _LINUX_RAMFS_H
#define _LINUX_RAMFS_H
+#include <linux/fs_parser.h> // bleh...
+
struct inode *ramfs_get_inode(struct super_block *sb, const struct inode *dir,
umode_t mode, dev_t dev);
extern int ramfs_init_fs_context(struct fs_context *fc);
@@ -16,7 +18,7 @@ ramfs_nommu_expand_for_mapping(struct inode *inode, size_t newsize)
extern int ramfs_nommu_expand_for_mapping(struct inode *inode, size_t newsize);
#endif
-extern const struct fs_parameter_description ramfs_fs_parameters;
+extern const struct fs_parameter_spec ramfs_fs_parameters[];
extern const struct file_operations ramfs_file_operations;
extern const struct vm_operations_struct generic_file_vm_ops;
diff --git a/include/linux/random.h b/include/linux/random.h
index f189c927fdea..d319f9a1e429 100644
--- a/include/linux/random.h
+++ b/include/linux/random.h
@@ -167,29 +167,21 @@ static inline void prandom_seed_state(struct rnd_state *state, u64 seed)
#ifdef CONFIG_ARCH_RANDOM
# include <asm/archrandom.h>
#else
-static inline bool arch_get_random_long(unsigned long *v)
+static inline bool __must_check arch_get_random_long(unsigned long *v)
{
- return 0;
+ return false;
}
-static inline bool arch_get_random_int(unsigned int *v)
+static inline bool __must_check arch_get_random_int(unsigned int *v)
{
- return 0;
+ return false;
}
-static inline bool arch_has_random(void)
+static inline bool __must_check arch_get_random_seed_long(unsigned long *v)
{
- return 0;
+ return false;
}
-static inline bool arch_get_random_seed_long(unsigned long *v)
+static inline bool __must_check arch_get_random_seed_int(unsigned int *v)
{
- return 0;
-}
-static inline bool arch_get_random_seed_int(unsigned int *v)
-{
- return 0;
-}
-static inline bool arch_has_random_seed(void)
-{
- return 0;
+ return false;
}
#endif
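With the stubs now returning false and marked __must_check, callers are expected to test the result and fall back to the normal entropy pool. A minimal sketch of that pattern (get_seed() is a hypothetical helper):

#include <linux/random.h>

static unsigned long get_seed(void)
{
        unsigned long v;

        /* Prefer the CPU's seed/random instructions when they are available. */
        if (arch_get_random_seed_long(&v) || arch_get_random_long(&v))
                return v;

        return get_random_long();
}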
diff --git a/include/linux/rbtree_augmented.h b/include/linux/rbtree_augmented.h
index fdd421b8d9ae..724b0d036b57 100644
--- a/include/linux/rbtree_augmented.h
+++ b/include/linux/rbtree_augmented.h
@@ -283,14 +283,12 @@ __rb_erase_augmented(struct rb_node *node, struct rb_root *root,
__rb_change_child(node, successor, tmp, root);
if (child2) {
- successor->__rb_parent_color = pc;
rb_set_parent_color(child2, parent, RB_BLACK);
rebalance = NULL;
} else {
- unsigned long pc2 = successor->__rb_parent_color;
- successor->__rb_parent_color = pc;
- rebalance = __rb_is_black(pc2) ? parent : NULL;
+ rebalance = rb_is_black(successor) ? parent : NULL;
}
+ successor->__rb_parent_color = pc;
tmp = successor;
}
diff --git a/include/linux/rcu_segcblist.h b/include/linux/rcu_segcblist.h
index 646759042333..b36afe7b22c9 100644
--- a/include/linux/rcu_segcblist.h
+++ b/include/linux/rcu_segcblist.h
@@ -22,7 +22,6 @@ struct rcu_cblist {
struct rcu_head *head;
struct rcu_head **tail;
long len;
- long len_lazy;
};
#define RCU_CBLIST_INITIALIZER(n) { .head = NULL, .tail = &n.head }
@@ -73,7 +72,6 @@ struct rcu_segcblist {
#else
long len;
#endif
- long len_lazy;
u8 enabled;
u8 offloaded;
};
diff --git a/include/linux/rculist.h b/include/linux/rculist.h
index 4158b7212936..8214cdc715f2 100644
--- a/include/linux/rculist.h
+++ b/include/linux/rculist.h
@@ -40,6 +40,16 @@ static inline void INIT_LIST_HEAD_RCU(struct list_head *list)
*/
#define list_next_rcu(list) (*((struct list_head __rcu **)(&(list)->next)))
+/**
+ * list_tail_rcu - returns the prev pointer of the head of the list
+ * @head: the head of the list
+ *
+ * Note: This should only be used with the list header, and even then
+ * only if list_del() and similar primitives are not also used on the
+ * list header.
+ */
+#define list_tail_rcu(head) (*((struct list_head __rcu **)(&(head)->prev)))
+
/*
* Check during list traversal that we are within an RCU reader
*/
@@ -50,9 +60,9 @@ static inline void INIT_LIST_HEAD_RCU(struct list_head *list)
#define __list_check_rcu(dummy, cond, extra...) \
({ \
check_arg_count_one(extra); \
- RCU_LOCKDEP_WARN(!cond && !rcu_read_lock_any_held(), \
+ RCU_LOCKDEP_WARN(!(cond) && !rcu_read_lock_any_held(), \
"RCU-list traversed in non-reader section!"); \
- })
+ })
#else
#define __list_check_rcu(dummy, cond, extra...) \
({ check_arg_count_one(extra); })
@@ -173,7 +183,7 @@ static inline void hlist_del_init_rcu(struct hlist_node *n)
{
if (!hlist_unhashed(n)) {
__hlist_del(n);
- n->pprev = NULL;
+ WRITE_ONCE(n->pprev, NULL);
}
}
@@ -361,7 +371,7 @@ static inline void list_splice_tail_init_rcu(struct list_head *list,
* @pos: the type * to use as a loop cursor.
* @head: the head for your list.
* @member: the name of the list_head within the struct.
- * @cond: optional lockdep expression if called from non-RCU protection.
+ * @cond...: optional lockdep expression if called from non-RCU protection.
*
* This list-traversal primitive may safely run concurrently with
* the _rcu list-mutation primitives such as list_add_rcu()
@@ -473,7 +483,7 @@ static inline void list_splice_tail_init_rcu(struct list_head *list,
static inline void hlist_del_rcu(struct hlist_node *n)
{
__hlist_del(n);
- n->pprev = LIST_POISON2;
+ WRITE_ONCE(n->pprev, LIST_POISON2);
}
/**
@@ -489,11 +499,11 @@ static inline void hlist_replace_rcu(struct hlist_node *old,
struct hlist_node *next = old->next;
new->next = next;
- new->pprev = old->pprev;
+ WRITE_ONCE(new->pprev, old->pprev);
rcu_assign_pointer(*(struct hlist_node __rcu **)new->pprev, new);
if (next)
- new->next->pprev = &new->next;
- old->pprev = LIST_POISON2;
+ WRITE_ONCE(new->next->pprev, &new->next);
+ WRITE_ONCE(old->pprev, LIST_POISON2);
}
/*
@@ -528,10 +538,10 @@ static inline void hlist_add_head_rcu(struct hlist_node *n,
struct hlist_node *first = h->first;
n->next = first;
- n->pprev = &h->first;
+ WRITE_ONCE(n->pprev, &h->first);
rcu_assign_pointer(hlist_first_rcu(h), n);
if (first)
- first->pprev = &n->next;
+ WRITE_ONCE(first->pprev, &n->next);
}
/**
@@ -564,7 +574,7 @@ static inline void hlist_add_tail_rcu(struct hlist_node *n,
if (last) {
n->next = last->next;
- n->pprev = &last->next;
+ WRITE_ONCE(n->pprev, &last->next);
rcu_assign_pointer(hlist_next_rcu(last), n);
} else {
hlist_add_head_rcu(n, h);
@@ -592,10 +602,10 @@ static inline void hlist_add_tail_rcu(struct hlist_node *n,
static inline void hlist_add_before_rcu(struct hlist_node *n,
struct hlist_node *next)
{
- n->pprev = next->pprev;
+ WRITE_ONCE(n->pprev, next->pprev);
n->next = next;
rcu_assign_pointer(hlist_pprev_rcu(n), n);
- next->pprev = &n->next;
+ WRITE_ONCE(next->pprev, &n->next);
}
/**
@@ -620,10 +630,10 @@ static inline void hlist_add_behind_rcu(struct hlist_node *n,
struct hlist_node *prev)
{
n->next = prev->next;
- n->pprev = &prev->next;
+ WRITE_ONCE(n->pprev, &prev->next);
rcu_assign_pointer(hlist_next_rcu(prev), n);
if (n->next)
- n->next->pprev = &n->next;
+ WRITE_ONCE(n->next->pprev, &n->next);
}
#define __hlist_for_each_rcu(pos, head) \
@@ -636,7 +646,7 @@ static inline void hlist_add_behind_rcu(struct hlist_node *n,
* @pos: the type * to use as a loop cursor.
* @head: the head for your list.
* @member: the name of the hlist_node within the struct.
- * @cond: optional lockdep expression if called from non-RCU protection.
+ * @cond...: optional lockdep expression if called from non-RCU protection.
*
* This list-traversal primitive may safely run concurrently with
* the _rcu list-mutation primitives such as hlist_add_head_rcu()
diff --git a/include/linux/rculist_bl.h b/include/linux/rculist_bl.h
index 66e73ec1aa99..0b952d06eb0b 100644
--- a/include/linux/rculist_bl.h
+++ b/include/linux/rculist_bl.h
@@ -25,34 +25,6 @@ static inline struct hlist_bl_node *hlist_bl_first_rcu(struct hlist_bl_head *h)
}
/**
- * hlist_bl_del_init_rcu - deletes entry from hash list with re-initialization
- * @n: the element to delete from the hash list.
- *
- * Note: hlist_bl_unhashed() on the node returns true after this. It is
- * useful for RCU based read lockfree traversal if the writer side
- * must know if the list entry is still hashed or already unhashed.
- *
- * In particular, it means that we can not poison the forward pointers
- * that may still be used for walking the hash list and we can only
- * zero the pprev pointer so list_unhashed() will return true after
- * this.
- *
- * The caller must take whatever precautions are necessary (such as
- * holding appropriate locks) to avoid racing with another
- * list-mutation primitive, such as hlist_bl_add_head_rcu() or
- * hlist_bl_del_rcu(), running on this same list. However, it is
- * perfectly legal to run concurrently with the _rcu list-traversal
- * primitives, such as hlist_bl_for_each_entry_rcu().
- */
-static inline void hlist_bl_del_init_rcu(struct hlist_bl_node *n)
-{
- if (!hlist_bl_unhashed(n)) {
- __hlist_bl_del(n);
- n->pprev = NULL;
- }
-}
-
-/**
* hlist_bl_del_rcu - deletes entry from hash list without re-initialization
* @n: the element to delete from the hash list.
*
diff --git a/include/linux/rculist_nulls.h b/include/linux/rculist_nulls.h
index bc8206a8f30e..9670b54b484a 100644
--- a/include/linux/rculist_nulls.h
+++ b/include/linux/rculist_nulls.h
@@ -34,13 +34,21 @@ static inline void hlist_nulls_del_init_rcu(struct hlist_nulls_node *n)
{
if (!hlist_nulls_unhashed(n)) {
__hlist_nulls_del(n);
- n->pprev = NULL;
+ WRITE_ONCE(n->pprev, NULL);
}
}
+/**
+ * hlist_nulls_first_rcu - returns the first element of the hash list.
+ * @head: the head of the list.
+ */
#define hlist_nulls_first_rcu(head) \
(*((struct hlist_nulls_node __rcu __force **)&(head)->first))
+/**
+ * hlist_nulls_next_rcu - returns the element of the list after @node.
+ * @node: element of the list.
+ */
#define hlist_nulls_next_rcu(node) \
(*((struct hlist_nulls_node __rcu __force **)&(node)->next))
@@ -66,7 +74,7 @@ static inline void hlist_nulls_del_init_rcu(struct hlist_nulls_node *n)
static inline void hlist_nulls_del_rcu(struct hlist_nulls_node *n)
{
__hlist_nulls_del(n);
- n->pprev = LIST_POISON2;
+ WRITE_ONCE(n->pprev, LIST_POISON2);
}
/**
@@ -94,17 +102,61 @@ static inline void hlist_nulls_add_head_rcu(struct hlist_nulls_node *n,
struct hlist_nulls_node *first = h->first;
n->next = first;
- n->pprev = &h->first;
+ WRITE_ONCE(n->pprev, &h->first);
rcu_assign_pointer(hlist_nulls_first_rcu(h), n);
if (!is_a_nulls(first))
- first->pprev = &n->next;
+ WRITE_ONCE(first->pprev, &n->next);
+}
+
+/**
+ * hlist_nulls_add_tail_rcu - add a new entry at the tail of the list
+ * @n: the element to add to the hash list.
+ * @h: the list to add to.
+ *
+ * Description:
+ * Adds the specified element to the specified hlist_nulls,
+ * while permitting racing traversals.
+ *
+ * The caller must take whatever precautions are necessary
+ * (such as holding appropriate locks) to avoid racing
+ * with another list-mutation primitive, such as hlist_nulls_add_head_rcu()
+ * or hlist_nulls_del_rcu(), running on this same list.
+ * However, it is perfectly legal to run concurrently with
+ * the _rcu list-traversal primitives, such as
+ * hlist_nulls_for_each_entry_rcu(), used to prevent memory-consistency
+ * problems on Alpha CPUs. Regardless of the type of CPU, the
+ * list-traversal primitive must be guarded by rcu_read_lock().
+ */
+static inline void hlist_nulls_add_tail_rcu(struct hlist_nulls_node *n,
+ struct hlist_nulls_head *h)
+{
+ struct hlist_nulls_node *i, *last = NULL;
+
+ /* Note: write side code, so rcu accessors are not needed. */
+ for (i = h->first; !is_a_nulls(i); i = i->next)
+ last = i;
+
+ if (last) {
+ n->next = last->next;
+ n->pprev = &last->next;
+ rcu_assign_pointer(hlist_next_rcu(last), n);
+ } else {
+ hlist_nulls_add_head_rcu(n, h);
+ }
+}
+
+/* after that hlist_nulls_del will work */
+static inline void hlist_nulls_add_fake(struct hlist_nulls_node *n)
+{
+ n->pprev = &n->next;
+ n->next = (struct hlist_nulls_node *)NULLS_MARKER(NULL);
}
/**
* hlist_nulls_for_each_entry_rcu - iterate over rcu list of given type
* @tpos: the type * to use as a loop cursor.
* @pos: the &struct hlist_nulls_node to use as a loop cursor.
- * @head: the head for your list.
+ * @head: the head of the list.
* @member: the name of the hlist_nulls_node within the struct.
*
* The barrier() is needed to make sure compiler doesn't cache first element [1],
@@ -124,7 +176,7 @@ static inline void hlist_nulls_add_head_rcu(struct hlist_nulls_node *n,
* iterate over list of given type safe against removal of list entry
* @tpos: the type * to use as a loop cursor.
* @pos: the &struct hlist_nulls_node to use as a loop cursor.
- * @head: the head for your list.
+ * @head: the head of the list.
* @member: the name of the hlist_nulls_node within the struct.
*/
#define hlist_nulls_for_each_entry_safe(tpos, pos, head, member) \
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 75a2eded7aa2..2678a37c3169 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -154,7 +154,7 @@ static inline void exit_tasks_rcu_finish(void) { }
*
* This macro resembles cond_resched(), except that it is defined to
* report potential quiescent states to RCU-tasks even if the cond_resched()
- * machinery were to be shut off, as some advocate for PREEMPT kernels.
+ * machinery were to be shut off, as some advocate for PREEMPTION kernels.
*/
#define cond_resched_tasks_rcu_qs() \
do { \
@@ -167,7 +167,7 @@ do { \
* TREE_RCU and rcu_barrier_() primitives in TINY_RCU.
*/
-#if defined(CONFIG_TREE_RCU) || defined(CONFIG_PREEMPT_RCU)
+#if defined(CONFIG_TREE_RCU)
#include <linux/rcutree.h>
#elif defined(CONFIG_TINY_RCU)
#include <linux/rcutiny.h>
@@ -210,7 +210,7 @@ static inline void rcu_lock_acquire(struct lockdep_map *map)
static inline void rcu_lock_release(struct lockdep_map *map)
{
- lock_release(map, 1, _THIS_IP_);
+ lock_release(map, _THIS_IP_);
}
extern struct lockdep_map rcu_lock_map;
@@ -383,20 +383,22 @@ do { \
} while (0)
/**
- * rcu_swap_protected() - swap an RCU and a regular pointer
- * @rcu_ptr: RCU pointer
+ * rcu_replace_pointer() - replace an RCU pointer, returning its old value
+ * @rcu_ptr: RCU pointer, whose old value is returned
* @ptr: regular pointer
- * @c: the conditions under which the dereference will take place
+ * @c: the lockdep conditions under which the dereference will take place
*
- * Perform swap(@rcu_ptr, @ptr) where @rcu_ptr is an RCU-annotated pointer and
- * @c is the argument that is passed to the rcu_dereference_protected() call
- * used to read that pointer.
+ * Perform a replacement, where @rcu_ptr is an RCU-annotated
+ * pointer and @c is the lockdep argument that is passed to the
+ * rcu_dereference_protected() call used to read that pointer. The old
+ * value of @rcu_ptr is returned, and @rcu_ptr is set to @ptr.
*/
-#define rcu_swap_protected(rcu_ptr, ptr, c) do { \
+#define rcu_replace_pointer(rcu_ptr, ptr, c) \
+({ \
typeof(ptr) __tmp = rcu_dereference_protected((rcu_ptr), (c)); \
rcu_assign_pointer((rcu_ptr), (ptr)); \
- (ptr) = __tmp; \
-} while (0)
+ __tmp; \
+})
/**
* rcu_access_pointer() - fetch RCU pointer with no dereferencing
@@ -580,10 +582,10 @@ do { \
*
* You can avoid reading and understanding the next paragraph by
* following this rule: don't put anything in an rcu_read_lock() RCU
- * read-side critical section that would block in a !PREEMPT kernel.
+ * read-side critical section that would block in a !PREEMPTION kernel.
* But if you want the full story, read on!
*
- * In non-preemptible RCU implementations (TREE_RCU and TINY_RCU),
+ * In non-preemptible RCU implementations (pure TREE_RCU and TINY_RCU),
* it is illegal to block while in an RCU read-side critical section.
* In preemptible RCU implementations (PREEMPT_RCU) in CONFIG_PREEMPTION
* kernel builds, RCU read-side critical sections may be preempted,
@@ -894,4 +896,8 @@ rcu_head_after_call_rcu(struct rcu_head *rhp, rcu_callback_t f)
return false;
}
+/* kernel/ksysfs.c definitions */
+extern int rcu_expedited;
+extern int rcu_normal;
+
#endif /* __LINUX_RCUPDATE_H */
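Because rcu_replace_pointer() hands back the old value instead of swapping in place, former rcu_swap_protected() call sites turn into a plain assignment of the return value. A sketch of the conversion, assuming a structure whose ->ptr member is updated under ->lock (struct foo and struct bar are placeholders):

#include <linux/rcupdate.h>
#include <linux/spinlock.h>

struct bar;

struct foo {
        struct bar __rcu *ptr;
        spinlock_t lock;
};

/*
 * Previously: rcu_swap_protected(f->ptr, newp, lockdep_is_held(&f->lock));
 * Now the displaced pointer is returned so the caller can free it after a
 * grace period.
 */
static struct bar *foo_install(struct foo *f, struct bar *newp)
{
        return rcu_replace_pointer(f->ptr, newp, lockdep_is_held(&f->lock));
}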
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
index 9bf1dfe7781f..045c28b71f4f 100644
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -83,7 +83,10 @@ void rcu_scheduler_starting(void);
static inline void rcu_scheduler_starting(void) { }
#endif /* #else #ifndef CONFIG_SRCU */
static inline void rcu_end_inkernel_boot(void) { }
+static inline bool rcu_inkernel_boot_has_ended(void) { return true; }
static inline bool rcu_is_watching(void) { return true; }
+static inline void rcu_momentary_dyntick_idle(void) { }
+static inline void kfree_rcu_scheduler_running(void) { }
/* Avoid RCU read-side critical sections leaking across. */
static inline void rcu_all_qs(void) { barrier(); }
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index 18b1ed9864b0..45f3f66bb04d 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -37,6 +37,8 @@ void kfree_call_rcu(struct rcu_head *head, rcu_callback_t func);
void rcu_barrier(void);
bool rcu_eqs_special_set(int cpu);
+void rcu_momentary_dyntick_idle(void);
+void kfree_rcu_scheduler_running(void);
unsigned long get_state_synchronize_rcu(void);
void cond_synchronize_rcu(unsigned long oldstate);
@@ -52,6 +54,7 @@ void exit_rcu(void);
void rcu_scheduler_starting(void);
extern int rcu_scheduler_active __read_mostly;
void rcu_end_inkernel_boot(void);
+bool rcu_inkernel_boot_has_ended(void);
bool rcu_is_watching(void);
#ifndef CONFIG_PREEMPTION
void rcu_all_qs(void);
diff --git a/include/linux/rcuwait.h b/include/linux/rcuwait.h
index 75c97e4bbc57..2ffe1ee6d482 100644
--- a/include/linux/rcuwait.h
+++ b/include/linux/rcuwait.h
@@ -3,6 +3,7 @@
#define _LINUX_RCUWAIT_H_
#include <linux/rcupdate.h>
+#include <linux/sched/signal.h>
/*
* rcuwait provides a way of blocking and waking up a single
@@ -30,23 +31,30 @@ extern void rcuwait_wake_up(struct rcuwait *w);
* The caller is responsible for locking around rcuwait_wait_event(),
* such that writes to @task are properly serialized.
*/
-#define rcuwait_wait_event(w, condition) \
+#define rcuwait_wait_event(w, condition, state) \
({ \
+ int __ret = 0; \
rcu_assign_pointer((w)->task, current); \
for (;;) { \
/* \
* Implicit barrier (A) pairs with (B) in \
* rcuwait_wake_up(). \
*/ \
- set_current_state(TASK_UNINTERRUPTIBLE); \
+ set_current_state(state); \
if (condition) \
break; \
\
+ if (signal_pending_state(state, current)) { \
+ __ret = -EINTR; \
+ break; \
+ } \
+ \
schedule(); \
} \
\
WRITE_ONCE((w)->task, NULL); \
__set_current_state(TASK_RUNNING); \
+ __ret; \
})
#endif /* _LINUX_RCUWAIT_H_ */
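The new state argument makes the wait interruptible: with TASK_INTERRUPTIBLE the macro returns -EINTR once a signal is pending rather than sleeping until the condition becomes true. A hedged usage sketch (wait_for_done() is illustrative):

#include <linux/rcuwait.h>
#include <linux/sched.h>

static int wait_for_done(struct rcuwait *w, bool *done)
{
        /* Returns 0 when *done became true, -EINTR if interrupted. */
        return rcuwait_wait_event(w, READ_ONCE(*done), TASK_INTERRUPTIBLE);
}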
diff --git a/include/linux/refcount.h b/include/linux/refcount.h
index e28cce21bad6..0ac50cf62d06 100644
--- a/include/linux/refcount.h
+++ b/include/linux/refcount.h
@@ -1,9 +1,88 @@
/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Variant of atomic_t specialized for reference counts.
+ *
+ * The interface matches the atomic_t interface (to aid in porting) but only
+ * provides the few functions one should use for reference counting.
+ *
+ * Saturation semantics
+ * ====================
+ *
+ * refcount_t differs from atomic_t in that the counter saturates at
+ * REFCOUNT_SATURATED and will not move once there. This avoids wrapping the
+ * counter and causing 'spurious' use-after-free issues. In order to avoid the
+ * cost associated with introducing cmpxchg() loops into all of the saturating
+ * operations, we temporarily allow the counter to take on an unchecked value
+ * and then explicitly set it to REFCOUNT_SATURATED on detecting that underflow
+ * or overflow has occurred. Although this is racy when multiple threads
+ * access the refcount concurrently, by placing REFCOUNT_SATURATED roughly
+ * equidistant from 0 and INT_MAX we minimise the scope for error:
+ *
+ * INT_MAX REFCOUNT_SATURATED UINT_MAX
+ * 0 (0x7fff_ffff) (0xc000_0000) (0xffff_ffff)
+ * +--------------------------------+----------------+----------------+
+ * <---------- bad value! ---------->
+ *
+ * (in a signed view of the world, the "bad value" range corresponds to
+ * a negative counter value).
+ *
+ * As an example, consider a refcount_inc() operation that causes the counter
+ * to overflow:
+ *
+ * int old = atomic_fetch_add_relaxed(1, r);
+ * // old is INT_MAX, refcount now INT_MIN (0x8000_0000)
+ * if (old < 0)
+ * atomic_set(r, REFCOUNT_SATURATED);
+ *
+ * If another thread also performs a refcount_inc() operation between the two
+ * atomic operations, then the count will continue to edge closer to 0. If it
+ * reaches a value of 1 before /any/ of the threads reset it to the saturated
+ * value, then a concurrent refcount_dec_and_test() may erroneously free the
+ * underlying object. Given the precise timing details involved with the
+ * round-robin scheduling of each thread manipulating the refcount and the need
+ * to hit the race multiple times in succession, there doesn't appear to be a
+ * practical avenue of attack even if using refcount_add() operations with
+ * larger increments.
+ *
+ * Memory ordering
+ * ===============
+ *
+ * Memory ordering rules are slightly relaxed wrt regular atomic_t functions
+ * and provide only what is strictly required for refcounts.
+ *
+ * The increments are fully relaxed; these will not provide ordering. The
+ * rationale is that whatever is used to obtain the object we're increasing the
+ * reference count on will provide the ordering. For locked data structures,
+ * it's the lock acquire, for RCU/lockless data structures it's the dependent
+ * load.
+ *
+ * Do note that inc_not_zero() provides a control dependency which will order
+ * future stores against the inc, this ensures we'll never modify the object
+ * if we did not in fact acquire a reference.
+ *
+ * The decrements will provide release order, such that all the prior loads and
+ * stores will be issued before, it also provides a control dependency, which
+ * will order us against the subsequent free().
+ *
+ * The control dependency is against the load of the cmpxchg (ll/sc) that
+ * succeeded. This means the stores aren't fully ordered, but this is fine
+ * because the 1->0 transition indicates no concurrency.
+ *
+ * Note that the allocator is responsible for ordering things between free()
+ * and alloc().
+ *
+ * The decrements dec_and_test() and sub_and_test() also provide acquire
+ * ordering on success.
+ *
+ */
+
#ifndef _LINUX_REFCOUNT_H
#define _LINUX_REFCOUNT_H
#include <linux/atomic.h>
+#include <linux/bug.h>
#include <linux/compiler.h>
+#include <linux/limits.h>
#include <linux/spinlock_types.h>
struct mutex;
@@ -12,7 +91,7 @@ struct mutex;
* struct refcount_t - variant of atomic_t specialized for reference counts
* @refs: atomic_t counter field
*
- * The counter saturates at UINT_MAX and will not move once
+ * The counter saturates at REFCOUNT_SATURATED and will not move once
* there. This avoids wrapping the counter and causing 'spurious'
* use-after-free bugs.
*/
@@ -21,13 +100,25 @@ typedef struct refcount_struct {
} refcount_t;
#define REFCOUNT_INIT(n) { .refs = ATOMIC_INIT(n), }
+#define REFCOUNT_MAX INT_MAX
+#define REFCOUNT_SATURATED (INT_MIN / 2)
+
+enum refcount_saturation_type {
+ REFCOUNT_ADD_NOT_ZERO_OVF,
+ REFCOUNT_ADD_OVF,
+ REFCOUNT_ADD_UAF,
+ REFCOUNT_SUB_UAF,
+ REFCOUNT_DEC_LEAK,
+};
+
+void refcount_warn_saturate(refcount_t *r, enum refcount_saturation_type t);
/**
* refcount_set - set a refcount's value
* @r: the refcount
* @n: value to which the refcount will be set
*/
-static inline void refcount_set(refcount_t *r, unsigned int n)
+static inline void refcount_set(refcount_t *r, int n)
{
atomic_set(&r->refs, n);
}
@@ -43,70 +134,168 @@ static inline unsigned int refcount_read(const refcount_t *r)
return atomic_read(&r->refs);
}
-extern __must_check bool refcount_add_not_zero_checked(unsigned int i, refcount_t *r);
-extern void refcount_add_checked(unsigned int i, refcount_t *r);
-
-extern __must_check bool refcount_inc_not_zero_checked(refcount_t *r);
-extern void refcount_inc_checked(refcount_t *r);
-
-extern __must_check bool refcount_sub_and_test_checked(unsigned int i, refcount_t *r);
-
-extern __must_check bool refcount_dec_and_test_checked(refcount_t *r);
-extern void refcount_dec_checked(refcount_t *r);
-
-#ifdef CONFIG_REFCOUNT_FULL
-
-#define refcount_add_not_zero refcount_add_not_zero_checked
-#define refcount_add refcount_add_checked
-
-#define refcount_inc_not_zero refcount_inc_not_zero_checked
-#define refcount_inc refcount_inc_checked
+/**
+ * refcount_add_not_zero - add a value to a refcount unless it is 0
+ * @i: the value to add to the refcount
+ * @r: the refcount
+ *
+ * Will saturate at REFCOUNT_SATURATED and WARN.
+ *
+ * Provides no memory ordering, it is assumed the caller has guaranteed the
+ * object memory to be stable (RCU, etc.). It does provide a control dependency
+ * and thereby orders future stores. See the comment on top.
+ *
+ * Use of this function is not recommended for the normal reference counting
+ * use case in which references are taken and released one at a time. In these
+ * cases, refcount_inc(), or one of its variants, should instead be used to
+ * increment a reference count.
+ *
+ * Return: false if the passed refcount is 0, true otherwise
+ */
+static inline __must_check bool refcount_add_not_zero(int i, refcount_t *r)
+{
+ int old = refcount_read(r);
-#define refcount_sub_and_test refcount_sub_and_test_checked
+ do {
+ if (!old)
+ break;
+ } while (!atomic_try_cmpxchg_relaxed(&r->refs, &old, old + i));
-#define refcount_dec_and_test refcount_dec_and_test_checked
-#define refcount_dec refcount_dec_checked
+ if (unlikely(old < 0 || old + i < 0))
+ refcount_warn_saturate(r, REFCOUNT_ADD_NOT_ZERO_OVF);
-#else
-# ifdef CONFIG_ARCH_HAS_REFCOUNT
-# include <asm/refcount.h>
-# else
-static inline __must_check bool refcount_add_not_zero(unsigned int i, refcount_t *r)
-{
- return atomic_add_unless(&r->refs, i, 0);
+ return old;
}
-static inline void refcount_add(unsigned int i, refcount_t *r)
+/**
+ * refcount_add - add a value to a refcount
+ * @i: the value to add to the refcount
+ * @r: the refcount
+ *
+ * Similar to atomic_add(), but will saturate at REFCOUNT_SATURATED and WARN.
+ *
+ * Provides no memory ordering, it is assumed the caller has guaranteed the
+ * object memory to be stable (RCU, etc.). It does provide a control dependency
+ * and thereby orders future stores. See the comment on top.
+ *
+ * Use of this function is not recommended for the normal reference counting
+ * use case in which references are taken and released one at a time. In these
+ * cases, refcount_inc(), or one of its variants, should instead be used to
+ * increment a reference count.
+ */
+static inline void refcount_add(int i, refcount_t *r)
{
- atomic_add(i, &r->refs);
+ int old = atomic_fetch_add_relaxed(i, &r->refs);
+
+ if (unlikely(!old))
+ refcount_warn_saturate(r, REFCOUNT_ADD_UAF);
+ else if (unlikely(old < 0 || old + i < 0))
+ refcount_warn_saturate(r, REFCOUNT_ADD_OVF);
}
+/**
+ * refcount_inc_not_zero - increment a refcount unless it is 0
+ * @r: the refcount to increment
+ *
+ * Similar to atomic_inc_not_zero(), but will saturate at REFCOUNT_SATURATED
+ * and WARN.
+ *
+ * Provides no memory ordering, it is assumed the caller has guaranteed the
+ * object memory to be stable (RCU, etc.). It does provide a control dependency
+ * and thereby orders future stores. See the comment on top.
+ *
+ * Return: true if the increment was successful, false otherwise
+ */
static inline __must_check bool refcount_inc_not_zero(refcount_t *r)
{
- return atomic_add_unless(&r->refs, 1, 0);
+ return refcount_add_not_zero(1, r);
}
+/**
+ * refcount_inc - increment a refcount
+ * @r: the refcount to increment
+ *
+ * Similar to atomic_inc(), but will saturate at REFCOUNT_SATURATED and WARN.
+ *
+ * Provides no memory ordering, it is assumed the caller already has a
+ * reference on the object.
+ *
+ * Will WARN if the refcount is 0, as this represents a possible use-after-free
+ * condition.
+ */
static inline void refcount_inc(refcount_t *r)
{
- atomic_inc(&r->refs);
+ refcount_add(1, r);
}
-static inline __must_check bool refcount_sub_and_test(unsigned int i, refcount_t *r)
+/**
+ * refcount_sub_and_test - subtract from a refcount and test if it is 0
+ * @i: amount to subtract from the refcount
+ * @r: the refcount
+ *
+ * Similar to atomic_dec_and_test(), but it will WARN, return false and
+ * ultimately leak on underflow and will fail to decrement when saturated
+ * at REFCOUNT_SATURATED.
+ *
+ * Provides release memory ordering, such that prior loads and stores are done
+ * before, and provides an acquire ordering on success such that free()
+ * must come after.
+ *
+ * Use of this function is not recommended for the normal reference counting
+ * use case in which references are taken and released one at a time. In these
+ * cases, refcount_dec(), or one of its variants, should instead be used to
+ * decrement a reference count.
+ *
+ * Return: true if the resulting refcount is 0, false otherwise
+ */
+static inline __must_check bool refcount_sub_and_test(int i, refcount_t *r)
{
- return atomic_sub_and_test(i, &r->refs);
+ int old = atomic_fetch_sub_release(i, &r->refs);
+
+ if (old == i) {
+ smp_acquire__after_ctrl_dep();
+ return true;
+ }
+
+ if (unlikely(old < 0 || old - i < 0))
+ refcount_warn_saturate(r, REFCOUNT_SUB_UAF);
+
+ return false;
}
+/**
+ * refcount_dec_and_test - decrement a refcount and test if it is 0
+ * @r: the refcount
+ *
+ * Similar to atomic_dec_and_test(), it will WARN on underflow and fail to
+ * decrement when saturated at REFCOUNT_SATURATED.
+ *
+ * Provides release memory ordering, such that prior loads and stores are done
+ * before, and provides an acquire ordering on success such that free()
+ * must come after.
+ *
+ * Return: true if the resulting refcount is 0, false otherwise
+ */
static inline __must_check bool refcount_dec_and_test(refcount_t *r)
{
- return atomic_dec_and_test(&r->refs);
+ return refcount_sub_and_test(1, r);
}
+/**
+ * refcount_dec - decrement a refcount
+ * @r: the refcount
+ *
+ * Similar to atomic_dec(), it will WARN on underflow and fail to decrement
+ * when saturated at REFCOUNT_SATURATED.
+ *
+ * Provides release memory ordering, such that prior loads and stores are done
+ * before.
+ */
static inline void refcount_dec(refcount_t *r)
{
- atomic_dec(&r->refs);
+ if (unlikely(atomic_fetch_sub_release(1, &r->refs) <= 1))
+ refcount_warn_saturate(r, REFCOUNT_DEC_LEAK);
}
-# endif /* !CONFIG_ARCH_HAS_REFCOUNT */
-#endif /* CONFIG_REFCOUNT_FULL */
extern __must_check bool refcount_dec_if_one(refcount_t *r);
extern __must_check bool refcount_dec_not_one(refcount_t *r);
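The lookup-then-take idiom that the header comment describes leans on the control dependency of refcount_inc_not_zero(). A short sketch of that pattern (struct obj, obj_tryget() and obj_put() are placeholders; a real object reached through a lockless lookup would typically be freed only after an RCU grace period):

#include <linux/refcount.h>
#include <linux/slab.h>

struct obj {
        refcount_t ref;
        /* ... payload ... */
};

static struct obj *obj_tryget(struct obj *o)
{
        if (o && refcount_inc_not_zero(&o->ref))
                return o;       /* reference taken; object cannot go away under us */
        return NULL;            /* it was already on its way to zero */
}

static void obj_put(struct obj *o)
{
        if (refcount_dec_and_test(&o->ref))
                kfree(o);       /* last reference; release/acquire ordering per above */
}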
diff --git a/include/linux/regmap.h b/include/linux/regmap.h
index dfe493ac692d..40b07168fd8e 100644
--- a/include/linux/regmap.h
+++ b/include/linux/regmap.h
@@ -145,6 +145,51 @@ struct reg_sequence {
})
/**
+ * regmap_read_poll_timeout_atomic - Poll until a condition is met or a timeout occurs
+ *
+ * @map: Regmap to read from
+ * @addr: Address to poll
+ * @val: Unsigned integer variable to read the value into
+ * @cond: Break condition (usually involving @val)
+ * @delay_us: Time to udelay between reads in us (0 tight-loops).
+ * Should be less than ~10us since udelay is used
+ * (see Documentation/timers/timers-howto.rst).
+ * @timeout_us: Timeout in us, 0 means never timeout
+ *
+ * Returns 0 on success and -ETIMEDOUT upon a timeout or the regmap_read
+ * error return value in case of a read error. In the two former cases,
+ * the last read value at @addr is stored in @val.
+ *
+ * This is modelled after the readx_poll_timeout_atomic macros in linux/iopoll.h.
+ *
+ * Note: In general regmap cannot be used in atomic context. If you want to use
+ * this macro then first setup your regmap for atomic use (flat or no cache
+ * and MMIO regmap).
+ */
+#define regmap_read_poll_timeout_atomic(map, addr, val, cond, delay_us, timeout_us) \
+({ \
+ u64 __timeout_us = (timeout_us); \
+ unsigned long __delay_us = (delay_us); \
+ ktime_t __timeout = ktime_add_us(ktime_get(), __timeout_us); \
+ int __ret; \
+ for (;;) { \
+ __ret = regmap_read((map), (addr), &(val)); \
+ if (__ret) \
+ break; \
+ if (cond) \
+ break; \
+ if ((__timeout_us) && \
+ ktime_compare(ktime_get(), __timeout) > 0) { \
+ __ret = regmap_read((map), (addr), &(val)); \
+ break; \
+ } \
+ if (__delay_us) \
+ udelay(__delay_us); \
+ } \
+ __ret ?: ((cond) ? 0 : -ETIMEDOUT); \
+})
+
+/**
* regmap_field_read_poll_timeout - Poll until a condition is met or timeout
*
* @field: Regmap field to read from
@@ -416,8 +461,8 @@ struct regmap_config {
* @range_max: Address of the highest register in virtual range.
*
* @selector_reg: Register with selector field.
- * @selector_mask: Bit shift for selector value.
- * @selector_shift: Bit mask for selector value.
+ * @selector_mask: Bit mask for selector value.
+ * @selector_shift: Bit shift for selector value.
*
* @window_start: Address of first (lowest) register in data window.
* @window_len: Number of registers in data window.
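As with the sleeping poll helpers, @cond usually tests a status bit read back into @val. A hedged example that polls a hypothetical READY bit every 5 us for at most 100 us, assuming a flat/no-cache MMIO regmap as the note above requires (CHIP_STATUS_* are placeholders):

#include <linux/bits.h>
#include <linux/regmap.h>

#define CHIP_STATUS_REG         0x04            /* hypothetical register */
#define CHIP_STATUS_READY       BIT(0)          /* hypothetical ready flag */

static int chip_wait_ready_atomic(struct regmap *map)
{
        unsigned int val;

        return regmap_read_poll_timeout_atomic(map, CHIP_STATUS_REG, val,
                                               val & CHIP_STATUS_READY,
                                               5, 100);
}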
diff --git a/include/linux/regulator/ab8500.h b/include/linux/regulator/ab8500.h
index 7cf8f797e13a..3ab1ddf151a2 100644
--- a/include/linux/regulator/ab8500.h
+++ b/include/linux/regulator/ab8500.h
@@ -37,14 +37,11 @@ enum ab8505_regulator_id {
AB8505_LDO_AUX6,
AB8505_LDO_INTCORE,
AB8505_LDO_ADC,
- AB8505_LDO_USB,
AB8505_LDO_AUDIO,
AB8505_LDO_ANAMIC1,
AB8505_LDO_ANAMIC2,
AB8505_LDO_AUX8,
AB8505_LDO_ANA,
- AB8505_SYSCLKREQ_2,
- AB8505_SYSCLKREQ_4,
AB8505_NUM_REGULATORS,
};
diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h
index 337a46391527..6a92fd3105a3 100644
--- a/include/linux/regulator/consumer.h
+++ b/include/linux/regulator/consumer.h
@@ -287,6 +287,8 @@ void regulator_bulk_set_supply_names(struct regulator_bulk_data *consumers,
const char *const *supply_names,
unsigned int num_supplies);
+bool regulator_is_equal(struct regulator *reg1, struct regulator *reg2);
+
#else
/*
@@ -593,6 +595,11 @@ regulator_bulk_set_supply_names(struct regulator_bulk_data *consumers,
{
}
+static inline bool
+regulator_is_equal(struct regulator *reg1, struct regulator *reg2)
+{
+ return false;
+}
#endif
static inline int regulator_set_voltage_triplet(struct regulator *regulator,
diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h
index 9a911bb5fb61..29d920516e0b 100644
--- a/include/linux/regulator/driver.h
+++ b/include/linux/regulator/driver.h
@@ -277,9 +277,9 @@ enum regulator_type {
* @curr_table: Current limit mapping table (if table based mapping)
*
* @vsel_range_reg: Register for range selector when using pickable ranges
- * and regulator_regmap_X_voltage_X_pickable functions.
+ * and ``regulator_map_*_voltage_*_pickable`` functions.
* @vsel_range_mask: Mask for register bitfield used for range selector
- * @vsel_reg: Register for selector when using regulator_regmap_X_voltage_
+ * @vsel_reg: Register for selector when using ``regulator_map_*_voltage_*``
* @vsel_mask: Mask for register bitfield used for selector
* @vsel_step: Specify the resolution of selector stepping when setting
* voltage. If 0, then no stepping is done (requested selector is
diff --git a/include/linux/regulator/fixed.h b/include/linux/regulator/fixed.h
index d44ce5f18a56..55319943fcc5 100644
--- a/include/linux/regulator/fixed.h
+++ b/include/linux/regulator/fixed.h
@@ -36,6 +36,7 @@ struct fixed_voltage_config {
const char *input_supply;
int microvolts;
unsigned startup_delay;
+ unsigned int off_on_delay;
unsigned enabled_at_boot:1;
struct regulator_init_data *init_data;
};
diff --git a/include/linux/remoteproc/mtk_scp.h b/include/linux/remoteproc/mtk_scp.h
new file mode 100644
index 000000000000..b47416f7aeb8
--- /dev/null
+++ b/include/linux/remoteproc/mtk_scp.h
@@ -0,0 +1,66 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2019 MediaTek Inc.
+ */
+
+#ifndef _MTK_SCP_H
+#define _MTK_SCP_H
+
+#include <linux/platform_device.h>
+
+typedef void (*scp_ipi_handler_t) (void *data,
+ unsigned int len,
+ void *priv);
+struct mtk_scp;
+
+/**
+ * enum ipi_id - the id of inter-processor interrupt
+ *
+ * @SCP_IPI_INIT: The interrupt from SCP notifies the kernel that
+ * SCP initialization has completed.
+ * IPI_SCP_INIT is sent from SCP when firmware is
+ * loaded. AP doesn't need to send IPI_SCP_INIT
+ * command to SCP.
+ * For other IPI below, AP should send the request
+ * to SCP to trigger the interrupt.
+ * @SCP_IPI_MAX: The maximum IPI number
+ */
+
+enum scp_ipi_id {
+ SCP_IPI_INIT = 0,
+ SCP_IPI_VDEC_H264,
+ SCP_IPI_VDEC_VP8,
+ SCP_IPI_VDEC_VP9,
+ SCP_IPI_VENC_H264,
+ SCP_IPI_VENC_VP8,
+ SCP_IPI_MDP_INIT,
+ SCP_IPI_MDP_DEINIT,
+ SCP_IPI_MDP_FRAME,
+ SCP_IPI_DIP,
+ SCP_IPI_ISP_CMD,
+ SCP_IPI_ISP_FRAME,
+ SCP_IPI_FD_CMD,
+ SCP_IPI_CROS_HOST_CMD,
+ SCP_IPI_NS_SERVICE = 0xFF,
+ SCP_IPI_MAX = 0x100,
+};
+
+struct mtk_scp *scp_get(struct platform_device *pdev);
+void scp_put(struct mtk_scp *scp);
+
+struct device *scp_get_device(struct mtk_scp *scp);
+struct rproc *scp_get_rproc(struct mtk_scp *scp);
+
+int scp_ipi_register(struct mtk_scp *scp, u32 id, scp_ipi_handler_t handler,
+ void *priv);
+void scp_ipi_unregister(struct mtk_scp *scp, u32 id);
+
+int scp_ipi_send(struct mtk_scp *scp, u32 id, void *buf, unsigned int len,
+ unsigned int wait);
+
+unsigned int scp_get_vdec_hw_capa(struct mtk_scp *scp);
+unsigned int scp_get_venc_hw_capa(struct mtk_scp *scp);
+
+void *scp_mapping_dm_addr(struct mtk_scp *scp, u32 mem_addr);
+
+#endif /* _MTK_SCP_H */
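Tying the new interface together, a client driver would typically look up the SCP from its own platform device, register a handler for its IPI id and then send requests. The sketch below is illustrative only: struct my_msg and my_probe() are placeholders, the -EPROBE_DEFER fallback and the millisecond interpretation of the wait argument are assumptions, and error unwinding is trimmed:

#include <linux/errno.h>
#include <linux/remoteproc/mtk_scp.h>

struct my_msg { u32 cmd; u32 arg; };    /* hypothetical IPI payload */

static void my_ipi_handler(void *data, unsigned int len, void *priv)
{
        /* Runs when the SCP answers on the same IPI id. */
}

static int my_probe(struct platform_device *pdev)
{
        struct my_msg msg = { .cmd = 1, .arg = 0 };
        struct mtk_scp *scp = scp_get(pdev);
        int ret;

        if (!scp)
                return -EPROBE_DEFER;

        ret = scp_ipi_register(scp, SCP_IPI_VDEC_H264, my_ipi_handler, NULL);
        if (ret)
                return ret;

        /* Send the request and wait up to 100 ms for the SCP to take it. */
        return scp_ipi_send(scp, SCP_IPI_VDEC_H264, &msg, sizeof(msg), 100);
}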
diff --git a/include/linux/resctrl.h b/include/linux/resctrl.h
new file mode 100644
index 000000000000..daf5cf64c6a6
--- /dev/null
+++ b/include/linux/resctrl.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _RESCTRL_H
+#define _RESCTRL_H
+
+#ifdef CONFIG_PROC_CPU_RESCTRL
+
+int proc_resctrl_show(struct seq_file *m,
+ struct pid_namespace *ns,
+ struct pid *pid,
+ struct task_struct *tsk);
+
+#endif
+
+#endif /* _RESCTRL_H */
diff --git a/include/linux/reset-controller.h b/include/linux/reset-controller.h
index eaae6b4e9f24..ec35814e0bbb 100644
--- a/include/linux/reset-controller.h
+++ b/include/linux/reset-controller.h
@@ -62,7 +62,8 @@ struct reset_control_lookup {
* @of_node: corresponding device tree node as phandle target
* @of_reset_n_cells: number of cells in reset line specifiers
* @of_xlate: translation function to translate from specifier as found in the
- * device tree to id as given to the reset control ops
+ * device tree to id as given to the reset control ops, defaults
+ * to :c:func:`of_reset_simple_xlate`.
* @nr_resets: number of reset controls in this reset controller device
*/
struct reset_controller_dev {
diff --git a/include/linux/reset.h b/include/linux/reset.h
index eb597e8aa430..05aa9f440f48 100644
--- a/include/linux/reset.h
+++ b/include/linux/reset.h
@@ -203,12 +203,34 @@ static inline struct reset_control *reset_control_get_shared(
return __reset_control_get(dev, id, 0, true, false, false);
}
+/**
+ * reset_control_get_optional_exclusive - optional reset_control_get_exclusive()
+ * @dev: device to be reset by the controller
+ * @id: reset line name
+ *
+ * Optional variant of reset_control_get_exclusive(). If the requested reset
+ * is not specified in the device tree, this function returns NULL instead of
+ * an error.
+ *
+ * See reset_control_get_exclusive() for more information.
+ */
static inline struct reset_control *reset_control_get_optional_exclusive(
struct device *dev, const char *id)
{
return __reset_control_get(dev, id, 0, false, true, true);
}
+/**
+ * reset_control_get_optional_shared - optional reset_control_get_shared()
+ * @dev: device to be reset by the controller
+ * @id: reset line name
+ *
+ * Optional variant of reset_control_get_shared(). If the requested reset
+ * is not specified in the device tree, this function returns NULL instead of
+ * an error.
+ *
+ * See reset_control_get_shared() for more information.
+ */
static inline struct reset_control *reset_control_get_optional_shared(
struct device *dev, const char *id)
{
@@ -354,12 +376,36 @@ static inline struct reset_control *devm_reset_control_get_shared(
return __devm_reset_control_get(dev, id, 0, true, false, false);
}
+/**
+ * devm_reset_control_get_optional_exclusive - resource managed
+ * reset_control_get_optional_exclusive()
+ * @dev: device to be reset by the controller
+ * @id: reset line name
+ *
+ * Managed reset_control_get_optional_exclusive(). For reset controllers
+ * returned from this function, reset_control_put() is called automatically on
+ * driver detach.
+ *
+ * See reset_control_get_optional_exclusive() for more information.
+ */
static inline struct reset_control *devm_reset_control_get_optional_exclusive(
struct device *dev, const char *id)
{
return __devm_reset_control_get(dev, id, 0, false, true, true);
}
+/**
+ * devm_reset_control_get_optional_shared - resource managed
+ * reset_control_get_optional_shared()
+ * @dev: device to be reset by the controller
+ * @id: reset line name
+ *
+ * Managed reset_control_get_optional_shared(). For reset controllers returned
+ * from this function, reset_control_put() is called automatically on driver
+ * detach.
+ *
+ * See reset_control_get_optional_shared() for more information.
+ */
static inline struct reset_control *devm_reset_control_get_optional_shared(
struct device *dev, const char *id)
{
diff --git a/include/linux/resource_ext.h b/include/linux/resource_ext.h
index 06da59b23b79..ff0339df56af 100644
--- a/include/linux/resource_ext.h
+++ b/include/linux/resource_ext.h
@@ -66,4 +66,16 @@ resource_list_destroy_entry(struct resource_entry *entry)
#define resource_list_for_each_entry_safe(entry, tmp, list) \
list_for_each_entry_safe((entry), (tmp), (list), node)
+static inline struct resource_entry *
+resource_list_first_type(struct list_head *list, unsigned long type)
+{
+ struct resource_entry *entry;
+
+ resource_list_for_each_entry(entry, list) {
+ if (resource_type(entry->res) == type)
+ return entry;
+ }
+ return NULL;
+}
+
#endif /* _LINUX_RESOURCE_EXT_H */
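resource_list_first_type() simply returns the first entry of the requested resource type, which is the common case when a host-bridge driver needs, say, its first memory window from a parsed resource list. For example:

#include <linux/ioport.h>
#include <linux/resource_ext.h>

static struct resource *first_mem_window(struct list_head *resources)
{
        struct resource_entry *entry;

        entry = resource_list_first_type(resources, IORESOURCE_MEM);
        return entry ? entry->res : NULL;
}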
diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h
index beb9a9da1699..70ebef866cc8 100644
--- a/include/linux/rhashtable.h
+++ b/include/linux/rhashtable.h
@@ -972,9 +972,9 @@ static inline int rhashtable_lookup_insert_key(
/**
* rhashtable_lookup_get_insert_key - lookup and insert object into hash table
* @ht: hash table
+ * @key: key
* @obj: pointer to hash head inside object
* @params: hash table parameters
- * @data: pointer to element data already in hashes
*
* Just like rhashtable_lookup_insert_key(), but this function returns the
* object if it exists, NULL if it does not and the insertion was successful,
diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h
index 1a40277b512c..df0124eabece 100644
--- a/include/linux/ring_buffer.h
+++ b/include/linux/ring_buffer.h
@@ -6,7 +6,7 @@
#include <linux/seq_file.h>
#include <linux/poll.h>
-struct ring_buffer;
+struct trace_buffer;
struct ring_buffer_iter;
/*
@@ -77,13 +77,13 @@ u64 ring_buffer_event_time_stamp(struct ring_buffer_event *event);
* else
* ring_buffer_unlock_commit(buffer, event);
*/
-void ring_buffer_discard_commit(struct ring_buffer *buffer,
+void ring_buffer_discard_commit(struct trace_buffer *buffer,
struct ring_buffer_event *event);
/*
* size is in bytes for each per CPU buffer.
*/
-struct ring_buffer *
+struct trace_buffer *
__ring_buffer_alloc(unsigned long size, unsigned flags, struct lock_class_key *key);
/*
@@ -97,38 +97,38 @@ __ring_buffer_alloc(unsigned long size, unsigned flags, struct lock_class_key *k
__ring_buffer_alloc((size), (flags), &__key); \
})
-int ring_buffer_wait(struct ring_buffer *buffer, int cpu, int full);
-__poll_t ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu,
+int ring_buffer_wait(struct trace_buffer *buffer, int cpu, int full);
+__poll_t ring_buffer_poll_wait(struct trace_buffer *buffer, int cpu,
struct file *filp, poll_table *poll_table);
#define RING_BUFFER_ALL_CPUS -1
-void ring_buffer_free(struct ring_buffer *buffer);
+void ring_buffer_free(struct trace_buffer *buffer);
-int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size, int cpu);
+int ring_buffer_resize(struct trace_buffer *buffer, unsigned long size, int cpu);
-void ring_buffer_change_overwrite(struct ring_buffer *buffer, int val);
+void ring_buffer_change_overwrite(struct trace_buffer *buffer, int val);
-struct ring_buffer_event *ring_buffer_lock_reserve(struct ring_buffer *buffer,
+struct ring_buffer_event *ring_buffer_lock_reserve(struct trace_buffer *buffer,
unsigned long length);
-int ring_buffer_unlock_commit(struct ring_buffer *buffer,
+int ring_buffer_unlock_commit(struct trace_buffer *buffer,
struct ring_buffer_event *event);
-int ring_buffer_write(struct ring_buffer *buffer,
+int ring_buffer_write(struct trace_buffer *buffer,
unsigned long length, void *data);
-void ring_buffer_nest_start(struct ring_buffer *buffer);
-void ring_buffer_nest_end(struct ring_buffer *buffer);
+void ring_buffer_nest_start(struct trace_buffer *buffer);
+void ring_buffer_nest_end(struct trace_buffer *buffer);
struct ring_buffer_event *
-ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts,
+ring_buffer_peek(struct trace_buffer *buffer, int cpu, u64 *ts,
unsigned long *lost_events);
struct ring_buffer_event *
-ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts,
+ring_buffer_consume(struct trace_buffer *buffer, int cpu, u64 *ts,
unsigned long *lost_events);
struct ring_buffer_iter *
-ring_buffer_read_prepare(struct ring_buffer *buffer, int cpu, gfp_t flags);
+ring_buffer_read_prepare(struct trace_buffer *buffer, int cpu, gfp_t flags);
void ring_buffer_read_prepare_sync(void);
void ring_buffer_read_start(struct ring_buffer_iter *iter);
void ring_buffer_read_finish(struct ring_buffer_iter *iter);
@@ -140,59 +140,59 @@ ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts);
void ring_buffer_iter_reset(struct ring_buffer_iter *iter);
int ring_buffer_iter_empty(struct ring_buffer_iter *iter);
-unsigned long ring_buffer_size(struct ring_buffer *buffer, int cpu);
+unsigned long ring_buffer_size(struct trace_buffer *buffer, int cpu);
-void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu);
-void ring_buffer_reset(struct ring_buffer *buffer);
+void ring_buffer_reset_cpu(struct trace_buffer *buffer, int cpu);
+void ring_buffer_reset(struct trace_buffer *buffer);
#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
-int ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
- struct ring_buffer *buffer_b, int cpu);
+int ring_buffer_swap_cpu(struct trace_buffer *buffer_a,
+ struct trace_buffer *buffer_b, int cpu);
#else
static inline int
-ring_buffer_swap_cpu(struct ring_buffer *buffer_a,
- struct ring_buffer *buffer_b, int cpu)
+ring_buffer_swap_cpu(struct trace_buffer *buffer_a,
+ struct trace_buffer *buffer_b, int cpu)
{
return -ENODEV;
}
#endif
-bool ring_buffer_empty(struct ring_buffer *buffer);
-bool ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu);
-
-void ring_buffer_record_disable(struct ring_buffer *buffer);
-void ring_buffer_record_enable(struct ring_buffer *buffer);
-void ring_buffer_record_off(struct ring_buffer *buffer);
-void ring_buffer_record_on(struct ring_buffer *buffer);
-bool ring_buffer_record_is_on(struct ring_buffer *buffer);
-bool ring_buffer_record_is_set_on(struct ring_buffer *buffer);
-void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu);
-void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu);
-
-u64 ring_buffer_oldest_event_ts(struct ring_buffer *buffer, int cpu);
-unsigned long ring_buffer_bytes_cpu(struct ring_buffer *buffer, int cpu);
-unsigned long ring_buffer_entries(struct ring_buffer *buffer);
-unsigned long ring_buffer_overruns(struct ring_buffer *buffer);
-unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu);
-unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu);
-unsigned long ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu);
-unsigned long ring_buffer_dropped_events_cpu(struct ring_buffer *buffer, int cpu);
-unsigned long ring_buffer_read_events_cpu(struct ring_buffer *buffer, int cpu);
-
-u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu);
-void ring_buffer_normalize_time_stamp(struct ring_buffer *buffer,
+bool ring_buffer_empty(struct trace_buffer *buffer);
+bool ring_buffer_empty_cpu(struct trace_buffer *buffer, int cpu);
+
+void ring_buffer_record_disable(struct trace_buffer *buffer);
+void ring_buffer_record_enable(struct trace_buffer *buffer);
+void ring_buffer_record_off(struct trace_buffer *buffer);
+void ring_buffer_record_on(struct trace_buffer *buffer);
+bool ring_buffer_record_is_on(struct trace_buffer *buffer);
+bool ring_buffer_record_is_set_on(struct trace_buffer *buffer);
+void ring_buffer_record_disable_cpu(struct trace_buffer *buffer, int cpu);
+void ring_buffer_record_enable_cpu(struct trace_buffer *buffer, int cpu);
+
+u64 ring_buffer_oldest_event_ts(struct trace_buffer *buffer, int cpu);
+unsigned long ring_buffer_bytes_cpu(struct trace_buffer *buffer, int cpu);
+unsigned long ring_buffer_entries(struct trace_buffer *buffer);
+unsigned long ring_buffer_overruns(struct trace_buffer *buffer);
+unsigned long ring_buffer_entries_cpu(struct trace_buffer *buffer, int cpu);
+unsigned long ring_buffer_overrun_cpu(struct trace_buffer *buffer, int cpu);
+unsigned long ring_buffer_commit_overrun_cpu(struct trace_buffer *buffer, int cpu);
+unsigned long ring_buffer_dropped_events_cpu(struct trace_buffer *buffer, int cpu);
+unsigned long ring_buffer_read_events_cpu(struct trace_buffer *buffer, int cpu);
+
+u64 ring_buffer_time_stamp(struct trace_buffer *buffer, int cpu);
+void ring_buffer_normalize_time_stamp(struct trace_buffer *buffer,
int cpu, u64 *ts);
-void ring_buffer_set_clock(struct ring_buffer *buffer,
+void ring_buffer_set_clock(struct trace_buffer *buffer,
u64 (*clock)(void));
-void ring_buffer_set_time_stamp_abs(struct ring_buffer *buffer, bool abs);
-bool ring_buffer_time_stamp_abs(struct ring_buffer *buffer);
+void ring_buffer_set_time_stamp_abs(struct trace_buffer *buffer, bool abs);
+bool ring_buffer_time_stamp_abs(struct trace_buffer *buffer);
-size_t ring_buffer_nr_pages(struct ring_buffer *buffer, int cpu);
-size_t ring_buffer_nr_dirty_pages(struct ring_buffer *buffer, int cpu);
+size_t ring_buffer_nr_pages(struct trace_buffer *buffer, int cpu);
+size_t ring_buffer_nr_dirty_pages(struct trace_buffer *buffer, int cpu);
-void *ring_buffer_alloc_read_page(struct ring_buffer *buffer, int cpu);
-void ring_buffer_free_read_page(struct ring_buffer *buffer, int cpu, void *data);
-int ring_buffer_read_page(struct ring_buffer *buffer, void **data_page,
+void *ring_buffer_alloc_read_page(struct trace_buffer *buffer, int cpu);
+void ring_buffer_free_read_page(struct trace_buffer *buffer, int cpu, void *data);
+int ring_buffer_read_page(struct trace_buffer *buffer, void **data_page,
size_t len, int cpu, int full);
struct trace_seq;
diff --git a/include/linux/rpmsg/mtk_rpmsg.h b/include/linux/rpmsg/mtk_rpmsg.h
new file mode 100644
index 000000000000..363b60178040
--- /dev/null
+++ b/include/linux/rpmsg/mtk_rpmsg.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright 2019 Google LLC.
+ */
+
+#ifndef __LINUX_RPMSG_MTK_RPMSG_H
+#define __LINUX_RPMSG_MTK_RPMSG_H
+
+#include <linux/platform_device.h>
+#include <linux/remoteproc.h>
+
+typedef void (*ipi_handler_t)(void *data, unsigned int len, void *priv);
+
+/*
+ * struct mtk_rpmsg_info - IPI functions tied to the rpmsg device.
+ * @register_ipi: register IPI handler for an IPI id.
+ * @unregister_ipi: unregister IPI handler for a registered IPI id.
+ * @send_ipi: send IPI to an IPI id; wait is the timeout (in msecs) to wait
+ *            for a response, or 0 if there is no timeout.
+ * @ns_ipi_id: the IPI id used for name service, or -1 if name service isn't
+ * supported.
+ */
+struct mtk_rpmsg_info {
+ int (*register_ipi)(struct platform_device *pdev, u32 id,
+ ipi_handler_t handler, void *priv);
+ void (*unregister_ipi)(struct platform_device *pdev, u32 id);
+ int (*send_ipi)(struct platform_device *pdev, u32 id,
+ void *buf, unsigned int len, unsigned int wait);
+ int ns_ipi_id;
+};
+
+struct rproc_subdev *
+mtk_rpmsg_create_rproc_subdev(struct platform_device *pdev,
+ struct mtk_rpmsg_info *info);
+
+void mtk_rpmsg_destroy_rproc_subdev(struct rproc_subdev *subdev);
+
+#endif
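
The new header only carries the glue types; for orientation, here is a hedged sketch of how a MediaTek remoteproc driver might feed its IPI primitives into it. The scp_* helpers, SCP_IPI_NS and the attach function are placeholders invented for illustration, not part of this patch.

static struct mtk_rpmsg_info scp_rpmsg_info = {
	.register_ipi	= scp_ipi_register,	/* placeholder: bind a handler to an IPI id */
	.unregister_ipi	= scp_ipi_unregister,	/* placeholder */
	.send_ipi	= scp_ipi_send,		/* placeholder: blocking send, msec timeout */
	.ns_ipi_id	= SCP_IPI_NS,		/* placeholder name-service IPI id */
};

static int scp_rpmsg_attach(struct platform_device *pdev, struct rproc *rproc)
{
	struct rproc_subdev *subdev;

	subdev = mtk_rpmsg_create_rproc_subdev(pdev, &scp_rpmsg_info);
	if (!subdev)
		return -ENOMEM;

	rproc_add_subdev(rproc, subdev);
	/* teardown: rproc_remove_subdev(), then mtk_rpmsg_destroy_rproc_subdev(subdev) */
	return 0;
}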
diff --git a/include/linux/rtc.h b/include/linux/rtc.h
index df666cf29ef1..23990bd29040 100644
--- a/include/linux/rtc.h
+++ b/include/linux/rtc.h
@@ -159,11 +159,17 @@ struct rtc_device {
};
#define to_rtc_device(d) container_of(d, struct rtc_device, dev)
+#define rtc_lock(d) mutex_lock(&d->ops_lock)
+#define rtc_unlock(d) mutex_unlock(&d->ops_lock)
+
/* useful timestamps */
+#define RTC_TIMESTAMP_BEGIN_0000 -62167219200ULL /* 0000-01-01 00:00:00 */
#define RTC_TIMESTAMP_BEGIN_1900 -2208988800LL /* 1900-01-01 00:00:00 */
#define RTC_TIMESTAMP_BEGIN_2000 946684800LL /* 2000-01-01 00:00:00 */
#define RTC_TIMESTAMP_END_2063 2966371199LL /* 2063-12-31 23:59:59 */
+#define RTC_TIMESTAMP_END_2079 3471292799LL /* 2079-12-31 23:59:59 */
#define RTC_TIMESTAMP_END_2099 4102444799LL /* 2099-12-31 23:59:59 */
+#define RTC_TIMESTAMP_END_2199 7258118399LL /* 2199-12-31 23:59:59 */
#define RTC_TIMESTAMP_END_9999 253402300799LL /* 9999-12-31 23:59:59 */
extern struct rtc_device *devm_rtc_device_register(struct device *dev,
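
A minimal sketch of how a driver could combine the new pieces, assuming the usual range_min/range_max fields of struct rtc_device; the register access in the middle is purely illustrative.

	rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
	rtc->range_max = RTC_TIMESTAMP_END_2199;	/* hardware that counts up to 2199-12-31 */

	rtc_lock(rtc);
	/* touch chip registers that must not race with the core's ops_lock users */
	rtc_unlock(rtc);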
diff --git a/include/linux/rtc/ds1685.h b/include/linux/rtc/ds1685.h
index 43aec568ba7c..67ee9d20cc5a 100644
--- a/include/linux/rtc/ds1685.h
+++ b/include/linux/rtc/ds1685.h
@@ -42,14 +42,11 @@
struct ds1685_priv {
struct rtc_device *dev;
void __iomem *regs;
+ void __iomem *data;
u32 regstep;
- resource_size_t baseaddr;
- size_t size;
int irq_num;
bool bcd_mode;
bool no_irq;
- bool uie_unsupported;
- bool alloc_io_resources;
u8 (*read)(struct ds1685_priv *, int);
void (*write)(struct ds1685_priv *, int, u8);
void (*prepare_poweroff)(void);
@@ -74,12 +71,13 @@ struct ds1685_rtc_platform_data {
const bool bcd_mode;
const bool no_irq;
const bool uie_unsupported;
- const bool alloc_io_resources;
- u8 (*plat_read)(struct ds1685_priv *, int);
- void (*plat_write)(struct ds1685_priv *, int, u8);
void (*plat_prepare_poweroff)(void);
void (*plat_wake_alarm)(void);
void (*plat_post_ram_clear)(void);
+ enum {
+ ds1685_reg_direct,
+ ds1685_reg_indirect
+ } access_type;
};
diff --git a/include/linux/rtsx_pci.h b/include/linux/rtsx_pci.h
index f87da30a58b1..65b8142a7fed 100644
--- a/include/linux/rtsx_pci.h
+++ b/include/linux/rtsx_pci.h
@@ -1262,6 +1262,7 @@ struct rtsx_pcr {
#define PID_5250 0x5250
#define PID_525A 0x525A
#define PID_5260 0x5260
+#define PID_5261 0x5261
#define CHK_PCI_PID(pcr, pid) ((pcr)->pci->device == (pid))
#define PCI_VID(pcr) ((pcr)->pci->vendor)
diff --git a/include/linux/rwlock_api_smp.h b/include/linux/rwlock_api_smp.h
index 86ebb4bf9c6e..abfb53ab11be 100644
--- a/include/linux/rwlock_api_smp.h
+++ b/include/linux/rwlock_api_smp.h
@@ -215,14 +215,14 @@ static inline void __raw_write_lock(rwlock_t *lock)
static inline void __raw_write_unlock(rwlock_t *lock)
{
- rwlock_release(&lock->dep_map, 1, _RET_IP_);
+ rwlock_release(&lock->dep_map, _RET_IP_);
do_raw_write_unlock(lock);
preempt_enable();
}
static inline void __raw_read_unlock(rwlock_t *lock)
{
- rwlock_release(&lock->dep_map, 1, _RET_IP_);
+ rwlock_release(&lock->dep_map, _RET_IP_);
do_raw_read_unlock(lock);
preempt_enable();
}
@@ -230,7 +230,7 @@ static inline void __raw_read_unlock(rwlock_t *lock)
static inline void
__raw_read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
{
- rwlock_release(&lock->dep_map, 1, _RET_IP_);
+ rwlock_release(&lock->dep_map, _RET_IP_);
do_raw_read_unlock(lock);
local_irq_restore(flags);
preempt_enable();
@@ -238,7 +238,7 @@ __raw_read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
static inline void __raw_read_unlock_irq(rwlock_t *lock)
{
- rwlock_release(&lock->dep_map, 1, _RET_IP_);
+ rwlock_release(&lock->dep_map, _RET_IP_);
do_raw_read_unlock(lock);
local_irq_enable();
preempt_enable();
@@ -246,7 +246,7 @@ static inline void __raw_read_unlock_irq(rwlock_t *lock)
static inline void __raw_read_unlock_bh(rwlock_t *lock)
{
- rwlock_release(&lock->dep_map, 1, _RET_IP_);
+ rwlock_release(&lock->dep_map, _RET_IP_);
do_raw_read_unlock(lock);
__local_bh_enable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET);
}
@@ -254,7 +254,7 @@ static inline void __raw_read_unlock_bh(rwlock_t *lock)
static inline void __raw_write_unlock_irqrestore(rwlock_t *lock,
unsigned long flags)
{
- rwlock_release(&lock->dep_map, 1, _RET_IP_);
+ rwlock_release(&lock->dep_map, _RET_IP_);
do_raw_write_unlock(lock);
local_irq_restore(flags);
preempt_enable();
@@ -262,7 +262,7 @@ static inline void __raw_write_unlock_irqrestore(rwlock_t *lock,
static inline void __raw_write_unlock_irq(rwlock_t *lock)
{
- rwlock_release(&lock->dep_map, 1, _RET_IP_);
+ rwlock_release(&lock->dep_map, _RET_IP_);
do_raw_write_unlock(lock);
local_irq_enable();
preempt_enable();
@@ -270,7 +270,7 @@ static inline void __raw_write_unlock_irq(rwlock_t *lock)
static inline void __raw_write_unlock_bh(rwlock_t *lock)
{
- rwlock_release(&lock->dep_map, 1, _RET_IP_);
+ rwlock_release(&lock->dep_map, _RET_IP_);
do_raw_write_unlock(lock);
__local_bh_enable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET);
}
diff --git a/include/linux/rwlock_types.h b/include/linux/rwlock_types.h
index 857a72ceb794..3bd03e18061c 100644
--- a/include/linux/rwlock_types.h
+++ b/include/linux/rwlock_types.h
@@ -22,7 +22,11 @@ typedef struct {
#define RWLOCK_MAGIC 0xdeaf1eed
#ifdef CONFIG_DEBUG_LOCK_ALLOC
-# define RW_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname }
+# define RW_DEP_MAP_INIT(lockname) \
+ .dep_map = { \
+ .name = #lockname, \
+ .wait_type_inner = LD_WAIT_CONFIG, \
+ }
#else
# define RW_DEP_MAP_INIT(lockname)
#endif
diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h
index 00d6054687dd..7e5b2a4eb560 100644
--- a/include/linux/rwsem.h
+++ b/include/linux/rwsem.h
@@ -53,12 +53,6 @@ struct rw_semaphore {
#endif
};
-/*
- * Setting all bits of the owner field except bit 0 will indicate
- * that the rwsem is writer-owned with an unknown owner.
- */
-#define RWSEM_OWNER_UNKNOWN (-2L)
-
/* In all implementations count != 0 means locked */
static inline int rwsem_is_locked(struct rw_semaphore *sem)
{
@@ -71,7 +65,11 @@ static inline int rwsem_is_locked(struct rw_semaphore *sem)
/* Common initializer macros and functions */
#ifdef CONFIG_DEBUG_LOCK_ALLOC
-# define __RWSEM_DEP_MAP_INIT(lockname) , .dep_map = { .name = #lockname }
+# define __RWSEM_DEP_MAP_INIT(lockname) \
+ , .dep_map = { \
+ .name = #lockname, \
+ .wait_type_inner = LD_WAIT_SLEEP, \
+ }
#else
# define __RWSEM_DEP_MAP_INIT(lockname)
#endif
diff --git a/include/linux/sbitmap.h b/include/linux/sbitmap.h
index a986ac12a848..e40d019c3d9d 100644
--- a/include/linux/sbitmap.h
+++ b/include/linux/sbitmap.h
@@ -216,15 +216,6 @@ int sbitmap_get_shallow(struct sbitmap *sb, unsigned int alloc_hint,
*/
bool sbitmap_any_bit_set(const struct sbitmap *sb);
-/**
- * sbitmap_any_bit_clear() - Check for an unset bit in a &struct
- * sbitmap.
- * @sb: Bitmap to check.
- *
- * Return: true if any bit in the bitmap is clear, false otherwise.
- */
-bool sbitmap_any_bit_clear(const struct sbitmap *sb);
-
#define SB_NR_TO_INDEX(sb, bitnr) ((bitnr) >> (sb)->shift)
#define SB_NR_TO_BIT(sb, bitnr) ((bitnr) & ((1U << (sb)->shift) - 1U))
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 67a1d86981a9..5d0d2fc018e9 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -250,16 +250,21 @@ struct prev_cputime {
enum vtime_state {
/* Task is sleeping or running in a CPU with VTIME inactive: */
VTIME_INACTIVE = 0,
- /* Task runs in userspace in a CPU with VTIME active: */
- VTIME_USER,
+ /* Task is idle */
+ VTIME_IDLE,
/* Task runs in kernelspace in a CPU with VTIME active: */
VTIME_SYS,
+ /* Task runs in userspace in a CPU with VTIME active: */
+ VTIME_USER,
+	/* Task runs as a guest in a CPU with VTIME active: */
+ VTIME_GUEST,
};
struct vtime {
seqcount_t seqcount;
unsigned long long starttime;
enum vtime_state state;
+ unsigned int cpu;
u64 utime;
u64 stime;
u64 gtime;
@@ -351,28 +356,30 @@ struct util_est {
} __attribute__((__aligned__(sizeof(u64))));
/*
- * The load_avg/util_avg accumulates an infinite geometric series
- * (see __update_load_avg() in kernel/sched/fair.c).
+ * The load/runnable/util_avg accumulates an infinite geometric series
+ * (see __update_load_avg_cfs_rq() in kernel/sched/pelt.c).
*
* [load_avg definition]
*
* load_avg = runnable% * scale_load_down(load)
*
- * where runnable% is the time ratio that a sched_entity is runnable.
- * For cfs_rq, it is the aggregated load_avg of all runnable and
- * blocked sched_entities.
+ * [runnable_avg definition]
+ *
+ * runnable_avg = runnable% * SCHED_CAPACITY_SCALE
*
* [util_avg definition]
*
* util_avg = running% * SCHED_CAPACITY_SCALE
*
- * where running% is the time ratio that a sched_entity is running on
- * a CPU. For cfs_rq, it is the aggregated util_avg of all runnable
- * and blocked sched_entities.
+ * where runnable% is the time ratio that a sched_entity is runnable and
+ * running% the time ratio that a sched_entity is running.
+ *
+ * For cfs_rq, they are the aggregated values of all runnable and blocked
+ * sched_entities.
*
- * load_avg and util_avg don't direcly factor frequency scaling and CPU
- * capacity scaling. The scaling is done through the rq_clock_pelt that
- * is used for computing those signals (see update_rq_clock_pelt())
+ * The load/runnable/util_avg doesn't directly factor frequency scaling and CPU
+ * capacity scaling. The scaling is done through the rq_clock_pelt that is used
+ * for computing those signals (see update_rq_clock_pelt())
*
* N.B., the above ratios (runnable% and running%) themselves are in the
* range of [0, 1]. To do fixed point arithmetics, we therefore scale them
@@ -396,11 +403,11 @@ struct util_est {
struct sched_avg {
u64 last_update_time;
u64 load_sum;
- u64 runnable_load_sum;
+ u64 runnable_sum;
u32 util_sum;
u32 period_contrib;
unsigned long load_avg;
- unsigned long runnable_load_avg;
+ unsigned long runnable_avg;
unsigned long util_avg;
struct util_est util_est;
} ____cacheline_aligned;
@@ -444,7 +451,6 @@ struct sched_statistics {
struct sched_entity {
/* For load-balancing: */
struct load_weight load;
- unsigned long runnable_weight;
struct rb_node run_node;
struct list_head group_node;
unsigned int on_rq;
@@ -465,6 +471,8 @@ struct sched_entity {
struct cfs_rq *cfs_rq;
/* rq "owned" by this entity/group: */
struct cfs_rq *my_q;
+ /* cached value of my_q->h_nr_running */
+ unsigned long runnable_weight;
#endif
#ifdef CONFIG_SMP
@@ -777,9 +785,12 @@ struct task_struct {
unsigned frozen:1;
#endif
#ifdef CONFIG_BLK_CGROUP
- /* to be used once the psi infrastructure lands upstream. */
unsigned use_memdelay:1;
#endif
+#ifdef CONFIG_PSI
+ /* Stalled due to lack of memory */
+ unsigned in_memstall:1;
+#endif
unsigned long atomic_flags; /* Flags requiring atomic access. */
@@ -857,7 +868,7 @@ struct task_struct {
u64 start_time;
/* Boot based time in nsecs: */
- u64 real_start_time;
+ u64 start_boottime;
/* MM fault and swap info: this can arguably be seen as either mm-specific or thread-specific: */
unsigned long min_flt;
@@ -912,7 +923,7 @@ struct task_struct {
/* Signal handlers: */
struct signal_struct *signal;
- struct sighand_struct *sighand;
+ struct sighand_struct __rcu *sighand;
sigset_t blocked;
sigset_t real_blocked;
/* Restored if set_restore_sigmask() was used: */
@@ -965,6 +976,7 @@ struct task_struct {
#ifdef CONFIG_TRACE_IRQFLAGS
unsigned int irq_events;
+ unsigned int hardirq_threaded;
unsigned long hardirq_enable_ip;
unsigned long hardirq_disable_ip;
unsigned int hardirq_enable_event;
@@ -977,6 +989,7 @@ struct task_struct {
unsigned int softirq_enable_event;
int softirqs_enabled;
int softirq_context;
+ int irq_config;
#endif
#ifdef CONFIG_LOCKDEP
@@ -1054,6 +1067,8 @@ struct task_struct {
#endif
struct list_head pi_state_list;
struct futex_pi_state *pi_state_cache;
+ struct mutex futex_exit_mutex;
+ unsigned int futex_state;
#endif
#ifdef CONFIG_PERF_EVENTS
struct perf_event_context *perf_event_ctxp[perf_nr_task_contexts];
@@ -1203,6 +1218,8 @@ struct task_struct {
#endif /* CONFIG_TRACING */
#ifdef CONFIG_KCOV
+ /* See kernel/kcov.c for more details. */
+
/* Coverage collection mode enabled for this task (0 if disabled): */
unsigned int kcov_mode;
@@ -1214,6 +1231,12 @@ struct task_struct {
/* KCOV descriptor wired with this task or NULL: */
struct kcov *kcov;
+
+ /* KCOV common handle for remote coverage collection: */
+ u64 kcov_handle;
+
+ /* KCOV sequence number: */
+ int kcov_sequence;
#endif
#ifdef CONFIG_MEMCG
@@ -1442,7 +1465,6 @@ extern struct pid *cad_pid;
*/
#define PF_IDLE 0x00000002 /* I am an IDLE thread */
#define PF_EXITING 0x00000004 /* Getting shut down */
-#define PF_EXITPIDONE 0x00000008 /* PI exit done on shut down */
#define PF_VCPU 0x00000010 /* I'm a virtual CPU */
#define PF_WQ_WORKER 0x00000020 /* I'm a workqueue worker */
#define PF_FORKNOEXEC 0x00000040 /* Forked but didn't exec */
@@ -1463,11 +1485,11 @@ extern struct pid *cad_pid;
#define PF_KTHREAD 0x00200000 /* I am a kernel thread */
#define PF_RANDOMIZE 0x00400000 /* Randomize virtual address space */
#define PF_SWAPWRITE 0x00800000 /* Allowed to write to swap */
-#define PF_MEMSTALL 0x01000000 /* Stalled due to lack of memory */
#define PF_UMH 0x02000000 /* I'm an Usermodehelper process */
#define PF_NO_SETAFFINITY 0x04000000 /* Userland is not allowed to meddle with cpus_mask */
#define PF_MCE_EARLY 0x08000000 /* Early kill for mce process policy */
#define PF_MEMALLOC_NOCMA 0x10000000 /* All allocation request will have _GFP_MOVABLE cleared */
+#define PF_IO_WORKER 0x20000000 /* Task is an IO worker */
#define PF_FREEZER_SKIP 0x40000000 /* Freezer should not count it as freezable */
#define PF_SUSPEND_TASK 0x80000000 /* This thread called freeze_processes() and should not be frozen */
@@ -1914,11 +1936,11 @@ static inline void rseq_migrate(struct task_struct *t)
/*
* If parent process has a registered restartable sequences area, the
- * child inherits. Only applies when forking a process, not a thread.
+ * child inherits. Unregister rseq for a clone with CLONE_VM set.
*/
static inline void rseq_fork(struct task_struct *t, unsigned long clone_flags)
{
- if (clone_flags & CLONE_THREAD) {
+ if (clone_flags & CLONE_VM) {
t->rseq = NULL;
t->rseq_sig = 0;
t->rseq_event_mask = 0;
diff --git a/include/linux/sched/cpufreq.h b/include/linux/sched/cpufreq.h
index afa940cd50dc..3ed5aa18593f 100644
--- a/include/linux/sched/cpufreq.h
+++ b/include/linux/sched/cpufreq.h
@@ -9,9 +9,10 @@
*/
#define SCHED_CPUFREQ_IOWAIT (1U << 0)
-#define SCHED_CPUFREQ_MIGRATION (1U << 1)
#ifdef CONFIG_CPU_FREQ
+struct cpufreq_policy;
+
struct update_util_data {
void (*func)(struct update_util_data *data, u64 time, unsigned int flags);
};
@@ -20,6 +21,7 @@ void cpufreq_add_update_util_hook(int cpu, struct update_util_data *data,
void (*func)(struct update_util_data *data, u64 time,
unsigned int flags));
void cpufreq_remove_update_util_hook(int cpu);
+bool cpufreq_this_cpu_can_update(struct cpufreq_policy *policy);
static inline unsigned long map_util_freq(unsigned long util,
unsigned long freq, unsigned long cap)
diff --git a/include/linux/sched/isolation.h b/include/linux/sched/isolation.h
index 6c8512d3be88..0fbcbacd1b29 100644
--- a/include/linux/sched/isolation.h
+++ b/include/linux/sched/isolation.h
@@ -13,6 +13,7 @@ enum hk_flags {
HK_FLAG_TICK = (1 << 4),
HK_FLAG_DOMAIN = (1 << 5),
HK_FLAG_WQ = (1 << 6),
+ HK_FLAG_MANAGED_IRQ = (1 << 7),
};
#ifdef CONFIG_CPU_ISOLATION
diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h
index e6770012db18..c49257a3b510 100644
--- a/include/linux/sched/mm.h
+++ b/include/linux/sched/mm.h
@@ -117,8 +117,10 @@ extern struct mm_struct *get_task_mm(struct task_struct *task);
* succeeds.
*/
extern struct mm_struct *mm_access(struct task_struct *task, unsigned int mode);
-/* Remove the current tasks stale references to the old mm_struct */
-extern void mm_release(struct task_struct *, struct mm_struct *);
+/* Remove the current task's stale references to the old mm_struct on exit() */
+extern void exit_mm_release(struct task_struct *, struct mm_struct *);
+/* Remove the current task's stale references to the old mm_struct on exec() */
+extern void exec_mm_release(struct task_struct *, struct mm_struct *);
#ifdef CONFIG_MEMCG
extern void mm_update_next_owner(struct mm_struct *mm);
diff --git a/include/linux/sched/nohz.h b/include/linux/sched/nohz.h
index 1abe91ff6e4a..6d67e9a5af6b 100644
--- a/include/linux/sched/nohz.h
+++ b/include/linux/sched/nohz.h
@@ -15,9 +15,11 @@ static inline void nohz_balance_enter_idle(int cpu) { }
#ifdef CONFIG_NO_HZ_COMMON
void calc_load_nohz_start(void);
+void calc_load_nohz_remote(struct rq *rq);
void calc_load_nohz_stop(void);
#else
static inline void calc_load_nohz_start(void) { }
+static inline void calc_load_nohz_remote(struct rq *rq) { }
static inline void calc_load_nohz_stop(void) { }
#endif /* CONFIG_NO_HZ_COMMON */
diff --git a/include/linux/sched/task.h b/include/linux/sched/task.h
index 4b1c3b664f51..f1879884238e 100644
--- a/include/linux/sched/task.h
+++ b/include/linux/sched/task.h
@@ -26,6 +26,9 @@ struct kernel_clone_args {
unsigned long stack;
unsigned long stack_size;
unsigned long tls;
+ pid_t *set_tid;
+ /* Number of elements in *set_tid */
+ size_t set_tid_size;
};
/*
diff --git a/include/linux/sched/topology.h b/include/linux/sched/topology.h
index f341163fedc9..af9319e4cfb9 100644
--- a/include/linux/sched/topology.h
+++ b/include/linux/sched/topology.h
@@ -225,6 +225,14 @@ unsigned long arch_scale_cpu_capacity(int cpu)
}
#endif
+#ifndef arch_scale_thermal_pressure
+static __always_inline
+unsigned long arch_scale_thermal_pressure(int cpu)
+{
+ return 0;
+}
+#endif
+
static inline int task_node(const struct task_struct *p)
{
return cpu_to_node(task_cpu(p));
diff --git a/include/linux/scmi_protocol.h b/include/linux/scmi_protocol.h
index 881fea47c83d..5c873a59b387 100644
--- a/include/linux/scmi_protocol.h
+++ b/include/linux/scmi_protocol.h
@@ -257,6 +257,7 @@ enum scmi_std_protocol {
struct scmi_device {
u32 id;
u8 protocol_id;
+ const char *name;
struct device dev;
struct scmi_handle *handle;
};
@@ -264,11 +265,13 @@ struct scmi_device {
#define to_scmi_dev(d) container_of(d, struct scmi_device, dev)
struct scmi_device *
-scmi_device_create(struct device_node *np, struct device *parent, int protocol);
+scmi_device_create(struct device_node *np, struct device *parent, int protocol,
+ const char *name);
void scmi_device_destroy(struct scmi_device *scmi_dev);
struct scmi_device_id {
u8 protocol_id;
+ const char *name;
};
struct scmi_driver {
diff --git a/include/linux/seccomp.h b/include/linux/seccomp.h
index 84868d37b35d..4192369b8418 100644
--- a/include/linux/seccomp.h
+++ b/include/linux/seccomp.h
@@ -7,7 +7,8 @@
#define SECCOMP_FILTER_FLAG_MASK (SECCOMP_FILTER_FLAG_TSYNC | \
SECCOMP_FILTER_FLAG_LOG | \
SECCOMP_FILTER_FLAG_SPEC_ALLOW | \
- SECCOMP_FILTER_FLAG_NEW_LISTENER)
+ SECCOMP_FILTER_FLAG_NEW_LISTENER | \
+ SECCOMP_FILTER_FLAG_TSYNC_ESRCH)
#ifdef CONFIG_SECCOMP
@@ -33,10 +34,10 @@ struct seccomp {
#ifdef CONFIG_HAVE_ARCH_SECCOMP_FILTER
extern int __secure_computing(const struct seccomp_data *sd);
-static inline int secure_computing(const struct seccomp_data *sd)
+static inline int secure_computing(void)
{
if (unlikely(test_thread_flag(TIF_SECCOMP)))
- return __secure_computing(sd);
+ return __secure_computing(NULL);
return 0;
}
#else
@@ -59,7 +60,7 @@ struct seccomp { };
struct seccomp_filter { };
#ifdef CONFIG_HAVE_ARCH_SECCOMP_FILTER
-static inline int secure_computing(struct seccomp_data *sd) { return 0; }
+static inline int secure_computing(void) { return 0; }
#else
static inline void secure_computing_strict(int this_syscall) { return; }
#endif
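
With the argument dropped, an architecture's syscall-entry path no longer builds a seccomp_data before calling in; the data is fetched internally via __secure_computing(NULL). A hedged sketch of the call site in arch code:

	/* sketch of arch syscall_trace_enter(), details elided */
	if (secure_computing() == -1)
		return -1;	/* seccomp rejected or killed the syscall */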
diff --git a/include/linux/security.h b/include/linux/security.h
index 896da4429c17..a8d9310472df 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -116,15 +116,19 @@ enum lockdown_reason {
LOCKDOWN_MODULE_PARAMETERS,
LOCKDOWN_MMIOTRACE,
LOCKDOWN_DEBUGFS,
+ LOCKDOWN_XMON_WR,
LOCKDOWN_INTEGRITY_MAX,
LOCKDOWN_KCORE,
LOCKDOWN_KPROBES,
LOCKDOWN_BPF_READ,
LOCKDOWN_PERF,
LOCKDOWN_TRACEFS,
+ LOCKDOWN_XMON_RW,
LOCKDOWN_CONFIDENTIALITY_MAX,
};
+extern const char *const lockdown_reasons[LOCKDOWN_CONFIDENTIALITY_MAX+1];
+
/* These functions are in security/commoncap.c */
extern int cap_capable(const struct cred *cred, struct user_namespace *ns,
int cap, unsigned int opts);
@@ -1894,5 +1898,42 @@ static inline void security_bpf_prog_free(struct bpf_prog_aux *aux)
#endif /* CONFIG_SECURITY */
#endif /* CONFIG_BPF_SYSCALL */
-#endif /* ! __LINUX_SECURITY_H */
+#ifdef CONFIG_PERF_EVENTS
+struct perf_event_attr;
+struct perf_event;
+#ifdef CONFIG_SECURITY
+extern int security_perf_event_open(struct perf_event_attr *attr, int type);
+extern int security_perf_event_alloc(struct perf_event *event);
+extern void security_perf_event_free(struct perf_event *event);
+extern int security_perf_event_read(struct perf_event *event);
+extern int security_perf_event_write(struct perf_event *event);
+#else
+static inline int security_perf_event_open(struct perf_event_attr *attr,
+ int type)
+{
+ return 0;
+}
+
+static inline int security_perf_event_alloc(struct perf_event *event)
+{
+ return 0;
+}
+
+static inline void security_perf_event_free(struct perf_event *event)
+{
+}
+
+static inline int security_perf_event_read(struct perf_event *event)
+{
+ return 0;
+}
+
+static inline int security_perf_event_write(struct perf_event *event)
+{
+ return 0;
+}
+#endif /* CONFIG_SECURITY */
+#endif /* CONFIG_PERF_EVENTS */
+
+#endif /* ! __LINUX_SECURITY_H */
diff --git a/include/linux/sed-opal.h b/include/linux/sed-opal.h
index 53c28d750a45..1ac0d712a9c3 100644
--- a/include/linux/sed-opal.h
+++ b/include/linux/sed-opal.h
@@ -42,6 +42,7 @@ static inline bool is_sed_ioctl(unsigned int cmd)
case IOC_OPAL_PSID_REVERT_TPR:
case IOC_OPAL_MBR_DONE:
case IOC_OPAL_WRITE_SHADOW_MBR:
+ case IOC_OPAL_GENERIC_TABLE_RW:
return true;
}
return false;
diff --git a/include/linux/seq_buf.h b/include/linux/seq_buf.h
index aa5deb041c25..fb0205d87d3c 100644
--- a/include/linux/seq_buf.h
+++ b/include/linux/seq_buf.h
@@ -125,6 +125,9 @@ extern int seq_buf_putmem(struct seq_buf *s, const void *mem, unsigned int len);
extern int seq_buf_putmem_hex(struct seq_buf *s, const void *mem,
unsigned int len);
extern int seq_buf_path(struct seq_buf *s, const struct path *path, const char *esc);
+extern int seq_buf_hex_dump(struct seq_buf *s, const char *prefix_str,
+ int prefix_type, int rowsize, int groupsize,
+ const void *buf, size_t len, bool ascii);
#ifdef CONFIG_BINARY_PRINTF
extern int
diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
index 5998e1f4ff06..770c2bf3aa43 100644
--- a/include/linux/seq_file.h
+++ b/include/linux/seq_file.h
@@ -160,6 +160,19 @@ static const struct file_operations __name ## _fops = { \
.release = single_release, \
}
+#define DEFINE_PROC_SHOW_ATTRIBUTE(__name) \
+static int __name ## _open(struct inode *inode, struct file *file) \
+{ \
+ return single_open(file, __name ## _show, inode->i_private); \
+} \
+ \
+static const struct proc_ops __name ## _proc_ops = { \
+ .proc_open = __name ## _open, \
+ .proc_read = seq_read, \
+ .proc_lseek = seq_lseek, \
+ .proc_release = single_release, \
+}
+
static inline struct user_namespace *seq_user_ns(struct seq_file *seq)
{
#ifdef CONFIG_USER_NS
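
The new macro mirrors DEFINE_SHOW_ATTRIBUTE() but emits a struct proc_ops instead of a file_operations, so a single-record procfs file reduces to the following sketch (the foo names are placeholders):

static int foo_show(struct seq_file *m, void *v)
{
	seq_puts(m, "hello\n");
	return 0;
}
DEFINE_PROC_SHOW_ATTRIBUTE(foo);

/* registration, e.g. from an __init function */
proc_create("foo", 0444, NULL, &foo_proc_ops);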
diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
index bcf4cf26b8c8..0491d963d47e 100644
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -79,7 +79,7 @@ static inline void seqcount_lockdep_reader_access(const seqcount_t *s)
local_irq_save(flags);
seqcount_acquire_read(&l->dep_map, 0, 0, _RET_IP_);
- seqcount_release(&l->dep_map, 1, _RET_IP_);
+ seqcount_release(&l->dep_map, _RET_IP_);
local_irq_restore(flags);
}
@@ -384,7 +384,7 @@ static inline void write_seqcount_begin(seqcount_t *s)
static inline void write_seqcount_end(seqcount_t *s)
{
- seqcount_release(&s->dep_map, 1, _RET_IP_);
+ seqcount_release(&s->dep_map, _RET_IP_);
raw_write_seqcount_end(s);
}
diff --git a/include/linux/serial_8250.h b/include/linux/serial_8250.h
index bb2bc99388ca..6a8e8c48c882 100644
--- a/include/linux/serial_8250.h
+++ b/include/linux/serial_8250.h
@@ -25,6 +25,7 @@ struct plat_serial8250_port {
unsigned char regshift; /* register shift */
unsigned char iotype; /* UPIO_* */
unsigned char hub6;
+ unsigned char has_sysrq; /* supports magic SysRq */
upf_t flags; /* UPF_* flags */
unsigned int type; /* If UPF_FIXED_TYPE */
unsigned int (*serial_in)(struct uart_port *, int);
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
index 2b78cc734719..52404ef1694e 100644
--- a/include/linux/serial_core.h
+++ b/include/linux/serial_core.h
@@ -161,11 +161,6 @@ struct uart_port {
struct uart_icount icount; /* statistics */
struct console *cons; /* struct console, if any */
-#if defined(CONFIG_SERIAL_CORE_CONSOLE) || defined(SUPPORT_SYSRQ)
- unsigned long sysrq; /* sysrq timeout */
- unsigned int sysrq_ch; /* char for sysrq */
-#endif
-
/* flags must be updated while holding port mutex */
upf_t flags;
@@ -244,9 +239,13 @@ struct uart_port {
resource_size_t mapbase; /* for ioremap */
resource_size_t mapsize;
struct device *dev; /* parent device */
+
+ unsigned long sysrq; /* sysrq timeout */
+ unsigned int sysrq_ch; /* char for sysrq */
+ unsigned char has_sysrq;
+
unsigned char hub6; /* this should be in the 8250 driver */
unsigned char suspended;
- unsigned char unused[2];
const char *name; /* port name */
struct attribute_group *attr_group; /* port specific attributes */
const struct attribute_group **tty_groups; /* all attributes (serial core use only) */
@@ -460,81 +459,11 @@ extern void uart_handle_cts_change(struct uart_port *uport,
extern void uart_insert_char(struct uart_port *port, unsigned int status,
unsigned int overrun, unsigned int ch, unsigned int flag);
-#if defined(SUPPORT_SYSRQ) && defined(CONFIG_MAGIC_SYSRQ_SERIAL)
-static inline int
-uart_handle_sysrq_char(struct uart_port *port, unsigned int ch)
-{
- if (port->sysrq) {
- if (ch && time_before(jiffies, port->sysrq)) {
- handle_sysrq(ch);
- port->sysrq = 0;
- return 1;
- }
- port->sysrq = 0;
- }
- return 0;
-}
-static inline int
-uart_prepare_sysrq_char(struct uart_port *port, unsigned int ch)
-{
- if (port->sysrq) {
- if (ch && time_before(jiffies, port->sysrq)) {
- port->sysrq_ch = ch;
- port->sysrq = 0;
- return 1;
- }
- port->sysrq = 0;
- }
- return 0;
-}
-static inline void
-uart_unlock_and_check_sysrq(struct uart_port *port, unsigned long irqflags)
-{
- int sysrq_ch;
-
- sysrq_ch = port->sysrq_ch;
- port->sysrq_ch = 0;
-
- spin_unlock_irqrestore(&port->lock, irqflags);
-
- if (sysrq_ch)
- handle_sysrq(sysrq_ch);
-}
-#else
-static inline int
-uart_handle_sysrq_char(struct uart_port *port, unsigned int ch) { return 0; }
-static inline int
-uart_prepare_sysrq_char(struct uart_port *port, unsigned int ch) { return 0; }
-static inline void
-uart_unlock_and_check_sysrq(struct uart_port *port, unsigned long irqflags)
-{
- spin_unlock_irqrestore(&port->lock, irqflags);
-}
-#endif
-
-/*
- * We do the SysRQ and SAK checking like this...
- */
-static inline int uart_handle_break(struct uart_port *port)
-{
- struct uart_state *state = port->state;
-
- if (port->handle_break)
- port->handle_break(port);
-
-#ifdef SUPPORT_SYSRQ
- if (port->cons && port->cons->index == port->line) {
- if (!port->sysrq) {
- port->sysrq = jiffies + HZ*5;
- return 1;
- }
- port->sysrq = 0;
- }
-#endif
- if (port->flags & UPF_SAK)
- do_SAK(state->port.tty);
- return 0;
-}
+extern int uart_handle_sysrq_char(struct uart_port *port, unsigned int ch);
+extern int uart_prepare_sysrq_char(struct uart_port *port, unsigned int ch);
+extern void uart_unlock_and_check_sysrq(struct uart_port *port,
+ unsigned long irqflags);
+extern int uart_handle_break(struct uart_port *port);
/*
* UART_ENABLE_MS - determine if port should enable modem status irqs
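
The sysrq helpers move out of line, but the shape of a driver's receive path stays the same; a hedged sketch (the status check and register read are placeholders, error/overrun handling elided):

	/* inside the RX interrupt handler */
	spin_lock_irqsave(&port->lock, flags);
	while (rx_data_ready(port)) {			/* placeholder status check */
		ch = rx_read_byte(port);		/* placeholder register read */
		if (uart_prepare_sysrq_char(port, ch))
			continue;
		uart_insert_char(port, status, overrun_bit, ch, TTY_NORMAL);
	}
	uart_unlock_and_check_sysrq(port, flags);	/* drops the lock, then runs any pending sysrq */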
diff --git a/include/linux/sfp.h b/include/linux/sfp.h
index 1c35428e98bc..38893e4dd0f0 100644
--- a/include/linux/sfp.h
+++ b/include/linux/sfp.h
@@ -275,6 +275,61 @@ struct sfp_diag {
__be16 cal_v_offset;
} __packed;
+/* SFF8024 defined constants */
+enum {
+ SFF8024_ID_UNK = 0x00,
+ SFF8024_ID_SFF_8472 = 0x02,
+ SFF8024_ID_SFP = 0x03,
+ SFF8024_ID_DWDM_SFP = 0x0b,
+ SFF8024_ID_QSFP_8438 = 0x0c,
+ SFF8024_ID_QSFP_8436_8636 = 0x0d,
+ SFF8024_ID_QSFP28_8636 = 0x11,
+
+ SFF8024_ENCODING_UNSPEC = 0x00,
+ SFF8024_ENCODING_8B10B = 0x01,
+ SFF8024_ENCODING_4B5B = 0x02,
+ SFF8024_ENCODING_NRZ = 0x03,
+ SFF8024_ENCODING_8472_MANCHESTER= 0x04,
+ SFF8024_ENCODING_8472_SONET = 0x05,
+ SFF8024_ENCODING_8472_64B66B = 0x06,
+ SFF8024_ENCODING_8436_MANCHESTER= 0x06,
+ SFF8024_ENCODING_8436_SONET = 0x04,
+ SFF8024_ENCODING_8436_64B66B = 0x05,
+ SFF8024_ENCODING_256B257B = 0x07,
+ SFF8024_ENCODING_PAM4 = 0x08,
+
+ SFF8024_CONNECTOR_UNSPEC = 0x00,
+ /* codes 01-05 not supportable on SFP, but some modules have single SC */
+ SFF8024_CONNECTOR_SC = 0x01,
+ SFF8024_CONNECTOR_FIBERJACK = 0x06,
+ SFF8024_CONNECTOR_LC = 0x07,
+ SFF8024_CONNECTOR_MT_RJ = 0x08,
+ SFF8024_CONNECTOR_MU = 0x09,
+ SFF8024_CONNECTOR_SG = 0x0a,
+ SFF8024_CONNECTOR_OPTICAL_PIGTAIL= 0x0b,
+ SFF8024_CONNECTOR_MPO_1X12 = 0x0c,
+ SFF8024_CONNECTOR_MPO_2X16 = 0x0d,
+ SFF8024_CONNECTOR_HSSDC_II = 0x20,
+ SFF8024_CONNECTOR_COPPER_PIGTAIL= 0x21,
+ SFF8024_CONNECTOR_RJ45 = 0x22,
+ SFF8024_CONNECTOR_NOSEPARATE = 0x23,
+ SFF8024_CONNECTOR_MXC_2X16 = 0x24,
+
+ SFF8024_ECC_UNSPEC = 0x00,
+ SFF8024_ECC_100G_25GAUI_C2M_AOC = 0x01,
+ SFF8024_ECC_100GBASE_SR4_25GBASE_SR = 0x02,
+ SFF8024_ECC_100GBASE_LR4_25GBASE_LR = 0x03,
+ SFF8024_ECC_100GBASE_ER4_25GBASE_ER = 0x04,
+ SFF8024_ECC_100GBASE_SR10 = 0x05,
+ SFF8024_ECC_100GBASE_CR4 = 0x0b,
+ SFF8024_ECC_25GBASE_CR_S = 0x0c,
+ SFF8024_ECC_25GBASE_CR_N = 0x0d,
+ SFF8024_ECC_10GBASE_T_SFI = 0x16,
+ SFF8024_ECC_10GBASE_T_SR = 0x1c,
+ SFF8024_ECC_5GBASE_T = 0x1d,
+ SFF8024_ECC_2_5GBASE_T = 0x1e,
+};
+
/* SFP EEPROM registers */
enum {
SFP_PHYS_ID = 0x00,
@@ -309,34 +364,7 @@ enum {
SFP_SFF8472_COMPLIANCE = 0x5e,
SFP_CC_EXT = 0x5f,
- SFP_PHYS_ID_SFF = 0x02,
- SFP_PHYS_ID_SFP = 0x03,
SFP_PHYS_EXT_ID_SFP = 0x04,
- SFP_CONNECTOR_UNSPEC = 0x00,
- /* codes 01-05 not supportable on SFP, but some modules have single SC */
- SFP_CONNECTOR_SC = 0x01,
- SFP_CONNECTOR_FIBERJACK = 0x06,
- SFP_CONNECTOR_LC = 0x07,
- SFP_CONNECTOR_MT_RJ = 0x08,
- SFP_CONNECTOR_MU = 0x09,
- SFP_CONNECTOR_SG = 0x0a,
- SFP_CONNECTOR_OPTICAL_PIGTAIL = 0x0b,
- SFP_CONNECTOR_MPO_1X12 = 0x0c,
- SFP_CONNECTOR_MPO_2X16 = 0x0d,
- SFP_CONNECTOR_HSSDC_II = 0x20,
- SFP_CONNECTOR_COPPER_PIGTAIL = 0x21,
- SFP_CONNECTOR_RJ45 = 0x22,
- SFP_CONNECTOR_NOSEPARATE = 0x23,
- SFP_CONNECTOR_MXC_2X16 = 0x24,
- SFP_ENCODING_UNSPEC = 0x00,
- SFP_ENCODING_8B10B = 0x01,
- SFP_ENCODING_4B5B = 0x02,
- SFP_ENCODING_NRZ = 0x03,
- SFP_ENCODING_8472_MANCHESTER = 0x04,
- SFP_ENCODING_8472_SONET = 0x05,
- SFP_ENCODING_8472_64B66B = 0x06,
- SFP_ENCODING_256B257B = 0x07,
- SFP_ENCODING_PAM4 = 0x08,
SFP_OPTIONS_HIGH_POWER_LEVEL = BIT(13),
SFP_OPTIONS_PAGING_A2 = BIT(12),
SFP_OPTIONS_RETIMER = BIT(11),
@@ -428,6 +456,10 @@ enum {
SFP_TEC_CUR = 0x6c,
SFP_STATUS = 0x6e,
+ SFP_STATUS_TX_DISABLE = BIT(7),
+ SFP_STATUS_TX_DISABLE_FORCE = BIT(6),
+ SFP_STATUS_TX_FAULT = BIT(2),
+ SFP_STATUS_RX_LOS = BIT(1),
SFP_ALARM0 = 0x70,
SFP_ALARM0_TEMP_HIGH = BIT(7),
SFP_ALARM0_TEMP_LOW = BIT(6),
@@ -475,6 +507,8 @@ struct sfp_bus;
* @module_insert: called after a module has been detected to determine
* whether the module is supported for the upstream device.
* @module_remove: called after the module has been removed.
+ * @module_start: called after the PHY probe step
+ * @module_stop: called before the PHY is removed
* @link_down: called when the link is non-operational for whatever
* reason.
* @link_up: called when the link is operational.
@@ -488,6 +522,8 @@ struct sfp_upstream_ops {
void (*detach)(void *priv, struct sfp_bus *bus);
int (*module_insert)(void *priv, const struct sfp_eeprom_id *id);
void (*module_remove)(void *priv);
+ int (*module_start)(void *priv);
+ void (*module_stop)(void *priv);
void (*link_down)(void *priv);
void (*link_up)(void *priv);
int (*connect_phy)(void *priv, struct phy_device *);
@@ -497,10 +533,10 @@ struct sfp_upstream_ops {
#if IS_ENABLED(CONFIG_SFP)
int sfp_parse_port(struct sfp_bus *bus, const struct sfp_eeprom_id *id,
unsigned long *support);
+bool sfp_may_have_phy(struct sfp_bus *bus, const struct sfp_eeprom_id *id);
void sfp_parse_support(struct sfp_bus *bus, const struct sfp_eeprom_id *id,
unsigned long *support);
phy_interface_t sfp_select_interface(struct sfp_bus *bus,
- const struct sfp_eeprom_id *id,
unsigned long *link_modes);
int sfp_get_module_info(struct sfp_bus *bus, struct ethtool_modinfo *modinfo);
@@ -508,10 +544,11 @@ int sfp_get_module_eeprom(struct sfp_bus *bus, struct ethtool_eeprom *ee,
u8 *data);
void sfp_upstream_start(struct sfp_bus *bus);
void sfp_upstream_stop(struct sfp_bus *bus);
-struct sfp_bus *sfp_register_upstream(struct fwnode_handle *fwnode,
- void *upstream,
- const struct sfp_upstream_ops *ops);
-void sfp_unregister_upstream(struct sfp_bus *bus);
+void sfp_bus_put(struct sfp_bus *bus);
+struct sfp_bus *sfp_bus_find_fwnode(struct fwnode_handle *fwnode);
+int sfp_bus_add_upstream(struct sfp_bus *bus, void *upstream,
+ const struct sfp_upstream_ops *ops);
+void sfp_bus_del_upstream(struct sfp_bus *bus);
#else
static inline int sfp_parse_port(struct sfp_bus *bus,
const struct sfp_eeprom_id *id,
@@ -520,6 +557,12 @@ static inline int sfp_parse_port(struct sfp_bus *bus,
return PORT_OTHER;
}
+static inline bool sfp_may_have_phy(struct sfp_bus *bus,
+ const struct sfp_eeprom_id *id)
+{
+ return false;
+}
+
static inline void sfp_parse_support(struct sfp_bus *bus,
const struct sfp_eeprom_id *id,
unsigned long *support)
@@ -527,7 +570,6 @@ static inline void sfp_parse_support(struct sfp_bus *bus,
}
static inline phy_interface_t sfp_select_interface(struct sfp_bus *bus,
- const struct sfp_eeprom_id *id,
unsigned long *link_modes)
{
return PHY_INTERFACE_MODE_NA;
@@ -553,14 +595,22 @@ static inline void sfp_upstream_stop(struct sfp_bus *bus)
{
}
-static inline struct sfp_bus *sfp_register_upstream(
- struct fwnode_handle *fwnode, void *upstream,
- const struct sfp_upstream_ops *ops)
+static inline void sfp_bus_put(struct sfp_bus *bus)
+{
+}
+
+static inline struct sfp_bus *sfp_bus_find_fwnode(struct fwnode_handle *fwnode)
+{
+ return NULL;
+}
+
+static inline int sfp_bus_add_upstream(struct sfp_bus *bus, void *upstream,
+ const struct sfp_upstream_ops *ops)
{
- return (struct sfp_bus *)-1;
+ return 0;
}
-static inline void sfp_unregister_upstream(struct sfp_bus *bus)
+static inline void sfp_bus_del_upstream(struct sfp_bus *bus)
{
}
#endif
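
The registration API changes from a single sfp_register_upstream() into a find/add/put/del sequence; a hedged sketch of the new flow from an upstream driver (error handling trimmed, and it is assumed here that sfp_bus_add_upstream() keeps its own reference, which is why the local one is dropped right after):

	/* probe path: resolve the bus from firmware, then attach */
	priv->sfp_bus = sfp_bus_find_fwnode(fwnode);
	if (IS_ERR(priv->sfp_bus))
		return PTR_ERR(priv->sfp_bus);	/* a NULL return (no SFP node) also needs handling */

	ret = sfp_bus_add_upstream(priv->sfp_bus, priv, &my_upstream_ops);
	sfp_bus_put(priv->sfp_bus);
	if (ret)
		return ret;

	/* remove path */
	sfp_bus_del_upstream(priv->sfp_bus);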
diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h
index de8e4b71e3ba..d56fefef8905 100644
--- a/include/linux/shmem_fs.h
+++ b/include/linux/shmem_fs.h
@@ -8,6 +8,7 @@
#include <linux/pagemap.h>
#include <linux/percpu_counter.h>
#include <linux/xattr.h>
+#include <linux/fs_parser.h>
/* inode in-kernel data */
@@ -49,7 +50,7 @@ static inline struct shmem_inode_info *SHMEM_I(struct inode *inode)
/*
* Functions in mm/shmem.c called directly from elsewhere:
*/
-extern const struct fs_parameter_description shmem_fs_parameters;
+extern const struct fs_parameter_spec shmem_fs_parameters[];
extern int shmem_init(void);
extern int shmem_init_fs_context(struct fs_context *fc);
extern struct file *shmem_file_setup(const char *name,
diff --git a/include/linux/signal.h b/include/linux/signal.h
index 1a5f88316b08..05bacd2ab135 100644
--- a/include/linux/signal.h
+++ b/include/linux/signal.h
@@ -444,12 +444,12 @@ void signals_init(void);
int restore_altstack(const stack_t __user *);
int __save_altstack(stack_t __user *, unsigned long);
-#define save_altstack_ex(uss, sp) do { \
+#define unsafe_save_altstack(uss, sp, label) do { \
stack_t __user *__uss = uss; \
struct task_struct *t = current; \
- put_user_ex((void __user *)t->sas_ss_sp, &__uss->ss_sp); \
- put_user_ex(t->sas_ss_flags, &__uss->ss_flags); \
- put_user_ex(t->sas_ss_size, &__uss->ss_size); \
+ unsafe_put_user((void __user *)t->sas_ss_sp, &__uss->ss_sp, label); \
+ unsafe_put_user(t->sas_ss_flags, &__uss->ss_flags, label); \
+ unsafe_put_user(t->sas_ss_size, &__uss->ss_size, label); \
if (t->sas_ss_flags & SS_AUTODISARM) \
sas_ss_reset(t); \
} while (0);
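
The rename reflects that the macro now only works inside a user_access_begin()/user_access_end() window, jumping to a label on fault instead of relying on put_user_ex(); a hedged sketch of a signal-frame setup using it (the frame layout is illustrative):

	if (!user_access_begin(frame, sizeof(*frame)))
		return -EFAULT;
	unsafe_save_altstack(&frame->uc.uc_stack, regs->sp, Efault);
	/* ... further unsafe_put_user() calls for the rest of the frame ... */
	user_access_end();
	return 0;

Efault:
	user_access_end();
	return -EFAULT;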
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 8688f7adfda7..e59620234415 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -592,6 +592,8 @@ enum {
SKB_GSO_UDP = 1 << 16,
SKB_GSO_UDP_L4 = 1 << 17,
+
+ SKB_GSO_FRAGLIST = 1 << 18,
};
#if BITS_PER_LONG > 32
@@ -609,9 +611,15 @@ typedef unsigned char *sk_buff_data_t;
* @next: Next buffer in list
* @prev: Previous buffer in list
* @tstamp: Time we arrived/left
+ * @skb_mstamp_ns: (aka @tstamp) earliest departure time; start point
+ * for retransmit timer
* @rbnode: RB tree node, alternative to next/prev for netem/tcp
+ * @list: queue head
* @sk: Socket we are owned by
+ * @ip_defrag_offset: (aka @sk) alternate use of @sk, used in
+ * fragmentation management
* @dev: Device we arrived on/are leaving by
+ * @dev_scratch: (aka @dev) alternate use of @dev when @dev would be %NULL
* @cb: Control buffer. Free for use by every layer. Put private vars here
* @_skb_refdst: destination entry (with norefcount bit)
* @sp: the security path, used for xfrm
@@ -630,12 +638,15 @@ typedef unsigned char *sk_buff_data_t;
* @pkt_type: Packet class
* @fclone: skbuff clone status
* @ipvs_property: skbuff is owned by ipvs
+ * @inner_protocol_type: whether the inner protocol is
+ * ENCAP_TYPE_ETHER or ENCAP_TYPE_IPPROTO
+ * @remcsum_offload: remote checksum offload is enabled
* @offload_fwd_mark: Packet was L2-forwarded in hardware
* @offload_l3_fwd_mark: Packet was L3-forwarded in hardware
* @tc_skip_classify: do not classify packet. set by IFB device
* @tc_at_ingress: used within tc_classify to distinguish in/egress
- * @tc_redirected: packet was redirected by a tc action
- * @tc_from_ingress: if tc_redirected, tc_at_ingress at time of redirect
+ * @redirected: packet was redirected by packet classifier
+ * @from_ingress: packet was redirected from the ingress path
* @peeked: this packet has been seen already, so stats have been
* done for it, don't do them again
* @nf_trace: netfilter packet trace flag
@@ -648,6 +659,8 @@ typedef unsigned char *sk_buff_data_t;
* @tc_index: Traffic control index
* @hash: the packet hash
* @queue_mapping: Queue mapping for multiqueue devices
+ * @head_frag: skb was allocated from page fragments,
+ * not allocated by kmalloc() or vmalloc().
* @pfmemalloc: skbuff was allocated from PFMEMALLOC reserves
* @active_extensions: active extensions (skb_ext_id types)
* @ndisc_nodetype: router type (from link layer)
@@ -658,15 +671,28 @@ typedef unsigned char *sk_buff_data_t;
* @wifi_acked_valid: wifi_acked was set
* @wifi_acked: whether frame was acked on wifi or not
* @no_fcs: Request NIC to treat last 4 bytes as Ethernet FCS
+ * @encapsulation: indicates the inner headers in the skbuff are valid
+ * @encap_hdr_csum: software checksum is needed
+ * @csum_valid: checksum is already valid
* @csum_not_inet: use CRC32c to resolve CHECKSUM_PARTIAL
+ * @csum_complete_sw: checksum was completed by software
+ * @csum_level: indicates the number of consecutive checksums found in
+ * the packet minus one that have been verified as
+ * CHECKSUM_UNNECESSARY (max 3)
* @dst_pending_confirm: need to confirm neighbour
* @decrypted: Decrypted SKB
* @napi_id: id of the NAPI struct this skb came from
+ * @sender_cpu: (aka @napi_id) source CPU in XPS
* @secmark: security marking
* @mark: Generic packet mark
+ * @reserved_tailroom: (aka @mark) number of bytes of free space available
+ * at the tail of an sk_buff
+ * @vlan_present: VLAN tag is present
* @vlan_proto: vlan encapsulation protocol
* @vlan_tci: vlan tag control information
* @inner_protocol: Protocol (encapsulation)
+ * @inner_ipproto: (aka @inner_protocol) stores ipproto when
+ * skb->inner_protocol_type == ENCAP_TYPE_IPPROTO;
* @inner_transport_header: Inner transport layer header (encapsulation)
* @inner_network_header: Network layer header (encapsulation)
* @inner_mac_header: Link layer header (encapsulation)
@@ -748,7 +774,9 @@ struct sk_buff {
#endif
#define CLONED_OFFSET() offsetof(struct sk_buff, __cloned_offset)
+ /* private: */
__u8 __cloned_offset[0];
+ /* public: */
__u8 cloned:1,
nohdr:1,
fclone:2,
@@ -773,7 +801,9 @@ struct sk_buff {
#endif
#define PKT_TYPE_OFFSET() offsetof(struct sk_buff, __pkt_type_offset)
+ /* private: */
__u8 __pkt_type_offset[0];
+ /* public: */
__u8 pkt_type:3;
__u8 ignore_df:1;
__u8 nf_trace:1;
@@ -796,7 +826,9 @@ struct sk_buff {
#define PKT_VLAN_PRESENT_BIT 0
#endif
#define PKT_VLAN_PRESENT_OFFSET() offsetof(struct sk_buff, __pkt_vlan_present_offset)
+ /* private: */
__u8 __pkt_vlan_present_offset[0];
+ /* public: */
__u8 vlan_present:1;
__u8 csum_complete_sw:1;
__u8 csum_level:2;
@@ -816,8 +848,10 @@ struct sk_buff {
#ifdef CONFIG_NET_CLS_ACT
__u8 tc_skip_classify:1;
__u8 tc_at_ingress:1;
- __u8 tc_redirected:1;
- __u8 tc_from_ingress:1;
+#endif
+#ifdef CONFIG_NET_REDIRECT
+ __u8 redirected:1;
+ __u8 from_ingress:1;
#endif
#ifdef CONFIG_TLS_DEVICE
__u8 decrypted:1;
@@ -1478,6 +1512,11 @@ static inline void skb_mark_not_on_list(struct sk_buff *skb)
skb->next = NULL;
}
+/* Iterate through singly-linked GSO fragments of an skb. */
+#define skb_list_walk_safe(first, skb, next_skb) \
+ for ((skb) = (first), (next_skb) = (skb) ? (skb)->next : NULL; (skb); \
+ (skb) = (next_skb), (next_skb) = (skb) ? (skb)->next : NULL)
+
static inline void skb_list_del_init(struct sk_buff *skb)
{
__list_del_entry(&skb->list);
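
The walker samples the next pointer up front, so the current fragment may be unlinked or consumed inside the loop body; for example, handing each GSO segment of a returned list to a transmit routine might look like this sketch (my_xmit_one() is a placeholder):

	struct sk_buff *skb, *next;

	skb_list_walk_safe(segs, skb, next) {
		skb_mark_not_on_list(skb);
		my_xmit_one(dev, skb);		/* placeholder per-segment consumer */
	}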
@@ -1795,7 +1834,7 @@ static inline struct sk_buff *skb_peek_next(struct sk_buff *skb,
*/
static inline struct sk_buff *skb_peek_tail(const struct sk_buff_head *list_)
{
- struct sk_buff *skb = list_->prev;
+ struct sk_buff *skb = READ_ONCE(list_->prev);
if (skb == (struct sk_buff *)list_)
skb = NULL;
@@ -1815,6 +1854,18 @@ static inline __u32 skb_queue_len(const struct sk_buff_head *list_)
}
/**
+ * skb_queue_len_lockless - get queue length
+ * @list_: list to measure
+ *
+ * Return the length of an &sk_buff queue.
+ * This variant can be used in lockless contexts.
+ */
+static inline __u32 skb_queue_len_lockless(const struct sk_buff_head *list_)
+{
+ return READ_ONCE(list_->qlen);
+}
+
+/**
* __skb_queue_head_init - initialize non-spinlock portions of sk_buff_head
* @list: queue to initialize
*
@@ -1861,7 +1912,9 @@ static inline void __skb_insert(struct sk_buff *newsk,
struct sk_buff *prev, struct sk_buff *next,
struct sk_buff_head *list)
{
- /* see skb_queue_empty_lockless() for the opposite READ_ONCE() */
+ /* See skb_queue_empty_lockless() and skb_peek_tail()
+ * for the opposite READ_ONCE()
+ */
WRITE_ONCE(newsk->next, next);
WRITE_ONCE(newsk->prev, prev);
WRITE_ONCE(next->prev, newsk);
@@ -2017,7 +2070,7 @@ static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
{
struct sk_buff *next, *prev;
- list->qlen--;
+ WRITE_ONCE(list->qlen, list->qlen - 1);
next = skb->next;
prev = skb->prev;
skb->next = skb->prev = NULL;
@@ -2277,12 +2330,12 @@ static inline void *pskb_pull(struct sk_buff *skb, unsigned int len)
return unlikely(len > skb->len) ? NULL : __pskb_pull(skb, len);
}
-static inline int pskb_may_pull(struct sk_buff *skb, unsigned int len)
+static inline bool pskb_may_pull(struct sk_buff *skb, unsigned int len)
{
if (likely(len <= skb_headlen(skb)))
- return 1;
+ return true;
if (unlikely(len > skb->len))
- return 0;
+ return false;
return __pskb_pull_tail(skb, len - skb_headlen(skb)) != NULL;
}
@@ -3457,7 +3510,8 @@ static inline void skb_frag_list_init(struct sk_buff *skb)
for (iter = skb_shinfo(skb)->frag_list; iter; iter = iter->next)
-int __skb_wait_for_more_packets(struct sock *sk, int *err, long *timeo_p,
+int __skb_wait_for_more_packets(struct sock *sk, struct sk_buff_head *queue,
+ int *err, long *timeo_p,
const struct sk_buff *skb);
struct sk_buff *__skb_try_recv_from_queue(struct sock *sk,
struct sk_buff_head *queue,
@@ -3466,12 +3520,16 @@ struct sk_buff *__skb_try_recv_from_queue(struct sock *sk,
struct sk_buff *skb),
int *off, int *err,
struct sk_buff **last);
-struct sk_buff *__skb_try_recv_datagram(struct sock *sk, unsigned flags,
+struct sk_buff *__skb_try_recv_datagram(struct sock *sk,
+ struct sk_buff_head *queue,
+ unsigned int flags,
void (*destructor)(struct sock *sk,
struct sk_buff *skb),
int *off, int *err,
struct sk_buff **last);
-struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned flags,
+struct sk_buff *__skb_recv_datagram(struct sock *sk,
+ struct sk_buff_head *sk_queue,
+ unsigned int flags,
void (*destructor)(struct sock *sk,
struct sk_buff *skb),
int *off, int *err);
@@ -3521,14 +3579,17 @@ void skb_scrub_packet(struct sk_buff *skb, bool xnet);
bool skb_gso_validate_network_len(const struct sk_buff *skb, unsigned int mtu);
bool skb_gso_validate_mac_len(const struct sk_buff *skb, unsigned int len);
struct sk_buff *skb_segment(struct sk_buff *skb, netdev_features_t features);
+struct sk_buff *skb_segment_list(struct sk_buff *skb, netdev_features_t features,
+ unsigned int offset);
struct sk_buff *skb_vlan_untag(struct sk_buff *skb);
int skb_ensure_writable(struct sk_buff *skb, int write_len);
int __skb_vlan_pop(struct sk_buff *skb, u16 *vlan_tci);
int skb_vlan_pop(struct sk_buff *skb);
int skb_vlan_push(struct sk_buff *skb, __be16 vlan_proto, u16 vlan_tci);
int skb_mpls_push(struct sk_buff *skb, __be32 mpls_lse, __be16 mpls_proto,
- int mac_len);
-int skb_mpls_pop(struct sk_buff *skb, __be16 next_proto, int mac_len);
+ int mac_len, bool ethernet);
+int skb_mpls_pop(struct sk_buff *skb, __be16 next_proto, int mac_len,
+ bool ethernet);
int skb_mpls_update_lse(struct sk_buff *skb, __be32 mpls_lse);
int skb_mpls_dec_ttl(struct sk_buff *skb);
struct sk_buff *pskb_extract(struct sk_buff *skb, int off, int to_copy,
@@ -3656,9 +3717,12 @@ static inline void skb_get_new_timestamp(const struct sk_buff *skb,
}
static inline void skb_get_timestampns(const struct sk_buff *skb,
- struct timespec *stamp)
+ struct __kernel_old_timespec *stamp)
{
- *stamp = ktime_to_timespec(skb->tstamp);
+ struct timespec64 ts = ktime_to_timespec64(skb->tstamp);
+
+ stamp->tv_sec = ts.tv_sec;
+ stamp->tv_nsec = ts.tv_nsec;
}
static inline void skb_get_new_timestampns(const struct sk_buff *skb,
@@ -4086,6 +4150,9 @@ enum skb_ext_id {
#if IS_ENABLED(CONFIG_NET_TC_SKB_EXT)
TC_SKB_EXT,
#endif
+#if IS_ENABLED(CONFIG_MPTCP)
+ SKB_EXT_MPTCP,
+#endif
SKB_EXT_NUM, /* must be last */
};
@@ -4106,6 +4173,9 @@ struct skb_ext {
char data[0] __aligned(8);
};
+struct skb_ext *__skb_ext_alloc(void);
+void *__skb_ext_set(struct sk_buff *skb, enum skb_ext_id id,
+ struct skb_ext *ext);
void *skb_ext_add(struct sk_buff *skb, enum skb_ext_id id);
void __skb_ext_del(struct sk_buff *skb, enum skb_ext_id id);
void __skb_ext_put(struct skb_ext *ext);
@@ -4511,5 +4581,31 @@ static inline __wsum lco_csum(struct sk_buff *skb)
return csum_partial(l4_hdr, csum_start - l4_hdr, partial);
}
+static inline bool skb_is_redirected(const struct sk_buff *skb)
+{
+#ifdef CONFIG_NET_REDIRECT
+ return skb->redirected;
+#else
+ return false;
+#endif
+}
+
+static inline void skb_set_redirected(struct sk_buff *skb, bool from_ingress)
+{
+#ifdef CONFIG_NET_REDIRECT
+ skb->redirected = 1;
+ skb->from_ingress = from_ingress;
+ if (skb->from_ingress)
+ skb->tstamp = 0;
+#endif
+}
+
+static inline void skb_reset_redirect(struct sk_buff *skb)
+{
+#ifdef CONFIG_NET_REDIRECT
+ skb->redirected = 0;
+#endif
+}
+
#endif /* __KERNEL__ */
#endif /* _LINUX_SKBUFF_H */
diff --git a/include/linux/skmsg.h b/include/linux/skmsg.h
index ce7055259877..14d61bba0b79 100644
--- a/include/linux/skmsg.h
+++ b/include/linux/skmsg.h
@@ -14,6 +14,7 @@
#include <net/strparser.h>
#define MAX_MSG_FRAGS MAX_SKB_FRAGS
+#define NR_MSG_FRAG_IDS (MAX_MSG_FRAGS + 1)
enum __sk_action {
__SK_DROP = 0,
@@ -28,13 +29,16 @@ struct sk_msg_sg {
u32 end;
u32 size;
u32 copybreak;
- bool copy[MAX_MSG_FRAGS];
- /* The extra element is used for chaining the front and sections when
- * the list becomes partitioned (e.g. end < start). The crypto APIs
- * require the chaining.
+ unsigned long copy;
+ /* The extra two elements:
+ * 1) used for chaining the front and sections when the list becomes
+ * partitioned (e.g. end < start). The crypto APIs require the
+ * chaining;
+	 * 2) to chain trailer SG entries after the message.
*/
- struct scatterlist data[MAX_MSG_FRAGS + 1];
+ struct scatterlist data[MAX_MSG_FRAGS + 2];
};
+static_assert(BITS_PER_LONG >= NR_MSG_FRAG_IDS);
/* UAPI in filter.c depends on struct sk_msg_sg being first element. */
struct sk_msg {
@@ -141,13 +145,13 @@ static inline void sk_msg_apply_bytes(struct sk_psock *psock, u32 bytes)
static inline u32 sk_msg_iter_dist(u32 start, u32 end)
{
- return end >= start ? end - start : end + (MAX_MSG_FRAGS - start);
+ return end >= start ? end - start : end + (NR_MSG_FRAG_IDS - start);
}
#define sk_msg_iter_var_prev(var) \
do { \
if (var == 0) \
- var = MAX_MSG_FRAGS - 1; \
+ var = NR_MSG_FRAG_IDS - 1; \
else \
var--; \
} while (0)
@@ -155,7 +159,7 @@ static inline u32 sk_msg_iter_dist(u32 start, u32 end)
#define sk_msg_iter_var_next(var) \
do { \
var++; \
- if (var == MAX_MSG_FRAGS) \
+ if (var == NR_MSG_FRAG_IDS) \
var = 0; \
} while (0)
@@ -172,9 +176,9 @@ static inline void sk_msg_clear_meta(struct sk_msg *msg)
static inline void sk_msg_init(struct sk_msg *msg)
{
- BUILD_BUG_ON(ARRAY_SIZE(msg->sg.data) - 1 != MAX_MSG_FRAGS);
+ BUILD_BUG_ON(ARRAY_SIZE(msg->sg.data) - 1 != NR_MSG_FRAG_IDS);
memset(msg, 0, sizeof(*msg));
- sg_init_marker(msg->sg.data, MAX_MSG_FRAGS);
+ sg_init_marker(msg->sg.data, NR_MSG_FRAG_IDS);
}
static inline void sk_msg_xfer(struct sk_msg *dst, struct sk_msg *src,
@@ -195,14 +199,11 @@ static inline void sk_msg_xfer_full(struct sk_msg *dst, struct sk_msg *src)
static inline bool sk_msg_full(const struct sk_msg *msg)
{
- return (msg->sg.end == msg->sg.start) && msg->sg.size;
+ return sk_msg_iter_dist(msg->sg.start, msg->sg.end) == MAX_MSG_FRAGS;
}
static inline u32 sk_msg_elem_used(const struct sk_msg *msg)
{
- if (sk_msg_full(msg))
- return MAX_MSG_FRAGS;
-
return sk_msg_iter_dist(msg->sg.start, msg->sg.end);
}
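
To make the wraparound concrete: with 4 KiB pages MAX_SKB_FRAGS is typically 17, so MAX_MSG_FRAGS is 17 and NR_MSG_FRAG_IDS is 18. A ring with sg.start == 15 and sg.end == 2 then gives sk_msg_iter_dist() = 2 + (18 - 15) = 5 used slots, sk_msg_full() becomes true only when the distance reaches 17, and end == start once again unambiguously means empty.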
@@ -230,7 +231,7 @@ static inline void sk_msg_compute_data_pointers(struct sk_msg *msg)
{
struct scatterlist *sge = sk_msg_elem(msg, msg->sg.start);
- if (msg->sg.copy[msg->sg.start]) {
+ if (test_bit(msg->sg.start, &msg->sg.copy)) {
msg->data = NULL;
msg->data_end = NULL;
} else {
@@ -249,7 +250,7 @@ static inline void sk_msg_page_add(struct sk_msg *msg, struct page *page,
sg_set_page(sge, page, len, offset);
sg_unmark_end(sge);
- msg->sg.copy[msg->sg.end] = true;
+ __set_bit(msg->sg.end, &msg->sg.copy);
msg->sg.size += len;
sk_msg_iter_next(msg, end);
}
@@ -257,7 +258,10 @@ static inline void sk_msg_page_add(struct sk_msg *msg, struct page *page,
static inline void sk_msg_sg_copy(struct sk_msg *msg, u32 i, bool copy_state)
{
do {
- msg->sg.copy[i] = copy_state;
+ if (copy_state)
+ __set_bit(i, &msg->sg.copy);
+ else
+ __clear_bit(i, &msg->sg.copy);
sk_msg_iter_var_next(i);
if (i == msg->sg.end)
break;
@@ -354,17 +358,22 @@ static inline void sk_psock_update_proto(struct sock *sk,
static inline void sk_psock_restore_proto(struct sock *sk,
struct sk_psock *psock)
{
- sk->sk_write_space = psock->saved_write_space;
+ sk->sk_prot->unhash = psock->saved_unhash;
if (psock->sk_proto) {
struct inet_connection_sock *icsk = inet_csk(sk);
bool has_ulp = !!icsk->icsk_ulp_data;
- if (has_ulp)
- tcp_update_ulp(sk, psock->sk_proto);
- else
+ if (has_ulp) {
+ tcp_update_ulp(sk, psock->sk_proto,
+ psock->saved_write_space);
+ } else {
sk->sk_prot = psock->sk_proto;
+ sk->sk_write_space = psock->saved_write_space;
+ }
psock->sk_proto = NULL;
+ } else {
+ sk->sk_write_space = psock->saved_write_space;
}
}
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 4d2a2fa55ed5..03a389358562 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -184,7 +184,6 @@ void memcg_deactivate_kmem_caches(struct mem_cgroup *, struct mem_cgroup *);
/*
* Common kmalloc functions provided by all allocators
*/
-void * __must_check __krealloc(const void *, size_t, gfp_t);
void * __must_check krealloc(const void *, size_t, gfp_t);
void kfree(const void *);
void kzfree(const void *);
@@ -561,26 +560,6 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
return __kmalloc(size, flags);
}
-/*
- * Determine size used for the nth kmalloc cache.
- * return size or 0 if a kmalloc cache for that
- * size does not exist
- */
-static __always_inline unsigned int kmalloc_size(unsigned int n)
-{
-#ifndef CONFIG_SLOB
- if (n > 2)
- return 1U << n;
-
- if (n == 1 && KMALLOC_MIN_SIZE <= 32)
- return 96;
-
- if (n == 2 && KMALLOC_MIN_SIZE <= 64)
- return 192;
-#endif
- return 0;
-}
-
static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
{
#ifndef CONFIG_SLOB
diff --git a/include/linux/smp.h b/include/linux/smp.h
index 6fc856c9eda5..cbc9162689d0 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -15,6 +15,7 @@
#include <linux/llist.h>
typedef void (*smp_call_func_t)(void *info);
+typedef bool (*smp_cond_func_t)(int cpu, void *info);
struct __call_single_data {
struct llist_node llist;
smp_call_func_t func;
@@ -49,13 +50,11 @@ void on_each_cpu_mask(const struct cpumask *mask, smp_call_func_t func,
* cond_func returns a positive value. This may include the local
* processor.
*/
-void on_each_cpu_cond(bool (*cond_func)(int cpu, void *info),
- smp_call_func_t func, void *info, bool wait,
- gfp_t gfp_flags);
+void on_each_cpu_cond(smp_cond_func_t cond_func, smp_call_func_t func,
+ void *info, bool wait);
-void on_each_cpu_cond_mask(bool (*cond_func)(int cpu, void *info),
- smp_call_func_t func, void *info, bool wait,
- gfp_t gfp_flags, const struct cpumask *mask);
+void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func,
+ void *info, bool wait, const struct cpumask *mask);
int smp_call_function_single_async(int cpu, call_single_data_t *csd);
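
A sketch of a caller adapted to the new on_each_cpu_cond() prototype, which drops the gfp_flags argument and takes an smp_cond_func_t predicate; cond_is_idle() and do_flush() are made-up example functions.

#include <linux/sched.h>
#include <linux/smp.h>

/* Predicate: run the work only on CPUs that are currently idle. */
static bool cond_is_idle(int cpu, void *info)
{
	return idle_cpu(cpu);
}

/* Work executed on each selected CPU, in IPI context. */
static void do_flush(void *info)
{
}

static void flush_idle_cpus(void)
{
	/* The former gfp_flags argument is gone; nothing is allocated here. */
	on_each_cpu_cond(cond_is_idle, do_flush, NULL, true);
}
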
diff --git a/include/linux/soc/mediatek/mtk-cmdq.h b/include/linux/soc/mediatek/mtk-cmdq.h
index 9618debb9ceb..a74c1d5acdf3 100644
--- a/include/linux/soc/mediatek/mtk-cmdq.h
+++ b/include/linux/soc/mediatek/mtk-cmdq.h
@@ -15,6 +15,12 @@
struct cmdq_pkt;
+struct cmdq_client_reg {
+ u8 subsys;
+ u16 offset;
+ u16 size;
+};
+
struct cmdq_client {
spinlock_t lock;
u32 pkt_cnt;
@@ -25,6 +31,21 @@ struct cmdq_client {
};
/**
+ * cmdq_dev_get_client_reg() - parse cmdq client reg from the device
+ * node of CMDQ client
+ * @dev: device of CMDQ mailbox client
+ * @client_reg: CMDQ client reg pointer
+ * @idx: the index of desired reg
+ *
+ * Return: 0 for success; else the error code is returned
+ *
+ * Helps a CMDQ client parse its cmdq client reg
+ * from the device node of the CMDQ client.
+ */
+int cmdq_dev_get_client_reg(struct device *dev,
+ struct cmdq_client_reg *client_reg, int idx);
+
+/**
* cmdq_mbox_create() - create CMDQ mailbox client and channel
* @dev: device of CMDQ mailbox client
* @index: index of CMDQ mailbox channel
@@ -100,6 +121,38 @@ int cmdq_pkt_wfe(struct cmdq_pkt *pkt, u16 event);
int cmdq_pkt_clear_event(struct cmdq_pkt *pkt, u16 event);
/**
+ * cmdq_pkt_poll() - Append a polling command to the CMDQ packet, asking GCE to
+ * execute an instruction that waits until the specified
+ * hardware register matches the given value (no mask applied).
+ * All GCE hardware threads are blocked by this
+ * instruction.
+ * @pkt: the CMDQ packet
+ * @subsys: the CMDQ sub system code
+ * @offset: register offset from CMDQ sub system
+ * @value: the specified target register value
+ *
+ * Return: 0 for success; else the error code is returned
+ */
+int cmdq_pkt_poll(struct cmdq_pkt *pkt, u8 subsys,
+ u16 offset, u32 value);
+
+/**
+ * cmdq_pkt_poll_mask() - Append a polling command to the CMDQ packet, asking
+ * GCE to execute an instruction that waits until the specified
+ * hardware register, masked by @mask, matches the given value.
+ * All GCE hardware threads are blocked by this
+ * instruction.
+ * @pkt: the CMDQ packet
+ * @subsys: the CMDQ sub system code
+ * @offset: register offset from CMDQ sub system
+ * @value: the specified target register value
+ * @mask: the specified target register mask
+ *
+ * Return: 0 for success; else the error code is returned
+ */
+int cmdq_pkt_poll_mask(struct cmdq_pkt *pkt, u8 subsys,
+ u16 offset, u32 value, u32 mask);
+/**
* cmdq_pkt_flush_async() - trigger CMDQ to asynchronously execute the CMDQ
* packet and call back at the end of done packet
* @pkt: the CMDQ packet
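
A hedged sketch of the new polling helpers: the subsys, offset, value and event numbers below are placeholders, not real MediaTek register definitions.

#include <linux/soc/mediatek/mtk-cmdq.h>

static int example_wait_for_ready(struct cmdq_pkt *pkt)
{
	int err;

	/* Block the GCE thread until reg[subsys 0x4, offset 0x30] & 0x1 == 0x1 */
	err = cmdq_pkt_poll_mask(pkt, 0x4, 0x30, 0x1, 0x1);
	if (err)
		return err;

	/* Then clear a (made-up) event before continuing with the packet */
	return cmdq_pkt_clear_event(pkt, 16);
}
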
diff --git a/include/linux/soc/mediatek/mtk_sip_svc.h b/include/linux/soc/mediatek/mtk_sip_svc.h
new file mode 100644
index 000000000000..082398e0cfb1
--- /dev/null
+++ b/include/linux/soc/mediatek/mtk_sip_svc.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2019 MediaTek Inc.
+ */
+#ifndef __MTK_SIP_SVC_H
+#define __MTK_SIP_SVC_H
+
+/* Error Code */
+#define SIP_SVC_E_SUCCESS 0
+#define SIP_SVC_E_NOT_SUPPORTED -1
+#define SIP_SVC_E_INVALID_PARAMS -2
+#define SIP_SVC_E_INVALID_RANGE -3
+#define SIP_SVC_E_PERMISSION_DENIED -4
+
+#ifdef CONFIG_ARM64
+#define MTK_SIP_SMC_CONVENTION ARM_SMCCC_SMC_64
+#else
+#define MTK_SIP_SMC_CONVENTION ARM_SMCCC_SMC_32
+#endif
+
+#define MTK_SIP_SMC_CMD(fn_id) \
+ ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, MTK_SIP_SMC_CONVENTION, \
+ ARM_SMCCC_OWNER_SIP, fn_id)
+
+#endif
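
MTK_SIP_SMC_CMD() simply wraps ARM_SMCCC_CALL_VAL() with the SiP owner and the 32/64-bit calling convention. A hedged sketch of issuing such a call; the function number 0x200 is invented for illustration.

#include <linux/arm-smccc.h>
#include <linux/errno.h>
#include <linux/soc/mediatek/mtk_sip_svc.h>

static int mtk_sip_example(unsigned long arg)
{
	struct arm_smccc_res res;

	arm_smccc_smc(MTK_SIP_SMC_CMD(0x200), arg, 0, 0, 0, 0, 0, 0, &res);

	return res.a0 == SIP_SVC_E_SUCCESS ? 0 : -EIO;
}
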
diff --git a/include/linux/soc/mmp/cputype.h b/include/linux/soc/mmp/cputype.h
new file mode 100644
index 000000000000..221790761e8e
--- /dev/null
+++ b/include/linux/soc/mmp/cputype.h
@@ -0,0 +1,87 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_MACH_CPUTYPE_H
+#define __ASM_MACH_CPUTYPE_H
+
+#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
+#include <asm/cputype.h>
+#endif
+
+/*
+ * CPU Stepping CPU_ID CHIP_ID
+ *
+ * PXA168 S0 0x56158400 0x0000C910
+ * PXA168 A0 0x56158400 0x00A0A168
+ * PXA910 Y1 0x56158400 0x00F2C920
+ * PXA910 A0 0x56158400 0x00F2C910
+ * PXA910 A1 0x56158400 0x00A0C910
+ * PXA920 Y0 0x56158400 0x00F2C920
+ * PXA920 A0 0x56158400 0x00A0C920
+ * PXA920 A1 0x56158400 0x00A1C920
+ * MMP2 Z0 0x560f5811 0x00F00410
+ * MMP2 Z1 0x560f5811 0x00E00410
+ * MMP2 A0 0x560f5811 0x00A0A610
+ * MMP3 A0 0x562f5842 0x00A02128
+ * MMP3 B0 0x562f5842 0x00B02128
+ */
+
+extern unsigned int mmp_chip_id;
+
+#ifdef CONFIG_CPU_PXA168
+static inline int cpu_is_pxa168(void)
+{
+ return (((read_cpuid_id() >> 8) & 0xff) == 0x84) &&
+ ((mmp_chip_id & 0xfff) == 0x168);
+}
+#else
+#define cpu_is_pxa168() (0)
+#endif
+
+/* cpu_is_pxa910() is shared on both pxa910 and pxa920 */
+#ifdef CONFIG_CPU_PXA910
+static inline int cpu_is_pxa910(void)
+{
+ return (((read_cpuid_id() >> 8) & 0xff) == 0x84) &&
+ (((mmp_chip_id & 0xfff) == 0x910) ||
+ ((mmp_chip_id & 0xfff) == 0x920));
+}
+#else
+#define cpu_is_pxa910() (0)
+#endif
+
+#if defined(CONFIG_CPU_MMP2) || defined(CONFIG_MACH_MMP2_DT)
+static inline int cpu_is_mmp2(void)
+{
+ return (((read_cpuid_id() >> 8) & 0xff) == 0x58) &&
+ (((mmp_chip_id & 0xfff) == 0x410) ||
+ ((mmp_chip_id & 0xfff) == 0x610));
+}
+#else
+#define cpu_is_mmp2() (0)
+#endif
+
+#ifdef CONFIG_MACH_MMP3_DT
+static inline int cpu_is_mmp3(void)
+{
+ return (((read_cpuid_id() >> 8) & 0xff) == 0x58) &&
+ ((mmp_chip_id & 0xffff) == 0x2128);
+}
+
+static inline int cpu_is_mmp3_a0(void)
+{
+ return (cpu_is_mmp3() &&
+ ((mmp_chip_id & 0x00ff0000) == 0x00a00000));
+}
+
+static inline int cpu_is_mmp3_b0(void)
+{
+ return (cpu_is_mmp3() &&
+ ((mmp_chip_id & 0x00ff0000) == 0x00b00000));
+}
+
+#else
+#define cpu_is_mmp3() (0)
+#define cpu_is_mmp3_a0() (0)
+#define cpu_is_mmp3_b0() (0)
+#endif
+
+#endif /* __ASM_MACH_CPUTYPE_H */
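
The cpu_is_*() helpers combine the ARM CPU part field from read_cpuid_id() with the SoC-specific mmp_chip_id. A hedged sketch of how platform code might branch on them; the divisor values are invented.

#include <linux/soc/mmp/cputype.h>

static unsigned int example_pick_divisor(void)
{
	if (cpu_is_mmp3_a0())
		return 4;	/* pretend early MMP3 silicon needs a slower setting */
	if (cpu_is_mmp2() || cpu_is_mmp3())
		return 2;
	return 1;		/* PXA168/PXA910 and anything unrecognised */
}
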
diff --git a/include/linux/soc/qcom/irq.h b/include/linux/soc/qcom/irq.h
new file mode 100644
index 000000000000..9e1ece58e55b
--- /dev/null
+++ b/include/linux/soc/qcom/irq.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __QCOM_IRQ_H
+#define __QCOM_IRQ_H
+
+#include <linux/irqdomain.h>
+
+#define GPIO_NO_WAKE_IRQ ~0U
+
+/**
+ * QCOM specific IRQ domain flags that distinguishes the handling of wakeup
+ * capable interrupts by different interrupt controllers.
+ *
+ * IRQ_DOMAIN_FLAG_QCOM_PDC_WAKEUP: Line must be masked at TLMM and the
+ * interrupt configuration is done at PDC
+ * IRQ_DOMAIN_FLAG_QCOM_MPM_WAKEUP: Interrupt configuration is handled at TLMM
+ */
+#define IRQ_DOMAIN_FLAG_QCOM_PDC_WAKEUP (IRQ_DOMAIN_FLAG_NONCORE << 0)
+#define IRQ_DOMAIN_FLAG_QCOM_MPM_WAKEUP (IRQ_DOMAIN_FLAG_NONCORE << 1)
+
+/**
+ * irq_domain_qcom_handle_wakeup: Return whether the domain handles interrupt
+ * configuration
+ * @d: irq domain
+ *
+ * This QCOM specific irq domain call returns whether the interrupt controller
+ * requires the interrupt to be masked at the child interrupt controller.
+ */
+static inline bool irq_domain_qcom_handle_wakeup(const struct irq_domain *d)
+{
+ return (d->flags & IRQ_DOMAIN_FLAG_QCOM_PDC_WAKEUP);
+}
+
+#endif
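
A hedged sketch of the intended use: a pinctrl/GPIO driver checks whether its parent domain (the PDC) owns wake configuration and only masks the line locally in that case; example_must_mask_locally() is a made-up name.

#include <linux/irq.h>
#include <linux/soc/qcom/irq.h>

static bool example_must_mask_locally(struct irq_data *d)
{
	/* True when the parent domain set IRQ_DOMAIN_FLAG_QCOM_PDC_WAKEUP */
	return irq_domain_qcom_handle_wakeup(d->domain);
}
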
diff --git a/include/linux/soc/qcom/llcc-qcom.h b/include/linux/soc/qcom/llcc-qcom.h
index eb71a50b8afc..90b864655822 100644
--- a/include/linux/soc/qcom/llcc-qcom.h
+++ b/include/linux/soc/qcom/llcc-qcom.h
@@ -38,33 +38,27 @@ struct llcc_slice_desc {
};
/**
- * llcc_slice_config - Data associated with the llcc slice
- * @usecase_id: usecase id for which the llcc slice is used
- * @slice_id: llcc slice id assigned to each slice
- * @max_cap: maximum capacity of the llcc slice
- * @priority: priority of the llcc slice
- * @fixed_size: whether the llcc slice can grow beyond its size
- * @bonus_ways: bonus ways associated with llcc slice
- * @res_ways: reserved ways associated with llcc slice
- * @cache_mode: mode of the llcc slice
- * @probe_target_ways: Probe only reserved and bonus ways on a cache miss
- * @dis_cap_alloc: Disable capacity based allocation
- * @retain_on_pc: Retain through power collapse
- * @activate_on_init: activate the slice on init
+ * llcc_edac_reg_data - llcc edac registers data for each error type
+ * @name: Name of the error
+ * @synd_reg: Syndrome register address
+ * @count_status_reg: Status register address to read the error count
+ * @ways_status_reg: Status register address to read the error ways
+ * @reg_cnt: Number of registers
+ * @count_mask: Mask value to get the error count
+ * @ways_mask: Mask value to get the error ways
+ * @count_shift: Shift value to get the error count
+ * @ways_shift: Shift value to get the error ways
*/
-struct llcc_slice_config {
- u32 usecase_id;
- u32 slice_id;
- u32 max_cap;
- u32 priority;
- bool fixed_size;
- u32 bonus_ways;
- u32 res_ways;
- u32 cache_mode;
- u32 probe_target_ways;
- bool dis_cap_alloc;
- bool retain_on_pc;
- bool activate_on_init;
+struct llcc_edac_reg_data {
+ char *name;
+ u64 synd_reg;
+ u64 count_status_reg;
+ u64 ways_status_reg;
+ u32 reg_cnt;
+ u32 count_mask;
+ u32 ways_mask;
+ u8 count_shift;
+ u8 ways_shift;
};
/**
@@ -93,30 +87,6 @@ struct llcc_drv_data {
int ecc_irq;
};
-/**
- * llcc_edac_reg_data - llcc edac registers data for each error type
- * @name: Name of the error
- * @synd_reg: Syndrome register address
- * @count_status_reg: Status register address to read the error count
- * @ways_status_reg: Status register address to read the error ways
- * @reg_cnt: Number of registers
- * @count_mask: Mask value to get the error count
- * @ways_mask: Mask value to get the error ways
- * @count_shift: Shift value to get the error count
- * @ways_shift: Shift value to get the error ways
- */
-struct llcc_edac_reg_data {
- char *name;
- u64 synd_reg;
- u64 count_status_reg;
- u64 ways_status_reg;
- u32 reg_cnt;
- u32 count_mask;
- u32 ways_mask;
- u8 count_shift;
- u8 ways_shift;
-};
-
#if IS_ENABLED(CONFIG_QCOM_LLCC)
/**
* llcc_slice_getd - get llcc slice descriptor
@@ -154,20 +124,6 @@ int llcc_slice_activate(struct llcc_slice_desc *desc);
*/
int llcc_slice_deactivate(struct llcc_slice_desc *desc);
-/**
- * qcom_llcc_probe - program the sct table
- * @pdev: platform device pointer
- * @table: soc sct table
- * @sz: Size of the config table
- */
-int qcom_llcc_probe(struct platform_device *pdev,
- const struct llcc_slice_config *table, u32 sz);
-
-/**
- * qcom_llcc_remove - remove the sct table
- * @pdev: Platform device pointer
- */
-int qcom_llcc_remove(struct platform_device *pdev);
#else
static inline struct llcc_slice_desc *llcc_slice_getd(u32 uid)
{
@@ -197,16 +153,6 @@ static inline int llcc_slice_deactivate(struct llcc_slice_desc *desc)
{
return -EINVAL;
}
-static inline int qcom_llcc_probe(struct platform_device *pdev,
- const struct llcc_slice_config *table, u32 sz)
-{
- return -ENODEV;
-}
-
-static inline int qcom_llcc_remove(struct platform_device *pdev)
-{
- return -ENODEV;
-}
#endif
#endif
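
With the SCT-programming interface gone from this header, clients are left with the slice get/activate calls. A hedged sketch of a consumer, assuming LLCC_GPU is one of the use-case IDs defined earlier in this header.

#include <linux/err.h>
#include <linux/soc/qcom/llcc-qcom.h>

static struct llcc_slice_desc *example_claim_gpu_slice(void)
{
	struct llcc_slice_desc *desc = llcc_slice_getd(LLCC_GPU);

	if (IS_ERR_OR_NULL(desc))
		return NULL;

	if (llcc_slice_activate(desc)) {
		llcc_slice_putd(desc);
		return NULL;
	}
	return desc;
}
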
diff --git a/include/linux/soc/qcom/smd-rpm.h b/include/linux/soc/qcom/smd-rpm.h
index 9e4fdd861a51..da304ce8c8f7 100644
--- a/include/linux/soc/qcom/smd-rpm.h
+++ b/include/linux/soc/qcom/smd-rpm.h
@@ -10,6 +10,7 @@ struct qcom_smd_rpm;
/*
* Constants used for addressing resources in the RPM.
*/
+#define QCOM_SMD_RPM_BBYB 0x62796262
#define QCOM_SMD_RPM_BOBB 0x62626f62
#define QCOM_SMD_RPM_BOOST 0x61747362
#define QCOM_SMD_RPM_BUS_CLK 0x316b6c63
diff --git a/include/linux/soc/samsung/exynos-pmu.h b/include/linux/soc/samsung/exynos-pmu.h
index fc0b445bb36b..a4f5516cc956 100644
--- a/include/linux/soc/samsung/exynos-pmu.h
+++ b/include/linux/soc/samsung/exynos-pmu.h
@@ -3,7 +3,7 @@
* Copyright (c) 2014 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
- * Header for EXYNOS PMU Driver support
+ * Header for Exynos PMU Driver support
*/
#ifndef __LINUX_SOC_EXYNOS_PMU_H
diff --git a/include/linux/soc/samsung/exynos-regs-pmu.h b/include/linux/soc/samsung/exynos-regs-pmu.h
index 5addaf5ccbce..fc9250fb3133 100644
--- a/include/linux/soc/samsung/exynos-regs-pmu.h
+++ b/include/linux/soc/samsung/exynos-regs-pmu.h
@@ -3,7 +3,7 @@
* Copyright (c) 2010-2015 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
- * EXYNOS - Power management unit definition
+ * Exynos - Power management unit definition
*
* Notice:
* This is not a list of all Exynos Power Management Unit SFRs.
@@ -185,7 +185,7 @@
/* Only for S5Pv210 */
#define S5PV210_EINT_WAKEUP_MASK 0xC004
-/* Only for EXYNOS4210 */
+/* Only for Exynos4210 */
#define S5P_CMU_CLKSTOP_LCD1_LOWPWR 0x1154
#define S5P_CMU_RESET_LCD1_LOWPWR 0x1174
#define S5P_MODIMIF_MEM_LOWPWR 0x11C4
@@ -193,7 +193,7 @@
#define S5P_SATA_MEM_LOWPWR 0x11E4
#define S5P_LCD1_LOWPWR 0x1394
-/* Only for EXYNOS4x12 */
+/* Only for Exynos4x12 */
#define S5P_ISP_ARM_LOWPWR 0x1050
#define S5P_DIS_IRQ_ISP_ARM_LOCAL_LOWPWR 0x1054
#define S5P_DIS_IRQ_ISP_ARM_CENTRAL_LOWPWR 0x1058
@@ -234,7 +234,7 @@
#define S5P_SECSS_MEM_OPTION 0x2EC8
#define S5P_ROTATOR_MEM_OPTION 0x2F48
-/* Only for EXYNOS4412 */
+/* Only for Exynos4412 */
#define S5P_ARM_CORE2_LOWPWR 0x1020
#define S5P_DIS_IRQ_CORE2 0x1024
#define S5P_DIS_IRQ_CENTRAL2 0x1028
@@ -242,7 +242,7 @@
#define S5P_DIS_IRQ_CORE3 0x1034
#define S5P_DIS_IRQ_CENTRAL3 0x1038
-/* Only for EXYNOS3XXX */
+/* Only for Exynos3XXX */
#define EXYNOS3_ARM_CORE0_SYS_PWR_REG 0x1000
#define EXYNOS3_DIS_IRQ_ARM_CORE0_LOCAL_SYS_PWR_REG 0x1004
#define EXYNOS3_DIS_IRQ_ARM_CORE0_CENTRAL_SYS_PWR_REG 0x1008
@@ -347,7 +347,7 @@
#define EXYNOS3_OPTION_USE_SC_FEEDBACK (1 << 1)
#define EXYNOS3_OPTION_SKIP_DEACTIVATE_ACEACP_IN_PWDN (1 << 7)
-/* For EXYNOS5 */
+/* For Exynos5 */
#define EXYNOS5_AUTO_WDTRESET_DISABLE 0x0408
#define EXYNOS5_MASK_WDTRESET_REQUEST 0x040C
@@ -484,7 +484,7 @@
#define EXYNOS5420_SWRESET_KFC_SEL 0x3
-/* Only for EXYNOS5420 */
+/* Only for Exynos5420 */
#define EXYNOS5420_L2RSTDISABLE_VALUE BIT(3)
#define EXYNOS5420_LPI_MASK 0x0004
@@ -645,7 +645,7 @@
| EXYNOS5420_KFC_USE_STANDBY_WFI2 \
| EXYNOS5420_KFC_USE_STANDBY_WFI3)
-/* For EXYNOS5433 */
+/* For Exynos5433 */
#define EXYNOS5433_EINT_WAKEUP_MASK (0x060C)
#define EXYNOS5433_USBHOST30_PHY_CONTROL (0x0728)
#define EXYNOS5433_PAD_RETENTION_AUD_OPTION (0x3028)
diff --git a/include/linux/soc/ti/k3-ringacc.h b/include/linux/soc/ti/k3-ringacc.h
new file mode 100644
index 000000000000..26f73df0a524
--- /dev/null
+++ b/include/linux/soc/ti/k3-ringacc.h
@@ -0,0 +1,244 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * K3 Ring Accelerator (RA) subsystem interface
+ *
+ * Copyright (C) 2019 Texas Instruments Incorporated - http://www.ti.com
+ */
+
+#ifndef __SOC_TI_K3_RINGACC_API_H_
+#define __SOC_TI_K3_RINGACC_API_H_
+
+#include <linux/types.h>
+
+struct device_node;
+
+/**
+ * enum k3_ring_mode - &struct k3_ring_cfg mode
+ *
+ * RA ring operational modes
+ *
+ * @K3_RINGACC_RING_MODE_RING: Exposed Ring mode for SW direct access
+ * @K3_RINGACC_RING_MODE_MESSAGE: Messaging mode. Messaging mode requires
+ * all accesses to the queue to go through this IP so that all
+ * accesses to the memory are controlled and ordered. This IP then
+ * controls the entire state of the queue, and SW has no direct control,
+ * e.g. through doorbells, and cannot access the storage memory directly.
+ * This is particularly useful when more than one SW or HW entity can be
+ * the producer and/or consumer at the same time
+ * @K3_RINGACC_RING_MODE_CREDENTIALS: Credentials mode is message mode plus
+ * stores credentials with each message, requiring the element size to be
+ * doubled to fit the credentials. Any exposed memory should be protected
+ * by a firewall from unwanted access
+ */
+enum k3_ring_mode {
+ K3_RINGACC_RING_MODE_RING = 0,
+ K3_RINGACC_RING_MODE_MESSAGE,
+ K3_RINGACC_RING_MODE_CREDENTIALS,
+ K3_RINGACC_RING_MODE_INVALID
+};
+
+/**
+ * enum k3_ring_size - &struct k3_ring_cfg elm_size
+ *
+ * RA ring element's sizes in bytes.
+ */
+enum k3_ring_size {
+ K3_RINGACC_RING_ELSIZE_4 = 0,
+ K3_RINGACC_RING_ELSIZE_8,
+ K3_RINGACC_RING_ELSIZE_16,
+ K3_RINGACC_RING_ELSIZE_32,
+ K3_RINGACC_RING_ELSIZE_64,
+ K3_RINGACC_RING_ELSIZE_128,
+ K3_RINGACC_RING_ELSIZE_256,
+ K3_RINGACC_RING_ELSIZE_INVALID
+};
+
+struct k3_ringacc;
+struct k3_ring;
+
+/**
+ * struct k3_ring_cfg - RA ring configuration structure
+ *
+ * @size: Ring size, number of elements
+ * @elm_size: Ring element size
+ * @mode: Ring operational mode
+ * @flags: Ring configuration flags. Possible values:
+ * @K3_RINGACC_RING_SHARED: when set, allows the same ring to be requested
+ * several times. This is useful when the same ring is used as a Free Host
+ * PD ring for different flows, for example.
+ * Note: Locking should be done by consumer if required
+ */
+struct k3_ring_cfg {
+ u32 size;
+ enum k3_ring_size elm_size;
+ enum k3_ring_mode mode;
+#define K3_RINGACC_RING_SHARED BIT(1)
+ u32 flags;
+};
+
+#define K3_RINGACC_RING_ID_ANY (-1)
+
+/**
+ * of_k3_ringacc_get_by_phandle - find a RA by phandle property
+ * @np: device node
+ * @propname: property name containing phandle on RA node
+ *
+ * Returns pointer on the RA - struct k3_ringacc
+ * or -ENODEV if not found,
+ * or -EPROBE_DEFER if not yet registered
+ */
+struct k3_ringacc *of_k3_ringacc_get_by_phandle(struct device_node *np,
+ const char *property);
+
+#define K3_RINGACC_RING_USE_PROXY BIT(1)
+
+/**
+ * k3_ringacc_request_ring - request ring from ringacc
+ * @ringacc: pointer on ringacc
+ * @id: ring id or K3_RINGACC_RING_ID_ANY for any general purpose ring
+ * @flags:
+ * @K3_RINGACC_RING_USE_PROXY: if set, a proxy will be allocated and
+ * used to access ring memory. Supported only for rings in
+ * Message/Credentials/Queue mode.
+ *
+ * Returns pointer on the Ring - struct k3_ring
+ * or NULL in case of failure.
+ */
+struct k3_ring *k3_ringacc_request_ring(struct k3_ringacc *ringacc,
+ int id, u32 flags);
+
+/**
+ * k3_ringacc_ring_reset - ring reset
+ * @ring: pointer on Ring
+ *
+ * Resets ring internal state ((hw)occ, (hw)idx).
+ */
+void k3_ringacc_ring_reset(struct k3_ring *ring);
+/**
+ * k3_ringacc_ring_reset_dma - ring reset for DMA rings
+ * @ring: pointer on Ring
+ * @occ: ring occupancy to account for during the reset
+ *
+ * Resets ring internal state ((hw)occ, (hw)idx). Should be used for rings
+ * which are read by K3 UDMA, like TX or Free Host PD rings.
+ */
+void k3_ringacc_ring_reset_dma(struct k3_ring *ring, u32 occ);
+
+/**
+ * k3_ringacc_ring_free - ring free
+ * @ring: pointer on Ring
+ *
+ * Resets the ring and frees all allocated resources.
+ */
+int k3_ringacc_ring_free(struct k3_ring *ring);
+
+/**
+ * k3_ringacc_get_ring_id - Get the Ring ID
+ * @ring: pointer on ring
+ *
+ * Returns the Ring ID
+ */
+u32 k3_ringacc_get_ring_id(struct k3_ring *ring);
+
+/**
+ * k3_ringacc_get_ring_irq_num - Get the irq number for the ring
+ * @ring: pointer on ring
+ *
+ * Returns the interrupt number which can be used to request the interrupt
+ */
+int k3_ringacc_get_ring_irq_num(struct k3_ring *ring);
+
+/**
+ * k3_ringacc_ring_cfg - ring configure
+ * @ring: pointer on ring
+ * @cfg: Ring configuration parameters (see &struct k3_ring_cfg)
+ *
+ * Configures ring, including ring memory allocation.
+ * Returns 0 on success, errno otherwise.
+ */
+int k3_ringacc_ring_cfg(struct k3_ring *ring, struct k3_ring_cfg *cfg);
+
+/**
+ * k3_ringacc_ring_get_size - get ring size
+ * @ring: pointer on ring
+ *
+ * Returns ring size in number of elements.
+ */
+u32 k3_ringacc_ring_get_size(struct k3_ring *ring);
+
+/**
+ * k3_ringacc_ring_get_free - get free elements
+ * @ring: pointer on ring
+ *
+ * Returns number of free elements in the ring.
+ */
+u32 k3_ringacc_ring_get_free(struct k3_ring *ring);
+
+/**
+ * k3_ringacc_ring_get_occ - get ring occupancy
+ * @ring: pointer on ring
+ *
+ * Returns total number of valid entries on the ring
+ */
+u32 k3_ringacc_ring_get_occ(struct k3_ring *ring);
+
+/**
+ * k3_ringacc_ring_is_full - checks if ring is full
+ * @ring: pointer on ring
+ *
+ * Returns true if the ring is full
+ */
+u32 k3_ringacc_ring_is_full(struct k3_ring *ring);
+
+/**
+ * k3_ringacc_ring_push - push element to the ring tail
+ * @ring: pointer on ring
+ * @elem: pointer on ring element buffer
+ *
+ * Push one ring element to the ring tail. Size of the ring element is
+ * determined by ring configuration &struct k3_ring_cfg elm_size.
+ *
+ * Returns 0 on success, errno otherwise.
+ */
+int k3_ringacc_ring_push(struct k3_ring *ring, void *elem);
+
+/**
+ * k3_ringacc_ring_pop - pop element from the ring head
+ * @ring: pointer on ring
+ * @elem: pointer on ring element buffer
+ *
+ * Pop one ring element from the ring head. Size of the ring element is
+ * determined by ring configuration &struct k3_ring_cfg elm_size.
+ *
+ * Returns 0 on success, errno otherwise.
+ */
+int k3_ringacc_ring_pop(struct k3_ring *ring, void *elem);
+
+/**
+ * k3_ringacc_ring_push_head - push element to the ring head
+ * @ring: pointer on ring
+ * @elem: pointer on ring element buffer
+ *
+ * Push one ring element to the ring head. Size of the ring element is
+ * determined by ring configuration &struct k3_ring_cfg elm_size.
+ *
+ * Returns 0 on success, errno otherwise.
+ * Not Supported by ring modes: K3_RINGACC_RING_MODE_RING
+ */
+int k3_ringacc_ring_push_head(struct k3_ring *ring, void *elem);
+
+/**
+ * k3_ringacc_ring_pop_tail - pop element from the ring tail
+ * @ring: pointer on ring
+ * @elem: pointer on ring element buffer
+ *
+ * Pop one ring element from the ring tail. Size of the ring element is
+ * determined by ring configuration &struct k3_ring_cfg elm_size.
+ *
+ * Returns 0 on success, errno otherwise.
+ * Not Supported by ring modes: K3_RINGACC_RING_MODE_RING
+ */
+int k3_ringacc_ring_pop_tail(struct k3_ring *ring, void *elem);
+
+u32 k3_ringacc_get_tisci_dev_id(struct k3_ring *ring);
+
+#endif /* __SOC_TI_K3_RINGACC_API_H_ */
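
A hedged sketch of the request/configure/push flow the kernel-doc above describes; the ring size, element size and error codes are arbitrary, and the ringacc handle would normally come from of_k3_ringacc_get_by_phandle().

#include <linux/errno.h>
#include <linux/soc/ti/k3-ringacc.h>

static int example_setup_and_push(struct k3_ringacc *ringacc, void *elem)
{
	struct k3_ring_cfg cfg = {
		.size = 128,
		.elm_size = K3_RINGACC_RING_ELSIZE_8,
		.mode = K3_RINGACC_RING_MODE_RING,
		.flags = 0,
	};
	struct k3_ring *ring;
	int ret;

	ring = k3_ringacc_request_ring(ringacc, K3_RINGACC_RING_ID_ANY, 0);
	if (!ring)
		return -ENODEV;

	ret = k3_ringacc_ring_cfg(ring, &cfg);
	if (ret)
		goto err_free;

	ret = k3_ringacc_ring_push(ring, elem);	/* elem must be ELSIZE_8 bytes */
	if (ret)
		goto err_free;

	return 0;

err_free:
	k3_ringacc_ring_free(ring);
	return ret;
}
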
diff --git a/include/linux/socket.h b/include/linux/socket.h
index 4049d9755cf1..54338fac45cb 100644
--- a/include/linux/socket.h
+++ b/include/linux/socket.h
@@ -378,12 +378,23 @@ extern int __sys_recvmmsg(int fd, struct mmsghdr __user *mmsg,
extern int __sys_sendmmsg(int fd, struct mmsghdr __user *mmsg,
unsigned int vlen, unsigned int flags,
bool forbid_cmsg_compat);
-extern long __sys_sendmsg_sock(struct socket *sock,
- struct user_msghdr __user *msg,
+extern long __sys_sendmsg_sock(struct socket *sock, struct msghdr *msg,
unsigned int flags);
-extern long __sys_recvmsg_sock(struct socket *sock,
- struct user_msghdr __user *msg,
+extern long __sys_recvmsg_sock(struct socket *sock, struct msghdr *msg,
+ struct user_msghdr __user *umsg,
+ struct sockaddr __user *uaddr,
unsigned int flags);
+extern int sendmsg_copy_msghdr(struct msghdr *msg,
+ struct user_msghdr __user *umsg, unsigned flags,
+ struct iovec **iov);
+extern int recvmsg_copy_msghdr(struct msghdr *msg,
+ struct user_msghdr __user *umsg, unsigned flags,
+ struct sockaddr __user **uaddr,
+ struct iovec **iov);
+extern int __copy_msghdr_from_user(struct msghdr *kmsg,
+ struct user_msghdr __user *umsg,
+ struct sockaddr __user **save_addr,
+ struct iovec __user **uiov, size_t *nsegs);
/* helpers which do the actual work for syscalls */
extern int __sys_recvfrom(int fd, void __user *ubuf, size_t size,
@@ -392,10 +403,16 @@ extern int __sys_recvfrom(int fd, void __user *ubuf, size_t size,
extern int __sys_sendto(int fd, void __user *buff, size_t len,
unsigned int flags, struct sockaddr __user *addr,
int addr_len);
+extern int __sys_accept4_file(struct file *file, unsigned file_flags,
+ struct sockaddr __user *upeer_sockaddr,
+ int __user *upeer_addrlen, int flags,
+ unsigned long nofile);
extern int __sys_accept4(int fd, struct sockaddr __user *upeer_sockaddr,
int __user *upeer_addrlen, int flags);
extern int __sys_socket(int family, int type, int protocol);
extern int __sys_bind(int fd, struct sockaddr __user *umyaddr, int addrlen);
+extern int __sys_connect_file(struct file *file, struct sockaddr_storage *addr,
+ int addrlen, int file_flags);
extern int __sys_connect(int fd, struct sockaddr __user *uservaddr,
int addrlen);
extern int __sys_listen(int fd, int backlog);
diff --git a/include/linux/sort.h b/include/linux/sort.h
index 61b96d0ebc44..b5898725fe9d 100644
--- a/include/linux/sort.h
+++ b/include/linux/sort.h
@@ -5,12 +5,12 @@
#include <linux/types.h>
void sort_r(void *base, size_t num, size_t size,
- int (*cmp)(const void *, const void *, const void *),
- void (*swap)(void *, void *, int),
+ cmp_r_func_t cmp_func,
+ swap_func_t swap_func,
const void *priv);
void sort(void *base, size_t num, size_t size,
- int (*cmp)(const void *, const void *),
- void (*swap)(void *, void *, int));
+ cmp_func_t cmp_func,
+ swap_func_t swap_func);
#endif
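
With the typedef-based prototypes, a comparator keeps the familiar qsort() shape and passing NULL as swap_func retains the generic swap. A minimal sketch:

#include <linux/sort.h>
#include <linux/types.h>

static int cmp_u32(const void *a, const void *b)
{
	u32 x = *(const u32 *)a, y = *(const u32 *)b;

	return x < y ? -1 : (x > y ? 1 : 0);
}

static void sort_example(u32 *vals, size_t n)
{
	sort(vals, n, sizeof(*vals), cmp_u32, NULL);
}
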
diff --git a/include/linux/soundwire/sdw.h b/include/linux/soundwire/sdw.h
index ea787201c3ac..b451bb622335 100644
--- a/include/linux/soundwire/sdw.h
+++ b/include/linux/soundwire/sdw.h
@@ -40,9 +40,6 @@ struct sdw_slave;
#define SDW_VALID_PORT_RANGE(n) ((n) <= 14 && (n) >= 1)
-#define SDW_DAI_ID_RANGE_START 100
-#define SDW_DAI_ID_RANGE_END 200
-
enum {
SDW_PORT_DIRN_SINK = 0,
SDW_PORT_DIRN_SOURCE,
@@ -406,6 +403,8 @@ int sdw_slave_read_prop(struct sdw_slave *slave);
* SDW Slave Structures and APIs
*/
+#define SDW_IGNORED_UNIQUE_ID 0xFF
+
/**
* struct sdw_slave_id - Slave ID
* @mfg_id: MIPI Manufacturer ID
@@ -421,7 +420,7 @@ struct sdw_slave_id {
__u16 mfg_id;
__u16 part_id;
__u8 class_id;
- __u8 unique_id:4;
+ __u8 unique_id;
__u8 sdw_version:4;
};
@@ -547,7 +546,22 @@ struct sdw_slave_ops {
* @debugfs: Slave debugfs
* @node: node for bus list
* @port_ready: Port ready completion flag for each Slave port
- * @dev_num: Device Number assigned by Bus
+ * @dev_num: Current Device Number, values can be 0 or dev_num_sticky
+ * @dev_num_sticky: one-time static Device Number assigned by Bus
+ * @probed: boolean tracking driver state
+ * @probe_complete: completion utility to control potential races
+ * on startup between driver probe/initialization and SoundWire
+ * Slave state changes/implementation-defined interrupts
+ * @enumeration_complete: completion utility to control potential races
+ * on startup between device enumeration and read/write access to the
+ * Slave device
+ * @initialization_complete: completion utility to control potential races
+ * on startup between device enumeration and settings being restored
+ * @unattach_request: mask field to keep track why the Slave re-attached and
+ * was re-initialized. This is useful to deal with potential race conditions
+ * between the Master suspending and the codec resuming, and make sure that
+ * when the Master triggered a reset the Slave is properly enumerated and
+ * initialized
*/
struct sdw_slave {
struct sdw_slave_id id;
@@ -562,6 +576,12 @@ struct sdw_slave {
struct list_head node;
struct completion *port_ready;
u16 dev_num;
+ u16 dev_num_sticky;
+ bool probed;
+ struct completion probe_complete;
+ struct completion enumeration_complete;
+ struct completion initialization_complete;
+ u32 unattach_request;
};
#define dev_to_sdw_dev(_dev) container_of(_dev, struct sdw_slave, dev)
diff --git a/include/linux/soundwire/sdw_intel.h b/include/linux/soundwire/sdw_intel.h
index c9427cb6020b..979b41b5dcb4 100644
--- a/include/linux/soundwire/sdw_intel.h
+++ b/include/linux/soundwire/sdw_intel.h
@@ -4,36 +4,185 @@
#ifndef __SDW_INTEL_H
#define __SDW_INTEL_H
+#include <linux/irqreturn.h>
+#include <linux/soundwire/sdw.h>
+
+/**
+ * struct sdw_intel_stream_params_data: configuration passed during
+ * the @params_stream callback, e.g. for interaction with DSP
+ * firmware.
+ */
+struct sdw_intel_stream_params_data {
+ struct snd_pcm_substream *substream;
+ struct snd_soc_dai *dai;
+ struct snd_pcm_hw_params *hw_params;
+ int link_id;
+ int alh_stream_id;
+};
+
+/**
+ * struct sdw_intel_stream_free_data: configuration passed during
+ * the @free_stream callback, e.g. for interaction with DSP
+ * firmware.
+ */
+struct sdw_intel_stream_free_data {
+ struct snd_pcm_substream *substream;
+ struct snd_soc_dai *dai;
+ int link_id;
+};
+
/**
* struct sdw_intel_ops: Intel audio driver callback ops
*
- * @config_stream: configure the stream with the hw_params
- * the first argument containing the context is mandatory
*/
struct sdw_intel_ops {
- int (*config_stream)(void *arg, void *substream,
- void *dai, void *hw_params, int stream_num);
+ int (*params_stream)(struct device *dev,
+ struct sdw_intel_stream_params_data *params_data);
+ int (*free_stream)(struct device *dev,
+ struct sdw_intel_stream_free_data *free_data);
+};
+
+/**
+ * struct sdw_intel_acpi_info - Soundwire Intel information found in ACPI tables
+ * @handle: ACPI controller handle
+ * @count: link count found with "sdw-master-count" property
+ * @link_mask: bit-wise mask listing links enabled by BIOS menu
+ *
+ * this structure could be expanded to e.g. provide all the _ADR
+ * information in case the link_mask is not sufficient to identify
+ * platform capabilities.
+ */
+struct sdw_intel_acpi_info {
+ acpi_handle handle;
+ int count;
+ u32 link_mask;
+};
+
+struct sdw_intel_link_res;
+
+/* Intel clock-stop/pm_runtime quirk definitions */
+
+/*
+ * Force the clock to remain on during pm_runtime suspend. This might
+ * be needed if Slave devices do not have an alternate clock source or
+ * if the latency requirements are very strict.
+ */
+#define SDW_INTEL_CLK_STOP_NOT_ALLOWED BIT(0)
+
+/*
+ * Stop the bus during pm_runtime suspend. If set, a complete bus
+ * reset and re-enumeration will be performed when the bus
+ * restarts. This mode shall not be used if Slave devices can generate
+ * in-band wakes.
+ */
+#define SDW_INTEL_CLK_STOP_TEARDOWN BIT(1)
+
+/*
+ * Stop the bus during pm_suspend if Slaves are not wake capable
+ * (e.g. speaker amplifiers). The clock-stop mode is typically
+ * slightly higher power than when the IP is completely powered-off.
+ */
+#define SDW_INTEL_CLK_STOP_WAKE_CAPABLE_ONLY BIT(2)
+
+/*
+ * Require a bus reset (and complete re-enumeration) when exiting
+ * clock stop modes. This may be needed if the controller power was
+ * turned off and all context lost. This quirk shall not be used if a
+ * Slave device needs to remain enumerated and keep its context,
+ * e.g. to provide the reasons for the wake, report acoustic events or
+ * pass a history buffer.
+ */
+#define SDW_INTEL_CLK_STOP_BUS_RESET BIT(3)
+
+struct sdw_intel_slave_id {
+ int link_id;
+ struct sdw_slave_id id;
};
/**
- * struct sdw_intel_res - Soundwire Intel resource structure
+ * struct sdw_intel_ctx - context allocated by the controller
+ * driver probe
+ * @count: link count
+ * @mmio_base: mmio base of SoundWire registers, only used to check
+ * hardware capabilities after all power dependencies are settled.
+ * @link_mask: bit-wise mask listing SoundWire links reported by the
+ * Controller
+ * @num_slaves: total number of devices exposed across all enabled links
+ * @handle: ACPI parent handle
+ * @links: information for each link (controller-specific and kept
+ * opaque here)
+ * @ids: array of slave_id, representing Slaves exposed across all enabled
+ * links
+ * @link_list: list to handle interrupts across all links
+ * @shim_lock: mutex to handle concurrent rmw access to shared SHIM registers.
+ */
+struct sdw_intel_ctx {
+ int count;
+ void __iomem *mmio_base;
+ u32 link_mask;
+ int num_slaves;
+ acpi_handle handle;
+ struct sdw_intel_link_res *links;
+ struct sdw_intel_slave_id *ids;
+ struct list_head link_list;
+ struct mutex shim_lock; /* lock for access to shared SHIM registers */
+};
+
+/**
+ * struct sdw_intel_res - Soundwire Intel global resource structure,
+ * typically populated by the DSP driver
+ *
+ * @count: link count
* @mmio_base: mmio base of SoundWire registers
* @irq: interrupt number
* @handle: ACPI parent handle
* @parent: parent device
* @ops: callback ops
- * @arg: callback arg
+ * @dev: device implementing hwparams and free callbacks
+ * @link_mask: bit-wise mask listing links selected by the DSP driver
+ * This mask may be a subset of the one reported by the controller since
+ * machine-specific quirks are handled in the DSP driver.
+ * @clock_stop_quirks: mask array of possible behaviors requested by the
+ * DSP driver. The quirks are common for all links for now.
*/
struct sdw_intel_res {
+ int count;
void __iomem *mmio_base;
int irq;
acpi_handle handle;
struct device *parent;
const struct sdw_intel_ops *ops;
- void *arg;
+ struct device *dev;
+ u32 link_mask;
+ u32 clock_stop_quirks;
};
-void *sdw_intel_init(acpi_handle *parent_handle, struct sdw_intel_res *res);
-void sdw_intel_exit(void *arg);
+/*
+ * On Intel platforms, the SoundWire IP has dependencies on power
+ * rails shared with the DSP, and the initialization steps are split
+ * in three. First an ACPI scan to check what the firmware describes
+ * in DSDT tables, then an allocation step (with no hardware
+ * configuration but with all the relevant devices created) and last
+ * the actual hardware configuration. The final stage is a global
+ * interrupt enable which is controlled by the DSP driver. Splitting
+ * these phases helps simplify the boot flow and make early decisions
+ * on e.g. which machine driver to select (I2S mode, HDaudio or
+ * SoundWire).
+ */
+int sdw_intel_acpi_scan(acpi_handle *parent_handle,
+ struct sdw_intel_acpi_info *info);
+
+void sdw_intel_process_wakeen_event(struct sdw_intel_ctx *ctx);
+
+struct sdw_intel_ctx *
+sdw_intel_probe(struct sdw_intel_res *res);
+
+int sdw_intel_startup(struct sdw_intel_ctx *ctx);
+
+void sdw_intel_exit(struct sdw_intel_ctx *ctx);
+
+void sdw_intel_enable_irq(void __iomem *mmio_base, bool enable);
+
+irqreturn_t sdw_intel_thread(int irq, void *dev_id);
#endif
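
A hedged sketch of the three-phase flow described in the comment above, roughly as a DSP driver might drive it; ops, dev and the clock-stop quirk mask are left unset here and error handling is trimmed.

#include <linux/acpi.h>
#include <linux/device.h>
#include <linux/soundwire/sdw_intel.h>

static struct sdw_intel_ctx *example_bringup(struct device *parent,
					     acpi_handle handle,
					     void __iomem *mmio, int irq)
{
	struct sdw_intel_acpi_info info;
	struct sdw_intel_res res = {};
	struct sdw_intel_ctx *ctx;

	if (sdw_intel_acpi_scan(&handle, &info) < 0)	/* phase 1: DSDT scan */
		return NULL;

	res.count = info.count;
	res.mmio_base = mmio;
	res.irq = irq;
	res.handle = info.handle;
	res.parent = parent;
	res.link_mask = info.link_mask;

	ctx = sdw_intel_probe(&res);			/* phase 2: allocate */
	if (!ctx)
		return NULL;

	if (sdw_intel_startup(ctx)) {			/* phase 3: hw config */
		sdw_intel_exit(ctx);
		return NULL;
	}

	return ctx;
}
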
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index af4f265d0f67..38286de779e3 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -13,6 +13,7 @@
#include <linux/completion.h>
#include <linux/scatterlist.h>
#include <linux/gpio/consumer.h>
+#include <linux/ptp_clock_kernel.h>
struct dma_chan;
struct property_entry;
@@ -90,6 +91,22 @@ void spi_statistics_add_transfer_stats(struct spi_statistics *stats,
SPI_STATISTICS_ADD_TO_FIELD(stats, field, 1)
/**
+ * struct spi_delay - SPI delay information
+ * @value: Value for the delay
+ * @unit: Unit for the delay
+ */
+struct spi_delay {
+#define SPI_DELAY_UNIT_USECS 0
+#define SPI_DELAY_UNIT_NSECS 1
+#define SPI_DELAY_UNIT_SCK 2
+ u16 value;
+ u8 unit;
+};
+
+extern int spi_delay_to_ns(struct spi_delay *_delay, struct spi_transfer *xfer);
+extern int spi_delay_exec(struct spi_delay *_delay, struct spi_transfer *xfer);
+
+/**
* struct spi_device - Controller side proxy for an SPI slave device
* @dev: Driver model representation of the device.
* @controller: SPI controller used with the device.
@@ -118,12 +135,14 @@ void spi_statistics_add_transfer_stats(struct spi_statistics *stats,
* @modalias: Name of the driver to use with this device, or an alias
* for that name. This appears in the sysfs "modalias" attribute
* for driver coldplugging, and in uevents used for hotplugging
+ * @driver_override: If the name of a driver is written to this attribute, then
+ * the device will bind to the named driver and only the named driver.
* @cs_gpio: LEGACY: gpio number of the chipselect line (optional, -ENOENT when
* not using a GPIO line) use cs_gpiod in new drivers by opting in on
* the spi_master.
* @cs_gpiod: gpio descriptor of the chipselect line (optional, NULL when
* not using a GPIO line)
- * @word_delay_usecs: microsecond delay to be inserted between consecutive
+ * @word_delay: delay to be inserted between consecutive
* words of a transfer
*
* @statistics: statistics for the spi_device
@@ -173,7 +192,7 @@ struct spi_device {
const char *driver_override;
int cs_gpio; /* LEGACY: chip select gpio */
struct gpio_desc *cs_gpiod; /* chip select gpio desc */
- uint8_t word_delay_usecs; /* inter-word delay */
+ struct spi_delay word_delay; /* inter-word delay */
/* the statistics */
struct spi_statistics statistics;
@@ -390,6 +409,11 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
* controller has native support for memory like operations.
* @unprepare_message: undo any work done by prepare_message().
* @slave_abort: abort the ongoing transfer request on an SPI slave controller
+ * @cs_setup: delay to be introduced by the controller after CS is asserted
+ * @cs_hold: delay to be introduced by the controller before CS is deasserted
+ * @cs_inactive: delay to be introduced by the controller after CS is
+ * deasserted. If @cs_change_delay is used from @spi_transfer, then the
+ * two delays will be added up.
* @cs_gpios: LEGACY: array of GPIO descs to use as chip select lines; one per
* CS number. Any individual value may be -ENOENT for CS lines that
* are not GPIOs (driven by the SPI controller itself). Use the cs_gpiods
@@ -401,6 +425,12 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
* GPIO descriptors rather than using global GPIO numbers grabbed by the
* driver. This will fill in @cs_gpiods and @cs_gpios should not be used,
* and SPI devices will have the cs_gpiod assigned rather than cs_gpio.
+ * @unused_native_cs: When cs_gpiods is used, spi_register_controller() will
+ * fill in this field with the first unused native CS, to be used by SPI
+ * controller drivers that need to drive a native CS when using GPIO CS.
+ * @max_native_cs: When cs_gpiods is used, and this field is filled in,
+ * spi_register_controller() will validate all native CS (including the
+ * unused native CS) against this value.
* @statistics: statistics for the spi_controller
* @dma_tx: DMA transmit channel
* @dma_rx: DMA receive channel
@@ -409,6 +439,13 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
* @fw_translate_cs: If the boot firmware uses different numbering scheme
* what Linux expects, this optional hook can be used to translate
* between the two.
+ * @ptp_sts_supported: If the driver sets this to true, it must provide a
+ * time snapshot in @spi_transfer->ptp_sts as close as possible to the
+ * moment in time when @spi_transfer->ptp_sts_word_pre and
+ * @spi_transfer->ptp_sts_word_post were transmitted.
+ * If the driver does not set this, the SPI core takes the snapshot as
+ * close to the driver hand-over as possible.
+ * @irq_flags: Interrupt enable state during PTP system timestamping
*
* Each SPI controller can communicate with one or more @spi_device
* children. These make a small bus, sharing MOSI, MISO and SCK signals
@@ -447,6 +484,9 @@ struct spi_controller {
/* spi_device.mode flags understood by this controller driver */
u32 mode_bits;
+ /* spi_device.mode flags override flags for this controller */
+ u32 buswidth_override_bits;
+
/* bitmask of supported bits_per_word for transfers */
u32 bits_per_word_mask;
#define SPI_BPW_MASK(bits) BIT((bits) - 1)
@@ -502,8 +542,8 @@ struct spi_controller {
* to configure specific CS timing through spi_set_cs_timing() after
* spi_setup().
*/
- void (*set_cs_timing)(struct spi_device *spi, u8 setup_clk_cycles,
- u8 hold_clk_cycles, u8 inactive_clk_cycles);
+ int (*set_cs_timing)(struct spi_device *spi, struct spi_delay *setup,
+ struct spi_delay *hold, struct spi_delay *inactive);
/* bidirectional bulk transfers
*
@@ -587,10 +627,17 @@ struct spi_controller {
/* Optimized handlers for SPI memory-like operations. */
const struct spi_controller_mem_ops *mem_ops;
+ /* CS delays */
+ struct spi_delay cs_setup;
+ struct spi_delay cs_hold;
+ struct spi_delay cs_inactive;
+
/* gpio chip select */
int *cs_gpios;
struct gpio_desc **cs_gpiods;
bool use_gpio_descriptors;
+ u8 unused_native_cs;
+ u8 max_native_cs;
/* statistics */
struct spi_statistics statistics;
@@ -604,6 +651,15 @@ struct spi_controller {
void *dummy_tx;
int (*fw_translate_cs)(struct spi_controller *ctlr, unsigned cs);
+
+ /*
+ * Driver sets this field to indicate it is able to snapshot SPI
+ * transfers (needed e.g. for reading the time of POSIX clocks)
+ */
+ bool ptp_sts_supported;
+
+ /* Interrupt enable state during PTP system timestamping */
+ unsigned long irq_flags;
};
static inline void *spi_controller_get_devdata(struct spi_controller *ctlr)
@@ -644,6 +700,14 @@ extern struct spi_message *spi_get_next_queued_message(struct spi_controller *ct
extern void spi_finalize_current_message(struct spi_controller *ctlr);
extern void spi_finalize_current_transfer(struct spi_controller *ctlr);
+/* Helper calls for driver to timestamp transfer */
+void spi_take_timestamp_pre(struct spi_controller *ctlr,
+ struct spi_transfer *xfer,
+ size_t progress, bool irqs_off);
+void spi_take_timestamp_post(struct spi_controller *ctlr,
+ struct spi_transfer *xfer,
+ size_t progress, bool irqs_off);
+
/* the spi driver core manages memory for the spi_controller classdev */
extern struct spi_controller *__spi_alloc_controller(struct device *host,
unsigned int size, bool slave);
@@ -739,13 +803,13 @@ extern void spi_res_release(struct spi_controller *ctlr,
* @cs_change: affects chipselect after this transfer completes
* @cs_change_delay: delay between cs deassert and assert when
* @cs_change is set and @spi_transfer is not the last in @spi_message
- * @cs_change_delay_unit: unit of cs_change_delay
+ * @delay: delay to be introduced after this transfer before
+ * (optionally) changing the chipselect status, then starting
+ * the next transfer or completing this @spi_message.
* @delay_usecs: microseconds to delay after this transfer before
* (optionally) changing the chipselect status, then starting
* the next transfer or completing this @spi_message.
- * @word_delay_usecs: microseconds to inter word delay after each word size
- * (set by bits_per_word) transmission.
- * @word_delay: clock cycles to inter word delay after each word size
+ * @word_delay: inter word delay to be introduced after each word size
* (set by bits_per_word) transmission.
* @effective_speed_hz: the effective SCK-speed that was used to
* transfer this transfer. Set to 0 if the spi bus driver does
@@ -753,6 +817,35 @@ extern void spi_res_release(struct spi_controller *ctlr,
* @transfer_list: transfers are sequenced through @spi_message.transfers
* @tx_sg: Scatterlist for transmit, currently not for client use
* @rx_sg: Scatterlist for receive, currently not for client use
+ * @ptp_sts_word_pre: The word (subject to bits_per_word semantics) offset
+ * within @tx_buf for which the SPI device is requesting that the time
+ * snapshot for this transfer begins. Upon completing the SPI transfer,
+ * this value may have changed compared to what was requested, depending
+ * on the available snapshotting resolution (DMA transfer,
+ * @ptp_sts_supported is false, etc).
+ * @ptp_sts_word_post: See @ptp_sts_word_pre. The two can be equal (meaning
+ * that a single byte should be snapshotted).
+ * If the core takes care of the timestamp (if @ptp_sts_supported is false
+ * for this controller), it will set @ptp_sts_word_pre to 0, and
+ * @ptp_sts_word_post to the length of the transfer. This is done
+ * purposefully (instead of setting to spi_transfer->len - 1) to denote
+ * that a transfer-level snapshot taken from within the driver may still
+ * be of higher quality.
+ * @ptp_sts: Pointer to a memory location held by the SPI slave device where a
+ * PTP system timestamp structure may lie. If drivers use PIO or their
+ * hardware has some sort of assist for retrieving exact transfer timing,
+ * they can (and should) assert @ptp_sts_supported and populate this
+ * structure using the ptp_read_system_*ts helper functions.
+ * The timestamp must represent the time at which the SPI slave device has
+ * processed the word, i.e. the "pre" timestamp should be taken before
+ * transmitting the "pre" word, and the "post" timestamp after receiving
+ * transmit confirmation from the controller for the "post" word.
+ * @timestamped_pre: Set by the SPI controller driver to denote it has acted
+ * upon the @ptp_sts request. Not set when the SPI core has taken care of
+ * the task. SPI device drivers are free to print a warning if this comes
+ * back unset and they need the better resolution.
+ * @timestamped_post: See above. The reason why both exist is that these
+ * booleans are also used to keep state in the core SPI logic.
*
* SPI transfers always write the same number of bytes as they read.
* Protocol drivers should always provide @rx_buf and/or @tx_buf.
@@ -830,18 +923,21 @@ struct spi_transfer {
#define SPI_NBITS_DUAL 0x02 /* 2bits transfer */
#define SPI_NBITS_QUAD 0x04 /* 4bits transfer */
u8 bits_per_word;
- u8 word_delay_usecs;
u16 delay_usecs;
- u16 cs_change_delay;
- u8 cs_change_delay_unit;
-#define SPI_DELAY_UNIT_USECS 0
-#define SPI_DELAY_UNIT_NSECS 1
-#define SPI_DELAY_UNIT_SCK 2
+ struct spi_delay delay;
+ struct spi_delay cs_change_delay;
+ struct spi_delay word_delay;
u32 speed_hz;
- u16 word_delay;
u32 effective_speed_hz;
+ unsigned int ptp_sts_word_pre;
+ unsigned int ptp_sts_word_post;
+
+ struct ptp_system_timestamp *ptp_sts;
+
+ bool timestamped;
+
struct list_head transfer_list;
};
@@ -935,6 +1031,20 @@ spi_transfer_del(struct spi_transfer *t)
list_del(&t->transfer_list);
}
+static inline int
+spi_transfer_delay_exec(struct spi_transfer *t)
+{
+ struct spi_delay d;
+
+ if (t->delay_usecs) {
+ d.value = t->delay_usecs;
+ d.unit = SPI_DELAY_UNIT_USECS;
+ return spi_delay_exec(&d, NULL);
+ }
+
+ return spi_delay_exec(&t->delay, t);
+}
+
/**
* spi_message_init_with_transfers - Initialize spi_message and append transfers
* @m: spi_message to be initialized
@@ -982,7 +1092,10 @@ static inline void spi_message_free(struct spi_message *m)
kfree(m);
}
-extern void spi_set_cs_timing(struct spi_device *spi, u8 setup, u8 hold, u8 inactive_dly);
+extern int spi_set_cs_timing(struct spi_device *spi,
+ struct spi_delay *setup,
+ struct spi_delay *hold,
+ struct spi_delay *inactive);
extern int spi_setup(struct spi_device *spi);
extern int spi_async(struct spi_device *spi, struct spi_message *message);
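
Per-transfer delays are now {value, unit} pairs instead of raw microsecond fields; a minimal sketch of filling them in (buffer and timings are arbitrary):

#include <linux/spi/spi.h>

static void example_fill_xfer(struct spi_transfer *xfer,
			      const void *tx, unsigned int len)
{
	xfer->tx_buf = tx;
	xfer->len = len;

	/* 10 us between consecutive words */
	xfer->word_delay.value = 10;
	xfer->word_delay.unit = SPI_DELAY_UNIT_USECS;

	/* two SCK cycles after the transfer, before the chipselect changes */
	xfer->delay.value = 2;
	xfer->delay.unit = SPI_DELAY_UNIT_SCK;
}
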
diff --git a/include/linux/spi/spi_oc_tiny.h b/include/linux/spi/spi_oc_tiny.h
index a3ecf2feadf2..284872ac130c 100644
--- a/include/linux/spi/spi_oc_tiny.h
+++ b/include/linux/spi/spi_oc_tiny.h
@@ -6,16 +6,12 @@
* struct tiny_spi_platform_data - platform data of the OpenCores tiny SPI
* @freq: input clock freq to the core.
* @baudwidth: baud rate divider width of the core.
- * @gpio_cs_count: number of gpio pins used for chipselect.
- * @gpio_cs: array of gpio pins used for chipselect.
*
* freq and baudwidth are used only if the divider is programmable.
*/
struct tiny_spi_platform_data {
unsigned int freq;
unsigned int baudwidth;
- unsigned int gpio_cs_count;
- int *gpio_cs;
};
#endif /* _LINUX_SPI_SPI_OC_TINY_H */
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index 031ce8617df8..d3770b3f9d9a 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -93,12 +93,13 @@
#ifdef CONFIG_DEBUG_SPINLOCK
extern void __raw_spin_lock_init(raw_spinlock_t *lock, const char *name,
- struct lock_class_key *key);
-# define raw_spin_lock_init(lock) \
-do { \
- static struct lock_class_key __key; \
- \
- __raw_spin_lock_init((lock), #lock, &__key); \
+ struct lock_class_key *key, short inner);
+
+# define raw_spin_lock_init(lock) \
+do { \
+ static struct lock_class_key __key; \
+ \
+ __raw_spin_lock_init((lock), #lock, &__key, LD_WAIT_SPIN); \
} while (0)
#else
@@ -327,12 +328,26 @@ static __always_inline raw_spinlock_t *spinlock_check(spinlock_t *lock)
return &lock->rlock;
}
-#define spin_lock_init(_lock) \
-do { \
- spinlock_check(_lock); \
- raw_spin_lock_init(&(_lock)->rlock); \
+#ifdef CONFIG_DEBUG_SPINLOCK
+
+# define spin_lock_init(lock) \
+do { \
+ static struct lock_class_key __key; \
+ \
+ __raw_spin_lock_init(spinlock_check(lock), \
+ #lock, &__key, LD_WAIT_CONFIG); \
+} while (0)
+
+#else
+
+# define spin_lock_init(_lock) \
+do { \
+ spinlock_check(_lock); \
+ *(_lock) = __SPIN_LOCK_UNLOCKED(_lock); \
} while (0)
+#endif
+
static __always_inline void spin_lock(spinlock_t *lock)
{
raw_spin_lock(&lock->rlock);
diff --git a/include/linux/spinlock_api_smp.h b/include/linux/spinlock_api_smp.h
index b762eaba4cdf..19a9be9d97ee 100644
--- a/include/linux/spinlock_api_smp.h
+++ b/include/linux/spinlock_api_smp.h
@@ -147,7 +147,7 @@ static inline void __raw_spin_lock(raw_spinlock_t *lock)
static inline void __raw_spin_unlock(raw_spinlock_t *lock)
{
- spin_release(&lock->dep_map, 1, _RET_IP_);
+ spin_release(&lock->dep_map, _RET_IP_);
do_raw_spin_unlock(lock);
preempt_enable();
}
@@ -155,7 +155,7 @@ static inline void __raw_spin_unlock(raw_spinlock_t *lock)
static inline void __raw_spin_unlock_irqrestore(raw_spinlock_t *lock,
unsigned long flags)
{
- spin_release(&lock->dep_map, 1, _RET_IP_);
+ spin_release(&lock->dep_map, _RET_IP_);
do_raw_spin_unlock(lock);
local_irq_restore(flags);
preempt_enable();
@@ -163,7 +163,7 @@ static inline void __raw_spin_unlock_irqrestore(raw_spinlock_t *lock,
static inline void __raw_spin_unlock_irq(raw_spinlock_t *lock)
{
- spin_release(&lock->dep_map, 1, _RET_IP_);
+ spin_release(&lock->dep_map, _RET_IP_);
do_raw_spin_unlock(lock);
local_irq_enable();
preempt_enable();
@@ -171,7 +171,7 @@ static inline void __raw_spin_unlock_irq(raw_spinlock_t *lock)
static inline void __raw_spin_unlock_bh(raw_spinlock_t *lock)
{
- spin_release(&lock->dep_map, 1, _RET_IP_);
+ spin_release(&lock->dep_map, _RET_IP_);
do_raw_spin_unlock(lock);
__local_bh_enable_ip(_RET_IP_, SOFTIRQ_LOCK_OFFSET);
}
diff --git a/include/linux/spinlock_types.h b/include/linux/spinlock_types.h
index 24b4e6f2c1a2..6102e6bff3ae 100644
--- a/include/linux/spinlock_types.h
+++ b/include/linux/spinlock_types.h
@@ -33,8 +33,18 @@ typedef struct raw_spinlock {
#define SPINLOCK_OWNER_INIT ((void *)-1L)
#ifdef CONFIG_DEBUG_LOCK_ALLOC
-# define SPIN_DEP_MAP_INIT(lockname) .dep_map = { .name = #lockname }
+# define RAW_SPIN_DEP_MAP_INIT(lockname) \
+ .dep_map = { \
+ .name = #lockname, \
+ .wait_type_inner = LD_WAIT_SPIN, \
+ }
+# define SPIN_DEP_MAP_INIT(lockname) \
+ .dep_map = { \
+ .name = #lockname, \
+ .wait_type_inner = LD_WAIT_CONFIG, \
+ }
#else
+# define RAW_SPIN_DEP_MAP_INIT(lockname)
# define SPIN_DEP_MAP_INIT(lockname)
#endif
@@ -51,7 +61,7 @@ typedef struct raw_spinlock {
{ \
.raw_lock = __ARCH_SPIN_LOCK_UNLOCKED, \
SPIN_DEBUG_INIT(lockname) \
- SPIN_DEP_MAP_INIT(lockname) }
+ RAW_SPIN_DEP_MAP_INIT(lockname) }
#define __RAW_SPIN_LOCK_UNLOCKED(lockname) \
(raw_spinlock_t) __RAW_SPIN_LOCK_INITIALIZER(lockname)
@@ -72,11 +82,17 @@ typedef struct spinlock {
};
} spinlock_t;
+#define ___SPIN_LOCK_INITIALIZER(lockname) \
+ { \
+ .raw_lock = __ARCH_SPIN_LOCK_UNLOCKED, \
+ SPIN_DEBUG_INIT(lockname) \
+ SPIN_DEP_MAP_INIT(lockname) }
+
#define __SPIN_LOCK_INITIALIZER(lockname) \
- { { .rlock = __RAW_SPIN_LOCK_INITIALIZER(lockname) } }
+ { { .rlock = ___SPIN_LOCK_INITIALIZER(lockname) } }
#define __SPIN_LOCK_UNLOCKED(lockname) \
- (spinlock_t ) __SPIN_LOCK_INITIALIZER(lockname)
+ (spinlock_t) __SPIN_LOCK_INITIALIZER(lockname)
#define DEFINE_SPINLOCK(x) spinlock_t x = __SPIN_LOCK_UNLOCKED(x)
diff --git a/include/linux/splice.h b/include/linux/splice.h
index 74b4911ac16d..ebbbfea48aa0 100644
--- a/include/linux/splice.h
+++ b/include/linux/splice.h
@@ -78,6 +78,9 @@ extern ssize_t add_to_pipe(struct pipe_inode_info *,
struct pipe_buffer *);
extern ssize_t splice_direct_to_actor(struct file *, struct splice_desc *,
splice_direct_actor *);
+extern long do_splice(struct file *in, loff_t __user *off_in,
+ struct file *out, loff_t __user *off_out,
+ size_t len, unsigned int flags);
/*
* for dynamic pipe sizing
diff --git a/include/linux/stackprotector.h b/include/linux/stackprotector.h
index 6b792d080eee..4c678c4fec58 100644
--- a/include/linux/stackprotector.h
+++ b/include/linux/stackprotector.h
@@ -6,7 +6,7 @@
#include <linux/sched.h>
#include <linux/random.h>
-#ifdef CONFIG_STACKPROTECTOR
+#if defined(CONFIG_STACKPROTECTOR) || defined(CONFIG_ARM64_PTR_AUTH)
# include <asm/stackprotector.h>
#else
static inline void boot_init_stack_canary(void)
diff --git a/include/linux/stat.h b/include/linux/stat.h
index 765573dc17d6..528c4baad091 100644
--- a/include/linux/stat.h
+++ b/include/linux/stat.h
@@ -33,7 +33,8 @@ struct kstat {
STATX_ATTR_IMMUTABLE | \
STATX_ATTR_APPEND | \
STATX_ATTR_NODUMP | \
- STATX_ATTR_ENCRYPTED \
+ STATX_ATTR_ENCRYPTED | \
+ STATX_ATTR_VERITY \
)/* Attrs corresponding to FS_*_FL flags */
u64 ino;
dev_t dev;
diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h
index dc60d03c4b60..19190c609282 100644
--- a/include/linux/stmmac.h
+++ b/include/linux/stmmac.h
@@ -13,6 +13,7 @@
#define __STMMAC_PLATFORM_DATA
#include <linux/platform_device.h>
+#include <linux/phy.h>
#define MTL_MAX_RX_QUEUES 8
#define MTL_MAX_TX_QUEUES 8
@@ -92,6 +93,7 @@ struct stmmac_dma_cfg {
int fixed_burst;
int mixed_burst;
bool aal;
+ bool eame;
};
#define AXI_BLEN 7
@@ -107,6 +109,18 @@ struct stmmac_axi {
bool axi_rb;
};
+#define EST_GCL 1024
+struct stmmac_est {
+ int enable;
+ u32 btr_offset[2];
+ u32 btr[2];
+ u32 ctr[2];
+ u32 ter;
+ u32 gcl_unaligned[EST_GCL];
+ u32 gcl[EST_GCL];
+ u32 gcl_size;
+};
+
struct stmmac_rxq_cfg {
u8 mode_to_use;
u32 chan;
@@ -125,18 +139,20 @@ struct stmmac_txq_cfg {
u32 low_credit;
bool use_prio;
u32 prio;
+ int tbs_en;
};
struct plat_stmmacenet_data {
int bus_id;
int phy_addr;
int interface;
- int phy_interface;
+ phy_interface_t phy_interface;
struct stmmac_mdio_bus_data *mdio_bus_data;
struct device_node *phy_node;
struct device_node *phylink_node;
struct device_node *mdio_node;
struct stmmac_dma_cfg *dma_cfg;
+ struct stmmac_est *est;
int clk_csr;
int has_gmac;
int enh_desc;
diff --git a/include/linux/stop_machine.h b/include/linux/stop_machine.h
index f9a0c6189852..76d8b09384a7 100644
--- a/include/linux/stop_machine.h
+++ b/include/linux/stop_machine.h
@@ -32,8 +32,6 @@ int stop_one_cpu(unsigned int cpu, cpu_stop_fn_t fn, void *arg);
int stop_two_cpus(unsigned int cpu1, unsigned int cpu2, cpu_stop_fn_t fn, void *arg);
bool stop_one_cpu_nowait(unsigned int cpu, cpu_stop_fn_t fn, void *arg,
struct cpu_stop_work *work_buf);
-int stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg);
-int try_stop_cpus(const struct cpumask *cpumask, cpu_stop_fn_t fn, void *arg);
void stop_machine_park(int cpu);
void stop_machine_unpark(int cpu);
void stop_machine_yield(const struct cpumask *cpumask);
@@ -82,20 +80,6 @@ static inline bool stop_one_cpu_nowait(unsigned int cpu,
return false;
}
-static inline int stop_cpus(const struct cpumask *cpumask,
- cpu_stop_fn_t fn, void *arg)
-{
- if (cpumask_test_cpu(raw_smp_processor_id(), cpumask))
- return stop_one_cpu(raw_smp_processor_id(), fn, arg);
- return -ENOENT;
-}
-
-static inline int try_stop_cpus(const struct cpumask *cpumask,
- cpu_stop_fn_t fn, void *arg)
-{
- return stop_cpus(cpumask, fn, arg);
-}
-
#endif /* CONFIG_SMP */
/*
diff --git a/include/linux/string.h b/include/linux/string.h
index b6ccdc2c7f02..6dfbb2efa815 100644
--- a/include/linux/string.h
+++ b/include/linux/string.h
@@ -62,6 +62,7 @@ extern char * strchr(const char *,int);
#ifndef __HAVE_ARCH_STRCHRNUL
extern char * strchrnul(const char *,int);
#endif
+extern char * strnchrnul(const char *, size_t, int);
#ifndef __HAVE_ARCH_STRNCHR
extern char * strnchr(const char *, size_t, int);
#endif
@@ -216,6 +217,8 @@ int bprintf(u32 *bin_buf, size_t size, const char *fmt, ...) __printf(3, 4);
extern ssize_t memory_read_from_buffer(void *to, size_t count, loff_t *ppos,
const void *from, size_t available);
+int ptr_to_hashval(const void *ptr, unsigned long *hashval_out);
+
/**
* strstarts - does @str start with @prefix?
* @str: string to examine
diff --git a/include/linux/sunrpc/auth.h b/include/linux/sunrpc/auth.h
index 5f9076fdb090..4f6b28487f28 100644
--- a/include/linux/sunrpc/auth.h
+++ b/include/linux/sunrpc/auth.h
@@ -10,8 +10,6 @@
#ifndef _LINUX_SUNRPC_AUTH_H
#define _LINUX_SUNRPC_AUTH_H
-#ifdef __KERNEL__
-
#include <linux/sunrpc/sched.h>
#include <linux/sunrpc/msg_prot.h>
#include <linux/sunrpc/xdr.h>
@@ -115,7 +113,6 @@ struct rpc_authops {
int (*hash_cred)(struct auth_cred *, unsigned int);
struct rpc_cred * (*lookup_cred)(struct rpc_auth *, struct auth_cred *, int);
struct rpc_cred * (*crcreate)(struct rpc_auth*, struct auth_cred *, int, gfp_t);
- int (*list_pseudoflavors)(rpc_authflavor_t *, int);
rpc_authflavor_t (*info2flavor)(struct rpcsec_gss_info *);
int (*flavor2info)(rpc_authflavor_t,
struct rpcsec_gss_info *);
@@ -160,7 +157,6 @@ rpc_authflavor_t rpcauth_get_pseudoflavor(rpc_authflavor_t,
struct rpcsec_gss_info *);
int rpcauth_get_gssinfo(rpc_authflavor_t,
struct rpcsec_gss_info *);
-int rpcauth_list_flavors(rpc_authflavor_t *, int);
struct rpc_cred * rpcauth_lookup_credcache(struct rpc_auth *, struct auth_cred *, int, gfp_t);
void rpcauth_init_cred(struct rpc_cred *, const struct auth_cred *, struct rpc_auth *, const struct rpc_credops *);
struct rpc_cred * rpcauth_lookupcred(struct rpc_auth *, int);
@@ -194,5 +190,4 @@ struct rpc_cred *get_rpccred(struct rpc_cred *cred)
return NULL;
}
-#endif /* __KERNEL__ */
#endif /* _LINUX_SUNRPC_AUTH_H */
diff --git a/include/linux/sunrpc/auth_gss.h b/include/linux/sunrpc/auth_gss.h
index 30427b729070..43e481aa347a 100644
--- a/include/linux/sunrpc/auth_gss.h
+++ b/include/linux/sunrpc/auth_gss.h
@@ -13,7 +13,6 @@
#ifndef _LINUX_SUNRPC_AUTH_GSS_H
#define _LINUX_SUNRPC_AUTH_GSS_H
-#ifdef __KERNEL__
#include <linux/refcount.h>
#include <linux/sunrpc/auth.h>
#include <linux/sunrpc/svc.h>
@@ -90,6 +89,5 @@ struct gss_cred {
unsigned long gc_upcall_timestamp;
};
-#endif /* __KERNEL__ */
#endif /* _LINUX_SUNRPC_AUTH_GSS_H */
diff --git a/include/linux/sunrpc/cache.h b/include/linux/sunrpc/cache.h
index f8603724fbee..0f64de7caa39 100644
--- a/include/linux/sunrpc/cache.h
+++ b/include/linux/sunrpc/cache.h
@@ -45,8 +45,8 @@
*/
struct cache_head {
struct hlist_node cache_list;
- time_t expiry_time; /* After time time, don't use the data */
- time_t last_refresh; /* If CACHE_PENDING, this is when upcall was
+ time64_t expiry_time; /* After this time, don't use the data */
+ time64_t last_refresh; /* If CACHE_PENDING, this is when upcall was
* sent, else this is when update was
* received, though it is alway set to
* be *after* ->flush_time.
@@ -95,22 +95,22 @@ struct cache_detail {
/* fields below this comment are for internal use
* and should not be touched by cache owners
*/
- time_t flush_time; /* flush all cache items with
+ time64_t flush_time; /* flush all cache items with
* last_refresh at or earlier
* than this. last_refresh
* is never set at or earlier
* than this.
*/
struct list_head others;
- time_t nextcheck;
+ time64_t nextcheck;
int entries;
/* fields for communication over channel */
struct list_head queue;
atomic_t writers; /* how many time is /channel open */
- time_t last_close; /* if no writers, when did last close */
- time_t last_warn; /* when we last warned about no writers */
+ time64_t last_close; /* if no writers, when did last close */
+ time64_t last_warn; /* when we last warned about no writers */
union {
struct proc_dir_entry *procfs;
@@ -147,18 +147,22 @@ struct cache_deferred_req {
* timestamps kept in the cache are expressed in seconds
* since boot. This is the best for measuring differences in
* real time.
+ * This reimplements ktime_get_boottime_seconds() in a slightly
+ * faster but less accurate way. When we end up converting
+ * back to wallclock (CLOCK_REALTIME), that error often
+ * cancels out during the reverse operation.
*/
-static inline time_t seconds_since_boot(void)
+static inline time64_t seconds_since_boot(void)
{
- struct timespec boot;
- getboottime(&boot);
- return get_seconds() - boot.tv_sec;
+ struct timespec64 boot;
+ getboottime64(&boot);
+ return ktime_get_real_seconds() - boot.tv_sec;
}
-static inline time_t convert_to_wallclock(time_t sinceboot)
+static inline time64_t convert_to_wallclock(time64_t sinceboot)
{
- struct timespec boot;
- getboottime(&boot);
+ struct timespec64 boot;
+ getboottime64(&boot);
return boot.tv_sec + sinceboot;
}
@@ -273,7 +277,7 @@ static inline int get_uint(char **bpp, unsigned int *anint)
return 0;
}
-static inline int get_time(char **bpp, time_t *time)
+static inline int get_time(char **bpp, time64_t *time)
{
char buf[50];
long long ll;
@@ -287,20 +291,20 @@ static inline int get_time(char **bpp, time_t *time)
if (kstrtoll(buf, 0, &ll))
return -EINVAL;
- *time = (time_t)ll;
+ *time = ll;
return 0;
}
-static inline time_t get_expiry(char **bpp)
+static inline time64_t get_expiry(char **bpp)
{
- time_t rv;
- struct timespec boot;
+ time64_t rv;
+ struct timespec64 boot;
if (get_time(bpp, &rv))
return 0;
if (rv < 0)
return 0;
- getboottime(&boot);
+ getboottime64(&boot);
return rv - boot.tv_sec;
}
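
After the time64_t conversion above, cache timestamps are still kept in seconds since boot; seconds_since_boot() and convert_to_wallclock() are approximate inverses, with the small error discussed in the comment cancelling out on the round trip. A brief sketch, assuming the inline helpers above:

/* Sketch: make an entry valid for 30 seconds and report the expiry in wallclock seconds. */
static void cache_stamp_example(struct cache_head *h)
{
	h->expiry_time = seconds_since_boot() + 30;
	pr_debug("expires at %lld (CLOCK_REALTIME seconds)\n",
		 convert_to_wallclock(h->expiry_time));
}
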
diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
index abc63bd1be2b..ca7e108248e2 100644
--- a/include/linux/sunrpc/clnt.h
+++ b/include/linux/sunrpc/clnt.h
@@ -109,8 +109,6 @@ struct rpc_procinfo {
const char * p_name; /* name of procedure */
};
-#ifdef __KERNEL__
-
struct rpc_create_args {
struct net *net;
int protocol;
@@ -149,6 +147,7 @@ struct rpc_add_xprt_test {
#define RPC_CLNT_CREATE_NO_IDLE_TIMEOUT (1UL << 8)
#define RPC_CLNT_CREATE_NO_RETRANS_TIMEOUT (1UL << 9)
#define RPC_CLNT_CREATE_SOFTERR (1UL << 10)
+#define RPC_CLNT_CREATE_REUSEPORT (1UL << 11)
struct rpc_clnt *rpc_create(struct rpc_create_args *args);
struct rpc_clnt *rpc_bind_new_program(struct rpc_clnt *,
@@ -237,5 +236,4 @@ static inline int rpc_reply_expected(struct rpc_task *task)
(task->tk_msg.rpc_proc->p_decode != NULL);
}
-#endif /* __KERNEL__ */
#endif /* _LINUX_SUNRPC_CLNT_H */
diff --git a/include/linux/sunrpc/gss_api.h b/include/linux/sunrpc/gss_api.h
index 5ac5db4d295f..48c1b1674cbf 100644
--- a/include/linux/sunrpc/gss_api.h
+++ b/include/linux/sunrpc/gss_api.h
@@ -13,7 +13,6 @@
#ifndef _LINUX_SUNRPC_GSS_API_H
#define _LINUX_SUNRPC_GSS_API_H
-#ifdef __KERNEL__
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/msg_prot.h>
#include <linux/uio.h>
@@ -49,7 +48,7 @@ int gss_import_sec_context(
size_t bufsize,
struct gss_api_mech *mech,
struct gss_ctx **ctx_id,
- time_t *endtime,
+ time64_t *endtime,
gfp_t gfp_mask);
u32 gss_get_mic(
struct gss_ctx *ctx_id,
@@ -109,7 +108,7 @@ struct gss_api_ops {
const void *input_token,
size_t bufsize,
struct gss_ctx *ctx_id,
- time_t *endtime,
+ time64_t *endtime,
gfp_t gfp_mask);
u32 (*gss_get_mic)(
struct gss_ctx *ctx_id,
@@ -151,15 +150,11 @@ struct gss_api_mech *gss_mech_get_by_name(const char *);
/* Similar, but get by pseudoflavor. */
struct gss_api_mech *gss_mech_get_by_pseudoflavor(u32);
-/* Fill in an array with a list of supported pseudoflavors */
-int gss_mech_list_pseudoflavors(rpc_authflavor_t *, int);
-
struct gss_api_mech * gss_mech_get(struct gss_api_mech *);
/* For every successful gss_mech_get or gss_mech_get_by_* call there must be a
* corresponding call to gss_mech_put. */
void gss_mech_put(struct gss_api_mech *);
-#endif /* __KERNEL__ */
#endif /* _LINUX_SUNRPC_GSS_API_H */
diff --git a/include/linux/sunrpc/gss_err.h b/include/linux/sunrpc/gss_err.h
index a6807867bd21..b73c329c83f2 100644
--- a/include/linux/sunrpc/gss_err.h
+++ b/include/linux/sunrpc/gss_err.h
@@ -34,8 +34,6 @@
#ifndef _LINUX_SUNRPC_GSS_ERR_H
#define _LINUX_SUNRPC_GSS_ERR_H
-#ifdef __KERNEL__
-
typedef unsigned int OM_uint32;
/*
@@ -163,5 +161,4 @@ typedef unsigned int OM_uint32;
/* XXXX This is a necessary evil until the spec is fixed */
#define GSS_S_CRED_UNAVAIL GSS_S_FAILURE
-#endif /* __KERNEL__ */
#endif /* __LINUX_SUNRPC_GSS_ERR_H */
diff --git a/include/linux/sunrpc/gss_krb5.h b/include/linux/sunrpc/gss_krb5.h
index 02c0412e368c..c1d77dd8ed41 100644
--- a/include/linux/sunrpc/gss_krb5.h
+++ b/include/linux/sunrpc/gss_krb5.h
@@ -106,9 +106,9 @@ struct krb5_ctx {
struct crypto_sync_skcipher *initiator_enc_aux;
u8 Ksess[GSS_KRB5_MAX_KEYLEN]; /* session key */
u8 cksum[GSS_KRB5_MAX_KEYLEN];
- s32 endtime;
atomic_t seq_send;
atomic64_t seq_send64;
+ time64_t endtime;
struct xdr_netobj mech_used;
u8 initiator_sign[GSS_KRB5_MAX_KEYLEN];
u8 acceptor_sign[GSS_KRB5_MAX_KEYLEN];
diff --git a/include/linux/sunrpc/msg_prot.h b/include/linux/sunrpc/msg_prot.h
index 4722b28ec36a..bea40d9f03a1 100644
--- a/include/linux/sunrpc/msg_prot.h
+++ b/include/linux/sunrpc/msg_prot.h
@@ -8,8 +8,6 @@
#ifndef _LINUX_SUNRPC_MSGPROT_H_
#define _LINUX_SUNRPC_MSGPROT_H_
-#ifdef __KERNEL__ /* user programs should get these from the rpc header files */
-
#define RPC_VERSION 2
/* size of an XDR encoding unit in bytes, i.e. 32bit */
@@ -217,5 +215,4 @@ typedef __be32 rpc_fraghdr;
/* Assume INET6_ADDRSTRLEN will always be larger than INET_ADDRSTRLEN... */
#define RPCBIND_MAXUADDRLEN RPCBIND_MAXUADDR6LEN
-#endif /* __KERNEL__ */
#endif /* _LINUX_SUNRPC_MSGPROT_H_ */
diff --git a/include/linux/sunrpc/rpc_pipe_fs.h b/include/linux/sunrpc/rpc_pipe_fs.h
index e90b9bd99ded..cd188a527d16 100644
--- a/include/linux/sunrpc/rpc_pipe_fs.h
+++ b/include/linux/sunrpc/rpc_pipe_fs.h
@@ -2,8 +2,6 @@
#ifndef _LINUX_SUNRPC_RPC_PIPE_FS_H
#define _LINUX_SUNRPC_RPC_PIPE_FS_H
-#ifdef __KERNEL__
-
#include <linux/workqueue.h>
struct rpc_pipe_dir_head {
@@ -133,4 +131,3 @@ extern void unregister_rpc_pipefs(void);
extern bool gssd_running(struct net *net);
#endif
-#endif
diff --git a/include/linux/sunrpc/stats.h b/include/linux/sunrpc/stats.h
index 84b92b4ad1c0..d94d4f410507 100644
--- a/include/linux/sunrpc/stats.h
+++ b/include/linux/sunrpc/stats.h
@@ -63,7 +63,7 @@ struct proc_dir_entry * rpc_proc_register(struct net *,struct rpc_stat *);
void rpc_proc_unregister(struct net *,const char *);
void rpc_proc_zero(const struct rpc_program *);
struct proc_dir_entry * svc_proc_register(struct net *, struct svc_stat *,
- const struct file_operations *);
+ const struct proc_ops *);
void svc_proc_unregister(struct net *, const char *);
void svc_seq_show(struct seq_file *,
@@ -75,7 +75,7 @@ static inline void rpc_proc_unregister(struct net *net, const char *p) {}
static inline void rpc_proc_zero(const struct rpc_program *p) {}
static inline struct proc_dir_entry *svc_proc_register(struct net *net, struct svc_stat *s,
- const struct file_operations *f) { return NULL; }
+ const struct proc_ops *proc_ops) { return NULL; }
static inline void svc_proc_unregister(struct net *net, const char *p) {}
static inline void svc_seq_show(struct seq_file *seq,
diff --git a/include/linux/sunrpc/svcauth.h b/include/linux/sunrpc/svcauth.h
index 3e53a6e2ada7..b0003866a249 100644
--- a/include/linux/sunrpc/svcauth.h
+++ b/include/linux/sunrpc/svcauth.h
@@ -10,8 +10,6 @@
#ifndef _LINUX_SUNRPC_SVCAUTH_H_
#define _LINUX_SUNRPC_SVCAUTH_H_
-#ifdef __KERNEL__
-
#include <linux/string.h>
#include <linux/sunrpc/msg_prot.h>
#include <linux/sunrpc/cache.h>
@@ -185,6 +183,4 @@ static inline unsigned long hash_mem(char const *buf, int length, int bits)
return full_name_hash(NULL, buf, length) >> (32 - bits);
}
-#endif /* __KERNEL__ */
-
#endif /* _LINUX_SUNRPC_SVCAUTH_H_ */
diff --git a/include/linux/sunrpc/svcauth_gss.h b/include/linux/sunrpc/svcauth_gss.h
index a4528b26c8aa..ca39a388dc22 100644
--- a/include/linux/sunrpc/svcauth_gss.h
+++ b/include/linux/sunrpc/svcauth_gss.h
@@ -9,7 +9,6 @@
#ifndef _LINUX_SUNRPC_SVCAUTH_GSS_H
#define _LINUX_SUNRPC_SVCAUTH_GSS_H
-#ifdef __KERNEL__
#include <linux/sched.h>
#include <linux/sunrpc/types.h>
#include <linux/sunrpc/xdr.h>
@@ -24,5 +23,4 @@ void gss_svc_shutdown_net(struct net *net);
int svcauth_gss_register_pseudoflavor(u32 pseudoflavor, char * name);
u32 svcauth_gss_flavor(struct auth_domain *dom);
-#endif /* __KERNEL__ */
#endif /* _LINUX_SUNRPC_SVCAUTH_GSS_H */
diff --git a/include/linux/sunrpc/xdr.h b/include/linux/sunrpc/xdr.h
index f33e5013bdfb..b41f34977995 100644
--- a/include/linux/sunrpc/xdr.h
+++ b/include/linux/sunrpc/xdr.h
@@ -11,8 +11,6 @@
#ifndef _SUNRPC_XDR_H_
#define _SUNRPC_XDR_H_
-#ifdef __KERNEL__
-
#include <linux/uio.h>
#include <asm/byteorder.h>
#include <asm/unaligned.h>
@@ -552,6 +550,5 @@ xdr_stream_decode_uint32_array(struct xdr_stream *xdr,
*array = be32_to_cpup(p);
return retval;
}
-#endif /* __KERNEL__ */
#endif /* _SUNRPC_XDR_H_ */
diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h
index d783e15ba898..e64bd8222f55 100644
--- a/include/linux/sunrpc/xprt.h
+++ b/include/linux/sunrpc/xprt.h
@@ -19,8 +19,6 @@
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/msg_prot.h>
-#ifdef __KERNEL__
-
#define RPC_MIN_SLOT_TABLE (2U)
#define RPC_DEF_SLOT_TABLE (16U)
#define RPC_MAX_SLOT_TABLE_LIMIT (65536U)
@@ -207,7 +205,8 @@ struct rpc_xprt {
unsigned int min_reqs; /* min number of slots */
unsigned int num_reqs; /* total slots */
unsigned long state; /* transport state */
- unsigned char resvport : 1; /* use a reserved port */
+ unsigned char resvport : 1, /* use a reserved port */
+ reuseport : 1; /* reuse port on reconnect */
atomic_t swapper; /* we're swapping over this
transport */
unsigned int bind_index; /* bind function index */
@@ -505,6 +504,4 @@ static inline void xprt_inject_disconnect(struct rpc_xprt *xprt)
}
#endif
-#endif /* __KERNEL__*/
-
#endif /* _LINUX_SUNRPC_XPRT_H */
diff --git a/include/linux/sunrpc/xprtsock.h b/include/linux/sunrpc/xprtsock.h
index a940de03808d..3c1423ee74b4 100644
--- a/include/linux/sunrpc/xprtsock.h
+++ b/include/linux/sunrpc/xprtsock.h
@@ -8,8 +8,6 @@
#ifndef _LINUX_SUNRPC_XPRTSOCK_H
#define _LINUX_SUNRPC_XPRTSOCK_H
-#ifdef __KERNEL__
-
int init_socket_xprt(void);
void cleanup_socket_xprt(void);
@@ -91,6 +89,4 @@ struct sock_xprt {
#define XPRT_SOCK_WAKE_PENDING (6)
#define XPRT_SOCK_WAKE_DISCONNECT (7)
-#endif /* __KERNEL__ */
-
#endif /* _LINUX_SUNRPC_XPRTSOCK_H */
diff --git a/include/linux/suspend.h b/include/linux/suspend.h
index 6fc8843f1c9e..2b2055b035ee 100644
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -191,7 +191,7 @@ struct platform_s2idle_ops {
int (*begin)(void);
int (*prepare)(void);
int (*prepare_late)(void);
- void (*wake)(void);
+ bool (*wake)(void);
void (*restore_early)(void);
void (*restore)(void);
void (*end)(void);
@@ -329,6 +329,7 @@ extern void arch_suspend_disable_irqs(void);
extern void arch_suspend_enable_irqs(void);
extern int pm_suspend(suspend_state_t state);
+extern bool sync_on_suspend_enabled;
#else /* !CONFIG_SUSPEND */
#define suspend_valid_only_mem NULL
@@ -342,6 +343,7 @@ static inline bool pm_suspend_default_s2idle(void) { return false; }
static inline void suspend_set_ops(const struct platform_suspend_ops *ops) {}
static inline int pm_suspend(suspend_state_t state) { return -ENOSYS; }
+static inline bool sync_on_suspend_enabled(void) { return true; }
static inline bool idle_should_enter_s2idle(void) { return false; }
static inline void __init pm_states_init(void) {}
static inline void s2idle_set_ops(const struct platform_s2idle_ops *ops) {}
diff --git a/include/linux/swab.h b/include/linux/swab.h
index e466fd159c85..bcff5149861a 100644
--- a/include/linux/swab.h
+++ b/include/linux/swab.h
@@ -7,6 +7,7 @@
# define swab16 __swab16
# define swab32 __swab32
# define swab64 __swab64
+# define swab __swab
# define swahw32 __swahw32
# define swahb32 __swahb32
# define swab16p __swab16p
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 063c0c1e112b..1e99f7ac1d7e 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -307,7 +307,7 @@ struct vma_swap_readahead {
};
/* linux/mm/workingset.c */
-void *workingset_eviction(struct page *page);
+void *workingset_eviction(struct page *page, struct mem_cgroup *target_memcg);
void workingset_refault(struct page *page, void *shadow);
void workingset_activation(struct page *page);
diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
index cde3dc18e21a..046bb94bd4d6 100644
--- a/include/linux/swiotlb.h
+++ b/include/linux/swiotlb.h
@@ -64,6 +64,9 @@ extern void swiotlb_tbl_sync_single(struct device *hwdev,
size_t size, enum dma_data_direction dir,
enum dma_sync_target target);
+dma_addr_t swiotlb_map(struct device *dev, phys_addr_t phys,
+ size_t size, enum dma_data_direction dir, unsigned long attrs);
+
#ifdef CONFIG_SWIOTLB
extern enum swiotlb_force swiotlb_force;
extern phys_addr_t io_tlb_start, io_tlb_end;
@@ -73,8 +76,6 @@ static inline bool is_swiotlb_buffer(phys_addr_t paddr)
return paddr >= io_tlb_start && paddr < io_tlb_end;
}
-bool swiotlb_map(struct device *dev, phys_addr_t *phys, dma_addr_t *dma_addr,
- size_t size, enum dma_data_direction dir, unsigned long attrs);
void __init swiotlb_exit(void);
unsigned int swiotlb_max_segment(void);
size_t swiotlb_max_mapping_size(struct device *dev);
@@ -85,12 +86,6 @@ static inline bool is_swiotlb_buffer(phys_addr_t paddr)
{
return false;
}
-static inline bool swiotlb_map(struct device *dev, phys_addr_t *phys,
- dma_addr_t *dma_addr, size_t size, enum dma_data_direction dir,
- unsigned long attrs)
-{
- return false;
-}
static inline void swiotlb_exit(void)
{
}
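
swiotlb_map() changes shape here: instead of a bool with phys/dma in-out parameters it now takes the physical address directly and returns the bounce-buffered dma_addr_t, and the declaration is no longer hidden behind CONFIG_SWIOTLB. A hedged sketch of the new calling convention (the real caller lives in the dma-direct code; reporting failure as DMA_MAPPING_ERROR is an assumption here):

/* Sketch: bounce-buffer one region for a device-bound transfer. */
static dma_addr_t map_via_swiotlb(struct device *dev, phys_addr_t phys,
				  size_t size, unsigned long attrs)
{
	dma_addr_t dma = swiotlb_map(dev, phys, size, DMA_TO_DEVICE, attrs);

	if (dma == DMA_MAPPING_ERROR)
		dev_warn(dev, "swiotlb bounce buffering failed\n");
	return dma;
}
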
diff --git a/include/linux/switchtec.h b/include/linux/switchtec.h
index e295515bc3f3..082f1d51957a 100644
--- a/include/linux/switchtec.h
+++ b/include/linux/switchtec.h
@@ -21,6 +21,11 @@
#define SWITCHTEC_EVENT_FATAL BIT(4)
#define SWITCHTEC_DMA_MRPC_EN BIT(0)
+
+#define MRPC_GAS_READ 0x29
+#define MRPC_GAS_WRITE 0x87
+#define MRPC_CMD_ID(x) ((x) & 0xffff)
+
enum {
SWITCHTEC_GAS_MRPC_OFFSET = 0x0000,
SWITCHTEC_GAS_TOP_CFG_OFFSET = 0x1000,
@@ -32,6 +37,11 @@ enum {
SWITCHTEC_GAS_PFF_CSR_OFFSET = 0x134000,
};
+enum switchtec_gen {
+ SWITCHTEC_GEN3,
+ SWITCHTEC_GEN4,
+};
+
struct mrpc_regs {
u8 input_data[SWITCHTEC_MRPC_PAYLOAD_SIZE];
u8 output_data[SWITCHTEC_MRPC_PAYLOAD_SIZE];
@@ -98,16 +108,37 @@ struct sw_event_regs {
} __packed;
enum {
- SWITCHTEC_CFG0_RUNNING = 0x04,
- SWITCHTEC_CFG1_RUNNING = 0x05,
- SWITCHTEC_IMG0_RUNNING = 0x03,
- SWITCHTEC_IMG1_RUNNING = 0x07,
+ SWITCHTEC_GEN3_CFG0_RUNNING = 0x04,
+ SWITCHTEC_GEN3_CFG1_RUNNING = 0x05,
+ SWITCHTEC_GEN3_IMG0_RUNNING = 0x03,
+ SWITCHTEC_GEN3_IMG1_RUNNING = 0x07,
};
-struct sys_info_regs {
- u32 device_id;
- u32 device_version;
- u32 firmware_version;
+enum {
+ SWITCHTEC_GEN4_MAP0_RUNNING = 0x00,
+ SWITCHTEC_GEN4_MAP1_RUNNING = 0x01,
+ SWITCHTEC_GEN4_KEY0_RUNNING = 0x02,
+ SWITCHTEC_GEN4_KEY1_RUNNING = 0x03,
+ SWITCHTEC_GEN4_BL2_0_RUNNING = 0x04,
+ SWITCHTEC_GEN4_BL2_1_RUNNING = 0x05,
+ SWITCHTEC_GEN4_CFG0_RUNNING = 0x06,
+ SWITCHTEC_GEN4_CFG1_RUNNING = 0x07,
+ SWITCHTEC_GEN4_IMG0_RUNNING = 0x08,
+ SWITCHTEC_GEN4_IMG1_RUNNING = 0x09,
+};
+
+enum {
+ SWITCHTEC_GEN4_KEY0_ACTIVE = 0,
+ SWITCHTEC_GEN4_KEY1_ACTIVE = 1,
+ SWITCHTEC_GEN4_BL2_0_ACTIVE = 0,
+ SWITCHTEC_GEN4_BL2_1_ACTIVE = 1,
+ SWITCHTEC_GEN4_CFG0_ACTIVE = 0,
+ SWITCHTEC_GEN4_CFG1_ACTIVE = 1,
+ SWITCHTEC_GEN4_IMG0_ACTIVE = 0,
+ SWITCHTEC_GEN4_IMG1_ACTIVE = 1,
+};
+
+struct sys_info_regs_gen3 {
u32 reserved1;
u32 vendor_table_revision;
u32 table_format_version;
@@ -124,26 +155,105 @@ struct sys_info_regs {
u8 component_revision;
} __packed;
-struct flash_info_regs {
+struct sys_info_regs_gen4 {
+ u16 gas_layout_ver;
+ u8 evlist_ver;
+ u8 reserved1;
+ u16 mgmt_cmd_set_ver;
+ u16 fabric_cmd_set_ver;
+ u32 reserved2[2];
+ u8 mrpc_uart_ver;
+ u8 mrpc_twi_ver;
+ u8 mrpc_eth_ver;
+ u8 mrpc_inband_ver;
+ u32 reserved3[7];
+ u32 fw_update_tmo;
+ u32 xml_version_cfg;
+ u32 xml_version_img;
+ u32 partition_id;
+ u16 bl2_running;
+ u16 cfg_running;
+ u16 img_running;
+ u16 key_running;
+ u32 reserved4[43];
+ u32 vendor_seeprom_twi;
+ u32 vendor_table_revision;
+ u32 vendor_specific_info[2];
+ u16 p2p_vendor_id;
+ u16 p2p_device_id;
+ u8 p2p_revision_id;
+ u8 reserved5[3];
+ u32 p2p_class_id;
+ u16 subsystem_vendor_id;
+ u16 subsystem_id;
+ u32 p2p_serial_number[2];
+ u8 mac_addr[6];
+ u8 reserved6[2];
+ u32 reserved7[3];
+ char vendor_id[8];
+ char product_id[24];
+ char product_revision[2];
+ u16 reserved8;
+} __packed;
+
+struct sys_info_regs {
+ u32 device_id;
+ u32 device_version;
+ u32 firmware_version;
+ union {
+ struct sys_info_regs_gen3 gen3;
+ struct sys_info_regs_gen4 gen4;
+ };
+} __packed;
+
+struct partition_info {
+ u32 address;
+ u32 length;
+};
+
+struct flash_info_regs_gen3 {
u32 flash_part_map_upd_idx;
- struct active_partition_info {
+ struct active_partition_info_gen3 {
u32 address;
u32 build_version;
u32 build_string;
} active_img;
- struct active_partition_info active_cfg;
- struct active_partition_info inactive_img;
- struct active_partition_info inactive_cfg;
+ struct active_partition_info_gen3 active_cfg;
+ struct active_partition_info_gen3 inactive_img;
+ struct active_partition_info_gen3 inactive_cfg;
u32 flash_length;
- struct partition_info {
- u32 address;
- u32 length;
- } cfg0;
+ struct partition_info cfg0;
+ struct partition_info cfg1;
+ struct partition_info img0;
+ struct partition_info img1;
+ struct partition_info nvlog;
+ struct partition_info vendor[8];
+};
+struct flash_info_regs_gen4 {
+ u32 flash_address;
+ u32 flash_length;
+
+ struct active_partition_info_gen4 {
+ unsigned char bl2;
+ unsigned char cfg;
+ unsigned char img;
+ unsigned char key;
+ } active_flag;
+
+ u32 reserved[3];
+
+ struct partition_info map0;
+ struct partition_info map1;
+ struct partition_info key0;
+ struct partition_info key1;
+ struct partition_info bl2_0;
+ struct partition_info bl2_1;
+ struct partition_info cfg0;
struct partition_info cfg1;
struct partition_info img0;
struct partition_info img1;
@@ -151,6 +261,13 @@ struct flash_info_regs {
struct partition_info vendor[8];
};
+struct flash_info_regs {
+ union {
+ struct flash_info_regs_gen3 gen3;
+ struct flash_info_regs_gen4 gen4;
+ };
+};
+
enum {
SWITCHTEC_NTB_REG_INFO_OFFSET = 0x0000,
SWITCHTEC_NTB_REG_CTRL_OFFSET = 0x4000,
@@ -196,7 +313,9 @@ struct part_cfg_regs {
u32 mrpc_comp_async_data[5];
u32 dyn_binding_hdr;
u32 dyn_binding_data[5];
- u32 reserved4[159];
+ u32 intercomm_notify_hdr;
+ u32 intercomm_notify_data[5];
+ u32 reserved4[153];
} __packed;
enum {
@@ -320,7 +439,8 @@ struct pff_csr_regs {
u32 dpc_data[5];
u32 cts_hdr;
u32 cts_data[5];
- u32 reserved3[6];
+ u32 uec_hdr;
+ u32 uec_data[5];
u32 hotplug_hdr;
u32 hotplug_data[5];
u32 ier_hdr;
@@ -355,6 +475,8 @@ struct switchtec_dev {
struct device dev;
struct cdev cdev;
+ enum switchtec_gen gen;
+
int partition;
int partition_count;
int pff_csr_count;
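
With the split above, sys_info_regs keeps only the common device_id/device_version/firmware_version words and moves the layout that differs between chip generations into a gen3/gen4 union, while struct switchtec_dev records which generation it is driving. A sketch of how a reader would pick the right view, assuming the fields shown above:

/* Sketch: read the vendor table revision for either generation. */
static u32 sys_info_vendor_table_rev(struct switchtec_dev *stdev,
				     struct sys_info_regs __iomem *si)
{
	switch (stdev->gen) {
	case SWITCHTEC_GEN3:
		return ioread32(&si->gen3.vendor_table_revision);
	case SWITCHTEC_GEN4:
		return ioread32(&si->gen4.vendor_table_revision);
	}
	return 0;
}
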
diff --git a/include/linux/sxgbe_platform.h b/include/linux/sxgbe_platform.h
index 267369110584..966146f7267a 100644
--- a/include/linux/sxgbe_platform.h
+++ b/include/linux/sxgbe_platform.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * 10G controller driver for Samsung EXYNOS SoCs
+ * 10G controller driver for Samsung Exynos SoCs
*
* Copyright (C) 2013 Samsung Electronics Co., Ltd.
* http://www.samsung.com
@@ -10,6 +10,8 @@
#ifndef __SXGBE_PLATFORM_H__
#define __SXGBE_PLATFORM_H__
+#include <linux/phy.h>
+
/* MDC Clock Selection define*/
#define SXGBE_CSR_100_150M 0x0 /* MDC = clk_scr_i/62 */
#define SXGBE_CSR_150_250M 0x1 /* MDC = clk_scr_i/102 */
@@ -38,7 +40,7 @@ struct sxgbe_plat_data {
char *phy_bus_name;
int bus_id;
int phy_addr;
- int interface;
+ phy_interface_t interface;
struct sxgbe_mdio_bus_data *mdio_bus_data;
struct sxgbe_dma_cfg *dma_cfg;
int clk_csr;
diff --git a/include/linux/sys_soc.h b/include/linux/sys_soc.h
index 48ceea867dd6..d9b3cf0f410c 100644
--- a/include/linux/sys_soc.h
+++ b/include/linux/sys_soc.h
@@ -15,6 +15,7 @@ struct soc_device_attribute {
const char *serial_number;
const char *soc_id;
const void *data;
+ const struct attribute_group *custom_attr_group;
};
/**
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index f7c561c4dcdd..1815065d52f3 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -16,8 +16,7 @@ struct inode;
struct iocb;
struct io_event;
struct iovec;
-struct itimerspec;
-struct itimerval;
+struct __kernel_old_itimerval;
struct kexec_segment;
struct linux_dirent;
struct linux_dirent64;
@@ -51,7 +50,7 @@ struct statx;
struct __sysctl_args;
struct sysinfo;
struct timespec;
-struct timeval;
+struct __kernel_old_timeval;
struct __kernel_timex;
struct timezone;
struct tms;
@@ -69,6 +68,7 @@ struct rseq;
union bpf_attr;
struct io_uring_params;
struct clone_args;
+struct open_how;
#include <linux/types.h>
#include <linux/aio_abi.h>
@@ -439,6 +439,8 @@ asmlinkage long sys_fchownat(int dfd, const char __user *filename, uid_t user,
asmlinkage long sys_fchown(unsigned int fd, uid_t user, gid_t group);
asmlinkage long sys_openat(int dfd, const char __user *filename, int flags,
umode_t mode);
+asmlinkage long sys_openat2(int dfd, const char __user *filename,
+ struct open_how *how, size_t size);
asmlinkage long sys_close(unsigned int fd);
asmlinkage long sys_vhangup(void);
@@ -591,10 +593,10 @@ asmlinkage long sys_nanosleep_time32(struct old_timespec32 __user *rqtp,
struct old_timespec32 __user *rmtp);
/* kernel/itimer.c */
-asmlinkage long sys_getitimer(int which, struct itimerval __user *value);
+asmlinkage long sys_getitimer(int which, struct __kernel_old_itimerval __user *value);
asmlinkage long sys_setitimer(int which,
- struct itimerval __user *value,
- struct itimerval __user *ovalue);
+ struct __kernel_old_itimerval __user *value,
+ struct __kernel_old_itimerval __user *ovalue);
/* kernel/kexec.c */
asmlinkage long sys_kexec_load(unsigned long entry, unsigned long nr_segments,
@@ -732,9 +734,9 @@ asmlinkage long sys_prctl(int option, unsigned long arg2, unsigned long arg3,
asmlinkage long sys_getcpu(unsigned __user *cpu, unsigned __user *node, struct getcpu_cache __user *cache);
/* kernel/time.c */
-asmlinkage long sys_gettimeofday(struct timeval __user *tv,
+asmlinkage long sys_gettimeofday(struct __kernel_old_timeval __user *tv,
struct timezone __user *tz);
-asmlinkage long sys_settimeofday(struct timeval __user *tv,
+asmlinkage long sys_settimeofday(struct __kernel_old_timeval __user *tv,
struct timezone __user *tz);
asmlinkage long sys_adjtimex(struct __kernel_timex __user *txc_p);
asmlinkage long sys_adjtimex_time32(struct old_timex32 __user *txc_p);
@@ -1000,6 +1002,7 @@ asmlinkage long sys_fspick(int dfd, const char __user *path, unsigned int flags)
asmlinkage long sys_pidfd_send_signal(int pidfd, int sig,
siginfo_t __user *info,
unsigned int flags);
+asmlinkage long sys_pidfd_getfd(int pidfd, int fd, unsigned int flags);
/*
* Architecture-specific system calls
@@ -1076,15 +1079,15 @@ asmlinkage long sys_fadvise64(int fd, loff_t offset, size_t len, int advice);
asmlinkage long sys_alarm(unsigned int seconds);
asmlinkage long sys_getpgrp(void);
asmlinkage long sys_pause(void);
-asmlinkage long sys_time(time_t __user *tloc);
+asmlinkage long sys_time(__kernel_old_time_t __user *tloc);
asmlinkage long sys_time32(old_time32_t __user *tloc);
#ifdef __ARCH_WANT_SYS_UTIME
asmlinkage long sys_utime(char __user *filename,
struct utimbuf __user *times);
asmlinkage long sys_utimes(char __user *filename,
- struct timeval __user *utimes);
+ struct __kernel_old_timeval __user *utimes);
asmlinkage long sys_futimesat(int dfd, const char __user *filename,
- struct timeval __user *utimes);
+ struct __kernel_old_timeval __user *utimes);
#endif
asmlinkage long sys_futimesat_time32(unsigned int dfd,
const char __user *filename,
@@ -1098,7 +1101,7 @@ asmlinkage long sys_getdents(unsigned int fd,
struct linux_dirent __user *dirent,
unsigned int count);
asmlinkage long sys_select(int n, fd_set __user *inp, fd_set __user *outp,
- fd_set __user *exp, struct timeval __user *tvp);
+ fd_set __user *exp, struct __kernel_old_timeval __user *tvp);
asmlinkage long sys_poll(struct pollfd __user *ufds, unsigned int nfds,
int timeout);
asmlinkage long sys_epoll_wait(int epfd, struct epoll_event __user *events,
@@ -1116,7 +1119,7 @@ asmlinkage long sys_sysfs(int option,
asmlinkage long sys_fork(void);
/* obsolete: kernel/time/time.c */
-asmlinkage long sys_stime(time_t __user *tptr);
+asmlinkage long sys_stime(__kernel_old_time_t __user *tptr);
asmlinkage long sys_stime32(old_time32_t __user *tptr);
/* obsolete: kernel/signal.c */
@@ -1231,8 +1234,6 @@ asmlinkage long sys_ni_syscall(void);
* the ksys_xyzyyz() functions prototyped below.
*/
-int ksys_mount(const char __user *dev_name, const char __user *dir_name,
- const char __user *type, unsigned long flags, void __user *data);
int ksys_umount(char __user *name, int flags);
int ksys_dup(unsigned int fildes);
int ksys_chroot(const char __user *filename);
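
sys_openat2() takes an extensible struct open_how (flags, mode and resolve restrictions) together with its size, so the structure can grow without a new syscall. A hedged userspace sketch of calling it through syscall(2); the header and constants come from the matching UAPI patch, not from this file:

#include <fcntl.h>
#include <linux/openat2.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Sketch: open a file read-only, refusing to follow any symlink in the path. */
static int open_no_symlinks(const char *path)
{
	struct open_how how = {
		.flags   = O_RDONLY,
		.resolve = RESOLVE_NO_SYMLINKS,
	};

	return syscall(__NR_openat2, AT_FDCWD, path, &how, sizeof(how));
}
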
diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
index 6df477329b76..02fa84493f23 100644
--- a/include/linux/sysctl.h
+++ b/include/linux/sysctl.h
@@ -120,8 +120,7 @@ static inline void *proc_sys_poll_event(struct ctl_table_poll *poll)
struct ctl_table_poll name = __CTL_TABLE_POLL_INITIALIZER(name)
/* A sysctl table is an array of struct ctl_table: */
-struct ctl_table
-{
+struct ctl_table {
const char *procname; /* Text ID for /proc/sys, or zero */
void *data;
int maxlen;
@@ -140,8 +139,7 @@ struct ctl_node {
/* struct ctl_table_header is used to maintain dynamic lists of
struct ctl_table trees. */
-struct ctl_table_header
-{
+struct ctl_table_header {
union {
struct {
struct ctl_table *ctl_table;
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index 668e25a76d69..3dc964010fef 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -78,6 +78,27 @@ struct tcp_sack_block {
#define TCP_SACK_SEEN (1 << 0) /*1 = peer is SACK capable, */
#define TCP_DSACK_SEEN (1 << 2) /*1 = DSACK was received from peer*/
+#if IS_ENABLED(CONFIG_MPTCP)
+struct mptcp_options_received {
+ u64 sndr_key;
+ u64 rcvr_key;
+ u64 data_ack;
+ u64 data_seq;
+ u32 subflow_seq;
+ u16 data_len;
+ u8 mp_capable : 1,
+ mp_join : 1,
+ dss : 1;
+ u8 use_map:1,
+ dsn64:1,
+ data_fin:1,
+ use_ack:1,
+ ack64:1,
+ mpc_map:1,
+ __unused:2;
+};
+#endif
+
struct tcp_options_received {
/* PAWS/RTTM data */
int ts_recent_stamp;/* Time we stored ts_recent (for aging) */
@@ -95,6 +116,9 @@ struct tcp_options_received {
u8 num_sacks; /* Number of SACK blocks */
u16 user_mss; /* mss requested by user in ioctl */
u16 mss_clamp; /* Maximal mss, negotiated at connection setup */
+#if IS_ENABLED(CONFIG_MPTCP)
+ struct mptcp_options_received mptcp;
+#endif
};
static inline void tcp_clear_options(struct tcp_options_received *rx_opt)
@@ -104,6 +128,11 @@ static inline void tcp_clear_options(struct tcp_options_received *rx_opt)
#if IS_ENABLED(CONFIG_SMC)
rx_opt->smc_ok = 0;
#endif
+#if IS_ENABLED(CONFIG_MPTCP)
+ rx_opt->mptcp.mp_capable = 0;
+ rx_opt->mptcp.mp_join = 0;
+ rx_opt->mptcp.dss = 0;
+#endif
}
/* This is the max number of SACKS that we'll generate and process. It's safe
@@ -119,6 +148,7 @@ struct tcp_request_sock {
const struct tcp_request_sock_ops *af_specific;
u64 snt_synack; /* first SYNACK sent time */
bool tfo_listener;
+ bool is_mptcp;
u32 txhash;
u32 rcv_isn;
u32 snt_isn;
@@ -223,7 +253,7 @@ struct tcp_sock {
fastopen_connect:1, /* FASTOPEN_CONNECT sockopt */
fastopen_no_cookie:1, /* Allow send/recv SYN+data without a cookie */
is_sack_reneg:1, /* in recovery from loss with SACK reneg? */
- unused:2;
+ fastopen_client_fail:2; /* reason why fastopen failed */
u8 nonagle : 4,/* Disable Nagle algorithm? */
thin_lto : 1,/* Use linear timeouts for thin streams */
recvmsg_inq : 1,/* Indicate # of bytes in queue upon recvmsg */
@@ -354,6 +384,8 @@ struct tcp_sock {
#define BPF_SOCK_OPS_TEST_FLAG(TP, ARG) 0
#endif
+ u16 timeout_rehash; /* Timeout-triggered rehash attempts */
+
u32 rcv_ooopack; /* Received out-of-order packets, for tcpinfo */
/* Receiver side RTT estimation */
@@ -379,6 +411,9 @@ struct tcp_sock {
u32 mtu_info; /* We received an ICMP_FRAG_NEEDED / ICMPV6_PKT_TOOBIG
* while socket was owned by user.
*/
+#if IS_ENABLED(CONFIG_MPTCP)
+ bool is_mptcp;
+#endif
#ifdef CONFIG_TCP_MD5SIG
/* TCP AF-Specific parts; only used by MD5 Signature support so far */
diff --git a/include/linux/thermal.h b/include/linux/thermal.h
index e45659c75920..126913c6a53b 100644
--- a/include/linux/thermal.h
+++ b/include/linux/thermal.h
@@ -32,17 +32,6 @@
/* use value, which < 0K, to indicate an invalid/uninitialized temperature */
#define THERMAL_TEMP_INVALID -274000
-/* Unit conversion macros */
-#define DECI_KELVIN_TO_CELSIUS(t) ({ \
- long _t = (t); \
- ((_t-2732 >= 0) ? (_t-2732+5)/10 : (_t-2732-5)/10); \
-})
-#define CELSIUS_TO_DECI_KELVIN(t) ((t)*10+2732)
-#define DECI_KELVIN_TO_MILLICELSIUS_WITH_OFFSET(t, off) (((t) - (off)) * 100)
-#define DECI_KELVIN_TO_MILLICELSIUS(t) DECI_KELVIN_TO_MILLICELSIUS_WITH_OFFSET(t, 2732)
-#define MILLICELSIUS_TO_DECI_KELVIN_WITH_OFFSET(t, off) (((t) / 100) + (off))
-#define MILLICELSIUS_TO_DECI_KELVIN(t) MILLICELSIUS_TO_DECI_KELVIN_WITH_OFFSET(t, 2732)
-
/* Default Thermal Governor */
#if defined(CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE)
#define DEFAULT_THERMAL_GOVERNOR "step_wise"
@@ -544,15 +533,4 @@ static inline void thermal_notify_framework(struct thermal_zone_device *tz,
{ }
#endif /* CONFIG_THERMAL */
-#if defined(CONFIG_NET) && IS_ENABLED(CONFIG_THERMAL)
-extern int thermal_generate_netlink_event(struct thermal_zone_device *tz,
- enum events event);
-#else
-static inline int thermal_generate_netlink_event(struct thermal_zone_device *tz,
- enum events event)
-{
- return 0;
-}
-#endif
-
#endif /* __THERMAL_H__ */
diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
index 659a4400517b..e93e249a4e9b 100644
--- a/include/linux/thread_info.h
+++ b/include/linux/thread_info.h
@@ -147,6 +147,8 @@ check_copy_size(const void *addr, size_t bytes, bool is_source)
__bad_copy_to();
return false;
}
+ if (WARN_ON_ONCE(bytes > INT_MAX))
+ return false;
check_object_size(addr, bytes, is_source);
return true;
}
diff --git a/include/linux/threads.h b/include/linux/threads.h
index 3086dba525e2..18d5a74bcc3d 100644
--- a/include/linux/threads.h
+++ b/include/linux/threads.h
@@ -29,7 +29,7 @@
/*
* A maximum of 4 million PIDs should be enough for a while.
- * [NOTE: PID/TIDs are limited to 2^29 ~= 500+ million, see futex.h.]
+ * [NOTE: PID/TIDs are limited to 2^30 ~= 1 billion, see FUTEX_TID_MASK.]
*/
#define PID_MAX_LIMIT (CONFIG_BASE_SMALL ? PAGE_SIZE * 8 : \
(sizeof(long) > 4 ? 4 * 1024 * 1024 : PID_MAX_DEFAULT))
diff --git a/include/linux/tick.h b/include/linux/tick.h
index f92a10b5e112..7340613c7eff 100644
--- a/include/linux/tick.h
+++ b/include/linux/tick.h
@@ -108,14 +108,19 @@ enum tick_dep_bits {
TICK_DEP_BIT_POSIX_TIMER = 0,
TICK_DEP_BIT_PERF_EVENTS = 1,
TICK_DEP_BIT_SCHED = 2,
- TICK_DEP_BIT_CLOCK_UNSTABLE = 3
+ TICK_DEP_BIT_CLOCK_UNSTABLE = 3,
+ TICK_DEP_BIT_RCU = 4,
+ TICK_DEP_BIT_RCU_EXP = 5
};
+#define TICK_DEP_BIT_MAX TICK_DEP_BIT_RCU_EXP
#define TICK_DEP_MASK_NONE 0
#define TICK_DEP_MASK_POSIX_TIMER (1 << TICK_DEP_BIT_POSIX_TIMER)
#define TICK_DEP_MASK_PERF_EVENTS (1 << TICK_DEP_BIT_PERF_EVENTS)
#define TICK_DEP_MASK_SCHED (1 << TICK_DEP_BIT_SCHED)
#define TICK_DEP_MASK_CLOCK_UNSTABLE (1 << TICK_DEP_BIT_CLOCK_UNSTABLE)
+#define TICK_DEP_MASK_RCU (1 << TICK_DEP_BIT_RCU)
+#define TICK_DEP_MASK_RCU_EXP (1 << TICK_DEP_BIT_RCU_EXP)
#ifdef CONFIG_NO_HZ_COMMON
extern bool tick_nohz_enabled;
@@ -174,7 +179,7 @@ extern cpumask_var_t tick_nohz_full_mask;
static inline bool tick_nohz_full_enabled(void)
{
- if (!context_tracking_is_enabled())
+ if (!context_tracking_enabled())
return false;
return tick_nohz_full_running;
@@ -268,6 +273,9 @@ static inline bool tick_nohz_full_enabled(void) { return false; }
static inline bool tick_nohz_full_cpu(int cpu) { return false; }
static inline void tick_nohz_full_add_cpus_to(struct cpumask *mask) { }
+static inline void tick_nohz_dep_set_cpu(int cpu, enum tick_dep_bits bit) { }
+static inline void tick_nohz_dep_clear_cpu(int cpu, enum tick_dep_bits bit) { }
+
static inline void tick_dep_set(enum tick_dep_bits bit) { }
static inline void tick_dep_clear(enum tick_dep_bits bit) { }
static inline void tick_dep_set_cpu(int cpu, enum tick_dep_bits bit) { }
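
TICK_DEP_BIT_RCU and TICK_DEP_BIT_RCU_EXP give RCU its own reason to keep the tick running on a nohz_full CPU, with masks built like the existing ones and empty stubs for the per-CPU setters when CONFIG_NO_HZ_COMMON is off. A minimal sketch of the intended pattern, assuming the declarations above:

/* Sketch: hold the tick on this CPU across work that needs scheduler ticks. */
static void rcu_tick_dep_example(void)
{
	int cpu = smp_processor_id();

	tick_dep_set_cpu(cpu, TICK_DEP_BIT_RCU);
	/* ... work that must not let the tick stop ... */
	tick_dep_clear_cpu(cpu, TICK_DEP_BIT_RCU);
}
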
diff --git a/include/linux/time.h b/include/linux/time.h
index 27d83fd2ae61..4c325bf44ce0 100644
--- a/include/linux/time.h
+++ b/include/linux/time.h
@@ -35,10 +35,11 @@ extern time64_t mktime64(const unsigned int year, const unsigned int mon,
extern u32 (*arch_gettimeoffset)(void);
#endif
-struct itimerval;
-extern int do_setitimer(int which, struct itimerval *value,
- struct itimerval *ovalue);
-extern int do_getitimer(int which, struct itimerval *value);
+#ifdef CONFIG_POSIX_TIMERS
+extern void clear_itimer(void);
+#else
+static inline void clear_itimer(void) {}
+#endif
extern long do_utimes(int dfd, const char __user *filename, struct timespec64 *times, int flags);
@@ -96,4 +97,20 @@ static inline bool itimerspec64_valid(const struct itimerspec64 *its)
*/
#define time_after32(a, b) ((s32)((u32)(b) - (u32)(a)) < 0)
#define time_before32(b, a) time_after32(a, b)
+
+/**
+ * time_between32 - check if a 32-bit timestamp is within a given time range
+ * @t: the time which may be within [l,h]
+ * @l: the lower bound of the range
+ * @h: the higher bound of the range
+ *
+ * time_between32(t, l, h) returns true if @l <= @t <= @h. All operands are
+ * treated as 32-bit integers.
+ *
+ * Equivalent to !(time_before32(@t, @l) || time_after32(@t, @h)).
+ */
+#define time_between32(t, l, h) ((u32)(h) - (u32)(l) >= (u32)(t) - (u32)(l))
+
+# include <vdso/time.h>
+
#endif
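
time_between32() folds the two wrap-safe comparisons into one unsigned subtraction: with 32-bit wrap-around arithmetic, t lies in [l, h] exactly when (t - l) <= (h - l). A quick sketch that checks the equivalence, including across a u32 wrap:

/* Sketch: the macro agrees with the long form even when the range wraps past 0xffffffff. */
static void time_between32_example(void)
{
	u32 l = 0xfffffff0, h = 0x00000010, t = 0x00000001;

	WARN_ON(time_between32(t, l, h) !=
		!(time_before32(t, l) || time_after32(t, h)));
}
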
diff --git a/include/linux/time32.h b/include/linux/time32.h
index 0a1f302a1753..83a400b6ba99 100644
--- a/include/linux/time32.h
+++ b/include/linux/time32.h
@@ -12,19 +12,7 @@
#include <linux/time64.h>
#include <linux/timex.h>
-#define TIME_T_MAX (time_t)((1UL << ((sizeof(time_t) << 3) - 1)) - 1)
-
-typedef s32 old_time32_t;
-
-struct old_timespec32 {
- old_time32_t tv_sec;
- s32 tv_nsec;
-};
-
-struct old_timeval32 {
- old_time32_t tv_sec;
- s32 tv_usec;
-};
+#include <vdso/time32.h>
struct old_itimerspec32 {
struct old_timespec32 it_interval;
@@ -73,162 +61,12 @@ struct __kernel_timex;
int get_old_timex32(struct __kernel_timex *, const struct old_timex32 __user *);
int put_old_timex32(struct old_timex32 __user *, const struct __kernel_timex *);
-#if __BITS_PER_LONG == 64
-
-/* timespec64 is defined as timespec here */
-static inline struct timespec timespec64_to_timespec(const struct timespec64 ts64)
-{
- return *(const struct timespec *)&ts64;
-}
-
-static inline struct timespec64 timespec_to_timespec64(const struct timespec ts)
-{
- return *(const struct timespec64 *)&ts;
-}
-
-#else
-static inline struct timespec timespec64_to_timespec(const struct timespec64 ts64)
-{
- struct timespec ret;
-
- ret.tv_sec = (time_t)ts64.tv_sec;
- ret.tv_nsec = ts64.tv_nsec;
- return ret;
-}
-
-static inline struct timespec64 timespec_to_timespec64(const struct timespec ts)
-{
- struct timespec64 ret;
-
- ret.tv_sec = ts.tv_sec;
- ret.tv_nsec = ts.tv_nsec;
- return ret;
-}
-#endif
-
-static inline int timespec_equal(const struct timespec *a,
- const struct timespec *b)
-{
- return (a->tv_sec == b->tv_sec) && (a->tv_nsec == b->tv_nsec);
-}
-
-/*
- * lhs < rhs: return <0
- * lhs == rhs: return 0
- * lhs > rhs: return >0
- */
-static inline int timespec_compare(const struct timespec *lhs, const struct timespec *rhs)
-{
- if (lhs->tv_sec < rhs->tv_sec)
- return -1;
- if (lhs->tv_sec > rhs->tv_sec)
- return 1;
- return lhs->tv_nsec - rhs->tv_nsec;
-}
-
-/*
- * Returns true if the timespec is norm, false if denorm:
- */
-static inline bool timespec_valid(const struct timespec *ts)
-{
- /* Dates before 1970 are bogus */
- if (ts->tv_sec < 0)
- return false;
- /* Can't have more nanoseconds then a second */
- if ((unsigned long)ts->tv_nsec >= NSEC_PER_SEC)
- return false;
- return true;
-}
-
-/**
- * timespec_to_ns - Convert timespec to nanoseconds
- * @ts: pointer to the timespec variable to be converted
- *
- * Returns the scalar nanosecond representation of the timespec
- * parameter.
- */
-static inline s64 timespec_to_ns(const struct timespec *ts)
-{
- return ((s64) ts->tv_sec * NSEC_PER_SEC) + ts->tv_nsec;
-}
-
-/**
- * ns_to_timespec - Convert nanoseconds to timespec
- * @nsec: the nanoseconds value to be converted
- *
- * Returns the timespec representation of the nsec parameter.
- */
-extern struct timespec ns_to_timespec(const s64 nsec);
-
-/**
- * timespec_add_ns - Adds nanoseconds to a timespec
- * @a: pointer to timespec to be incremented
- * @ns: unsigned nanoseconds value to be added
- *
- * This must always be inlined because its used from the x86-64 vdso,
- * which cannot call other kernel functions.
- */
-static __always_inline void timespec_add_ns(struct timespec *a, u64 ns)
-{
- a->tv_sec += __iter_div_u64_rem(a->tv_nsec + ns, NSEC_PER_SEC, &ns);
- a->tv_nsec = ns;
-}
-
-static inline unsigned long mktime(const unsigned int year,
- const unsigned int mon, const unsigned int day,
- const unsigned int hour, const unsigned int min,
- const unsigned int sec)
-{
- return mktime64(year, mon, day, hour, min, sec);
-}
-
-static inline bool timeval_valid(const struct timeval *tv)
-{
- /* Dates before 1970 are bogus */
- if (tv->tv_sec < 0)
- return false;
-
- /* Can't have more microseconds then a second */
- if (tv->tv_usec < 0 || tv->tv_usec >= USEC_PER_SEC)
- return false;
-
- return true;
-}
-
/**
- * timeval_to_ns - Convert timeval to nanoseconds
- * @ts: pointer to the timeval variable to be converted
- *
- * Returns the scalar nanosecond representation of the timeval
- * parameter.
- */
-static inline s64 timeval_to_ns(const struct timeval *tv)
-{
- return ((s64) tv->tv_sec * NSEC_PER_SEC) +
- tv->tv_usec * NSEC_PER_USEC;
-}
-
-/**
- * ns_to_timeval - Convert nanoseconds to timeval
+ * ns_to_kernel_old_timeval - Convert nanoseconds to timeval
* @nsec: the nanoseconds value to be converted
*
* Returns the timeval representation of the nsec parameter.
*/
-extern struct timeval ns_to_timeval(const s64 nsec);
extern struct __kernel_old_timeval ns_to_kernel_old_timeval(s64 nsec);
-/*
- * Old names for the 32-bit time_t interfaces, these will be removed
- * when everything uses the new names.
- */
-#define compat_time_t old_time32_t
-#define compat_timeval old_timeval32
-#define compat_timespec old_timespec32
-#define compat_itimerspec old_itimerspec32
-#define ns_to_compat_timeval ns_to_old_timeval32
-#define get_compat_itimerspec64 get_old_itimerspec32
-#define put_compat_itimerspec64 put_old_itimerspec32
-#define compat_get_timespec64 get_old_timespec32
-#define compat_put_timespec64 put_old_timespec32
-
#endif
diff --git a/include/linux/time64.h b/include/linux/time64.h
index 19125489ae94..c9dcb3e5781f 100644
--- a/include/linux/time64.h
+++ b/include/linux/time64.h
@@ -3,6 +3,7 @@
#define _LINUX_TIME64_H
#include <linux/math64.h>
+#include <vdso/time64.h>
typedef __s64 time64_t;
typedef __u64 timeu64_t;
@@ -19,15 +20,6 @@ struct itimerspec64 {
struct timespec64 it_value;
};
-/* Parameters used to convert the timespec values: */
-#define MSEC_PER_SEC 1000L
-#define USEC_PER_MSEC 1000L
-#define NSEC_PER_USEC 1000L
-#define NSEC_PER_MSEC 1000000L
-#define USEC_PER_SEC 1000000L
-#define NSEC_PER_SEC 1000000000L
-#define FSEC_PER_SEC 1000000000000000LL
-
/* Located here for timespec[64]_valid_strict */
#define TIME64_MAX ((s64)~((u64)1 << 63))
#define TIME64_MIN (-TIME64_MAX - 1)
diff --git a/include/linux/time_namespace.h b/include/linux/time_namespace.h
new file mode 100644
index 000000000000..824d54e057eb
--- /dev/null
+++ b/include/linux/time_namespace.h
@@ -0,0 +1,133 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_TIMENS_H
+#define _LINUX_TIMENS_H
+
+
+#include <linux/sched.h>
+#include <linux/kref.h>
+#include <linux/nsproxy.h>
+#include <linux/ns_common.h>
+#include <linux/err.h>
+
+struct user_namespace;
+extern struct user_namespace init_user_ns;
+
+struct timens_offsets {
+ struct timespec64 monotonic;
+ struct timespec64 boottime;
+};
+
+struct time_namespace {
+ struct kref kref;
+ struct user_namespace *user_ns;
+ struct ucounts *ucounts;
+ struct ns_common ns;
+ struct timens_offsets offsets;
+ struct page *vvar_page;
+ /* If set prevents changing offsets after any task joined namespace. */
+ bool frozen_offsets;
+} __randomize_layout;
+
+extern struct time_namespace init_time_ns;
+
+#ifdef CONFIG_TIME_NS
+extern int vdso_join_timens(struct task_struct *task,
+ struct time_namespace *ns);
+
+static inline struct time_namespace *get_time_ns(struct time_namespace *ns)
+{
+ kref_get(&ns->kref);
+ return ns;
+}
+
+struct time_namespace *copy_time_ns(unsigned long flags,
+ struct user_namespace *user_ns,
+ struct time_namespace *old_ns);
+void free_time_ns(struct kref *kref);
+int timens_on_fork(struct nsproxy *nsproxy, struct task_struct *tsk);
+struct vdso_data *arch_get_vdso_data(void *vvar_page);
+
+static inline void put_time_ns(struct time_namespace *ns)
+{
+ kref_put(&ns->kref, free_time_ns);
+}
+
+void proc_timens_show_offsets(struct task_struct *p, struct seq_file *m);
+
+struct proc_timens_offset {
+ int clockid;
+ struct timespec64 val;
+};
+
+int proc_timens_set_offset(struct file *file, struct task_struct *p,
+ struct proc_timens_offset *offsets, int n);
+
+static inline void timens_add_monotonic(struct timespec64 *ts)
+{
+ struct timens_offsets *ns_offsets = &current->nsproxy->time_ns->offsets;
+
+ *ts = timespec64_add(*ts, ns_offsets->monotonic);
+}
+
+static inline void timens_add_boottime(struct timespec64 *ts)
+{
+ struct timens_offsets *ns_offsets = &current->nsproxy->time_ns->offsets;
+
+ *ts = timespec64_add(*ts, ns_offsets->boottime);
+}
+
+ktime_t do_timens_ktime_to_host(clockid_t clockid, ktime_t tim,
+ struct timens_offsets *offsets);
+
+static inline ktime_t timens_ktime_to_host(clockid_t clockid, ktime_t tim)
+{
+ struct time_namespace *ns = current->nsproxy->time_ns;
+
+ if (likely(ns == &init_time_ns))
+ return tim;
+
+ return do_timens_ktime_to_host(clockid, tim, &ns->offsets);
+}
+
+#else
+static inline int vdso_join_timens(struct task_struct *task,
+ struct time_namespace *ns)
+{
+ return 0;
+}
+
+static inline struct time_namespace *get_time_ns(struct time_namespace *ns)
+{
+ return NULL;
+}
+
+static inline void put_time_ns(struct time_namespace *ns)
+{
+}
+
+static inline
+struct time_namespace *copy_time_ns(unsigned long flags,
+ struct user_namespace *user_ns,
+ struct time_namespace *old_ns)
+{
+ if (flags & CLONE_NEWTIME)
+ return ERR_PTR(-EINVAL);
+
+ return old_ns;
+}
+
+static inline int timens_on_fork(struct nsproxy *nsproxy,
+ struct task_struct *tsk)
+{
+ return 0;
+}
+
+static inline void timens_add_monotonic(struct timespec64 *ts) { }
+static inline void timens_add_boottime(struct timespec64 *ts) { }
+static inline ktime_t timens_ktime_to_host(clockid_t clockid, ktime_t tim)
+{
+ return tim;
+}
+#endif
+
+#endif /* _LINUX_TIMENS_H */
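
The new time namespace keeps per-namespace offsets for CLOCK_MONOTONIC and CLOCK_BOOTTIME: timens_add_monotonic()/timens_add_boottime() shift a host timespec64 into the namespace view (effectively a no-op when the offsets are zero or CONFIG_TIME_NS is off), and timens_ktime_to_host() maps an absolute namespace timeout back to host time. A hedged sketch of the read path, assuming the helpers above:

/* Sketch: CLOCK_MONOTONIC as seen from the caller's time namespace. */
static void get_monotonic_in_ns(struct timespec64 *ts)
{
	ktime_get_ts64(ts);		/* host CLOCK_MONOTONIC */
	timens_add_monotonic(ts);	/* apply this namespace's offset */
}
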
diff --git a/include/linux/timekeeping32.h b/include/linux/timekeeping32.h
index cc59cc9e0e84..266017fc9ee9 100644
--- a/include/linux/timekeeping32.h
+++ b/include/linux/timekeeping32.h
@@ -11,36 +11,4 @@ static inline unsigned long get_seconds(void)
return ktime_get_real_seconds();
}
-static inline void getnstimeofday(struct timespec *ts)
-{
- struct timespec64 ts64;
-
- ktime_get_real_ts64(&ts64);
- *ts = timespec64_to_timespec(ts64);
-}
-
-static inline void ktime_get_ts(struct timespec *ts)
-{
- struct timespec64 ts64;
-
- ktime_get_ts64(&ts64);
- *ts = timespec64_to_timespec(ts64);
-}
-
-static inline void getrawmonotonic(struct timespec *ts)
-{
- struct timespec64 ts64;
-
- ktime_get_raw_ts64(&ts64);
- *ts = timespec64_to_timespec(ts64);
-}
-
-static inline void getboottime(struct timespec *ts)
-{
- struct timespec64 ts64;
-
- getboottime64(&ts64);
- *ts = timespec64_to_timespec(ts64);
-}
-
#endif
diff --git a/include/linux/timer.h b/include/linux/timer.h
index 1e6650ed066d..0dc19a8c39c9 100644
--- a/include/linux/timer.h
+++ b/include/linux/timer.h
@@ -164,7 +164,7 @@ static inline void destroy_timer_on_stack(struct timer_list *timer) { }
*/
static inline int timer_pending(const struct timer_list * timer)
{
- return timer->entry.pprev != NULL;
+ return !hlist_unhashed_lockless(&timer->entry);
}
extern void add_timer_on(struct timer_list *timer, int cpu);
diff --git a/include/linux/tnum.h b/include/linux/tnum.h
index c17af77f3fae..ea627d1ab7e3 100644
--- a/include/linux/tnum.h
+++ b/include/linux/tnum.h
@@ -30,7 +30,7 @@ struct tnum tnum_lshift(struct tnum a, u8 shift);
/* Shift (rsh) a tnum right (by a fixed shift) */
struct tnum tnum_rshift(struct tnum a, u8 shift);
/* Shift (arsh) a tnum right (by a fixed min_shift) */
-struct tnum tnum_arshift(struct tnum a, u8 min_shift);
+struct tnum tnum_arshift(struct tnum a, u8 min_shift, u8 insn_bitness);
/* Add two tnums, return @a + @b */
struct tnum tnum_add(struct tnum a, struct tnum b);
/* Subtract two tnums, return @a - @b */
diff --git a/include/linux/tpm.h b/include/linux/tpm.h
index 53c0ea9ec9df..03e9b184411b 100644
--- a/include/linux/tpm.h
+++ b/include/linux/tpm.h
@@ -21,6 +21,7 @@
#include <linux/acpi.h>
#include <linux/cdev.h>
#include <linux/fs.h>
+#include <linux/highmem.h>
#include <crypto/hash_info.h>
#define TPM_DIGEST_SIZE 20 /* Max TPM v1.2 PCR size */
@@ -67,6 +68,8 @@ struct tpm_class_ops {
u8 (*status) (struct tpm_chip *chip);
void (*update_timeouts)(struct tpm_chip *chip,
unsigned long *timeout_cap);
+ void (*update_durations)(struct tpm_chip *chip,
+ unsigned long *duration_cap);
int (*go_idle)(struct tpm_chip *chip);
int (*cmd_ready)(struct tpm_chip *chip);
int (*request_locality)(struct tpm_chip *chip, int loc);
@@ -161,6 +164,235 @@ struct tpm_chip {
int locality;
};
+#define TPM_HEADER_SIZE 10
+
+enum tpm2_const {
+ TPM2_PLATFORM_PCR = 24,
+ TPM2_PCR_SELECT_MIN = ((TPM2_PLATFORM_PCR + 7) / 8),
+};
+
+enum tpm2_timeouts {
+ TPM2_TIMEOUT_A = 750,
+ TPM2_TIMEOUT_B = 2000,
+ TPM2_TIMEOUT_C = 200,
+ TPM2_TIMEOUT_D = 30,
+ TPM2_DURATION_SHORT = 20,
+ TPM2_DURATION_MEDIUM = 750,
+ TPM2_DURATION_LONG = 2000,
+ TPM2_DURATION_LONG_LONG = 300000,
+ TPM2_DURATION_DEFAULT = 120000,
+};
+
+enum tpm2_structures {
+ TPM2_ST_NO_SESSIONS = 0x8001,
+ TPM2_ST_SESSIONS = 0x8002,
+};
+
+/* Indicates from what layer of the software stack the error comes from */
+#define TSS2_RC_LAYER_SHIFT 16
+#define TSS2_RESMGR_TPM_RC_LAYER (11 << TSS2_RC_LAYER_SHIFT)
+
+enum tpm2_return_codes {
+ TPM2_RC_SUCCESS = 0x0000,
+ TPM2_RC_HASH = 0x0083, /* RC_FMT1 */
+ TPM2_RC_HANDLE = 0x008B,
+ TPM2_RC_INITIALIZE = 0x0100, /* RC_VER1 */
+ TPM2_RC_FAILURE = 0x0101,
+ TPM2_RC_DISABLED = 0x0120,
+ TPM2_RC_COMMAND_CODE = 0x0143,
+ TPM2_RC_TESTING = 0x090A, /* RC_WARN */
+ TPM2_RC_REFERENCE_H0 = 0x0910,
+ TPM2_RC_RETRY = 0x0922,
+};
+
+enum tpm2_command_codes {
+ TPM2_CC_FIRST = 0x011F,
+ TPM2_CC_HIERARCHY_CONTROL = 0x0121,
+ TPM2_CC_HIERARCHY_CHANGE_AUTH = 0x0129,
+ TPM2_CC_CREATE_PRIMARY = 0x0131,
+ TPM2_CC_SEQUENCE_COMPLETE = 0x013E,
+ TPM2_CC_SELF_TEST = 0x0143,
+ TPM2_CC_STARTUP = 0x0144,
+ TPM2_CC_SHUTDOWN = 0x0145,
+ TPM2_CC_NV_READ = 0x014E,
+ TPM2_CC_CREATE = 0x0153,
+ TPM2_CC_LOAD = 0x0157,
+ TPM2_CC_SEQUENCE_UPDATE = 0x015C,
+ TPM2_CC_UNSEAL = 0x015E,
+ TPM2_CC_CONTEXT_LOAD = 0x0161,
+ TPM2_CC_CONTEXT_SAVE = 0x0162,
+ TPM2_CC_FLUSH_CONTEXT = 0x0165,
+ TPM2_CC_VERIFY_SIGNATURE = 0x0177,
+ TPM2_CC_GET_CAPABILITY = 0x017A,
+ TPM2_CC_GET_RANDOM = 0x017B,
+ TPM2_CC_PCR_READ = 0x017E,
+ TPM2_CC_PCR_EXTEND = 0x0182,
+ TPM2_CC_EVENT_SEQUENCE_COMPLETE = 0x0185,
+ TPM2_CC_HASH_SEQUENCE_START = 0x0186,
+ TPM2_CC_CREATE_LOADED = 0x0191,
+ TPM2_CC_LAST = 0x0193, /* Spec 1.36 */
+};
+
+enum tpm2_permanent_handles {
+ TPM2_RS_PW = 0x40000009,
+};
+
+enum tpm2_capabilities {
+ TPM2_CAP_HANDLES = 1,
+ TPM2_CAP_COMMANDS = 2,
+ TPM2_CAP_PCRS = 5,
+ TPM2_CAP_TPM_PROPERTIES = 6,
+};
+
+enum tpm2_properties {
+ TPM_PT_TOTAL_COMMANDS = 0x0129,
+};
+
+enum tpm2_startup_types {
+ TPM2_SU_CLEAR = 0x0000,
+ TPM2_SU_STATE = 0x0001,
+};
+
+enum tpm2_cc_attrs {
+ TPM2_CC_ATTR_CHANDLES = 25,
+ TPM2_CC_ATTR_RHANDLE = 28,
+};
+
+#define TPM_VID_INTEL 0x8086
+#define TPM_VID_WINBOND 0x1050
+#define TPM_VID_STM 0x104A
+
+enum tpm_chip_flags {
+ TPM_CHIP_FLAG_TPM2 = BIT(1),
+ TPM_CHIP_FLAG_IRQ = BIT(2),
+ TPM_CHIP_FLAG_VIRTUAL = BIT(3),
+ TPM_CHIP_FLAG_HAVE_TIMEOUTS = BIT(4),
+ TPM_CHIP_FLAG_ALWAYS_POWERED = BIT(5),
+ TPM_CHIP_FLAG_FIRMWARE_POWER_MANAGED = BIT(6),
+};
+
+#define to_tpm_chip(d) container_of(d, struct tpm_chip, dev)
+
+struct tpm_header {
+ __be16 tag;
+ __be32 length;
+ union {
+ __be32 ordinal;
+ __be32 return_code;
+ };
+} __packed;
+
+/* A string buffer type for constructing TPM commands. This is based on the
+ * ideas of string buffer code in security/keys/trusted.h but is heap based
+ * in order to keep the stack usage minimal.
+ */
+
+enum tpm_buf_flags {
+ TPM_BUF_OVERFLOW = BIT(0),
+};
+
+struct tpm_buf {
+ unsigned int flags;
+ u8 *data;
+};
+
+enum tpm2_object_attributes {
+ TPM2_OA_USER_WITH_AUTH = BIT(6),
+};
+
+enum tpm2_session_attributes {
+ TPM2_SA_CONTINUE_SESSION = BIT(0),
+};
+
+struct tpm2_hash {
+ unsigned int crypto_id;
+ unsigned int tpm_id;
+};
+
+static inline void tpm_buf_reset(struct tpm_buf *buf, u16 tag, u32 ordinal)
+{
+ struct tpm_header *head = (struct tpm_header *)buf->data;
+
+ head->tag = cpu_to_be16(tag);
+ head->length = cpu_to_be32(sizeof(*head));
+ head->ordinal = cpu_to_be32(ordinal);
+}
+
+static inline int tpm_buf_init(struct tpm_buf *buf, u16 tag, u32 ordinal)
+{
+ buf->data = (u8 *)__get_free_page(GFP_KERNEL);
+ if (!buf->data)
+ return -ENOMEM;
+
+ buf->flags = 0;
+ tpm_buf_reset(buf, tag, ordinal);
+ return 0;
+}
+
+static inline void tpm_buf_destroy(struct tpm_buf *buf)
+{
+ free_page((unsigned long)buf->data);
+}
+
+static inline u32 tpm_buf_length(struct tpm_buf *buf)
+{
+ struct tpm_header *head = (struct tpm_header *)buf->data;
+
+ return be32_to_cpu(head->length);
+}
+
+static inline u16 tpm_buf_tag(struct tpm_buf *buf)
+{
+ struct tpm_header *head = (struct tpm_header *)buf->data;
+
+ return be16_to_cpu(head->tag);
+}
+
+static inline void tpm_buf_append(struct tpm_buf *buf,
+ const unsigned char *new_data,
+ unsigned int new_len)
+{
+ struct tpm_header *head = (struct tpm_header *)buf->data;
+ u32 len = tpm_buf_length(buf);
+
+ /* Return silently if overflow has already happened. */
+ if (buf->flags & TPM_BUF_OVERFLOW)
+ return;
+
+ if ((len + new_len) > PAGE_SIZE) {
+ WARN(1, "tpm_buf: overflow\n");
+ buf->flags |= TPM_BUF_OVERFLOW;
+ return;
+ }
+
+ memcpy(&buf->data[len], new_data, new_len);
+ head->length = cpu_to_be32(len + new_len);
+}
+
+static inline void tpm_buf_append_u8(struct tpm_buf *buf, const u8 value)
+{
+ tpm_buf_append(buf, &value, 1);
+}
+
+static inline void tpm_buf_append_u16(struct tpm_buf *buf, const u16 value)
+{
+ __be16 value2 = cpu_to_be16(value);
+
+ tpm_buf_append(buf, (u8 *) &value2, 2);
+}
+
+static inline void tpm_buf_append_u32(struct tpm_buf *buf, const u32 value)
+{
+ __be32 value2 = cpu_to_be32(value);
+
+ tpm_buf_append(buf, (u8 *) &value2, 4);
+}
+
+static inline u32 tpm2_rc_value(u32 rc)
+{
+ return (rc & BIT(7)) ? rc & 0xff : rc;
+}
+
#if defined(CONFIG_TCG_TPM) || defined(CONFIG_TCG_TPM_MODULE)
extern int tpm_is_tpm2(struct tpm_chip *chip);
@@ -170,13 +402,8 @@ extern int tpm_pcr_extend(struct tpm_chip *chip, u32 pcr_idx,
struct tpm_digest *digests);
extern int tpm_send(struct tpm_chip *chip, void *cmd, size_t buflen);
extern int tpm_get_random(struct tpm_chip *chip, u8 *data, size_t max);
-extern int tpm_seal_trusted(struct tpm_chip *chip,
- struct trusted_key_payload *payload,
- struct trusted_key_options *options);
-extern int tpm_unseal_trusted(struct tpm_chip *chip,
- struct trusted_key_payload *payload,
- struct trusted_key_options *options);
extern struct tpm_chip *tpm_default_chip(void);
+void tpm2_flush_context(struct tpm_chip *chip, u32 handle);
#else
static inline int tpm_is_tpm2(struct tpm_chip *chip)
{
@@ -204,18 +431,6 @@ static inline int tpm_get_random(struct tpm_chip *chip, u8 *data, size_t max)
return -ENODEV;
}
-static inline int tpm_seal_trusted(struct tpm_chip *chip,
- struct trusted_key_payload *payload,
- struct trusted_key_options *options)
-{
- return -ENODEV;
-}
-static inline int tpm_unseal_trusted(struct tpm_chip *chip,
- struct trusted_key_payload *payload,
- struct trusted_key_options *options)
-{
- return -ENODEV;
-}
static inline struct tpm_chip *tpm_default_chip(void)
{
return NULL;
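
The tpm_buf helpers now living in this header build a command in a single page: tpm_buf_init() allocates the page and writes a tpm_header, the append helpers grow the body while keeping header->length in sync, and an oversized append latches TPM_BUF_OVERFLOW instead of running off the page. A hedged sketch of building a TPM2_CC_GET_RANDOM request with them; tpm_transmit_cmd() is internal to drivers/char/tpm and is shown only to complete the picture:

/* Sketch: ask a TPM 2.0 chip for 16 random bytes. */
static int tpm2_get_random_example(struct tpm_chip *chip)
{
	struct tpm_buf buf;
	int rc;

	rc = tpm_buf_init(&buf, TPM2_ST_NO_SESSIONS, TPM2_CC_GET_RANDOM);
	if (rc)
		return rc;

	tpm_buf_append_u16(&buf, 16);	/* bytesRequested */
	rc = tpm_transmit_cmd(chip, &buf, 0, "attempting get random");
	tpm_buf_destroy(&buf);
	return rc;
}
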
diff --git a/include/linux/trace.h b/include/linux/trace.h
index b95ffb2188ab..7fd86d3c691f 100644
--- a/include/linux/trace.h
+++ b/include/linux/trace.h
@@ -24,6 +24,14 @@ struct trace_export {
int register_ftrace_export(struct trace_export *export);
int unregister_ftrace_export(struct trace_export *export);
+struct trace_array;
+
+void trace_printk_init_buffers(void);
+int trace_array_printk(struct trace_array *tr, unsigned long ip,
+ const char *fmt, ...);
+void trace_array_put(struct trace_array *tr);
+struct trace_array *trace_array_get_by_name(const char *name);
+int trace_array_destroy(struct trace_array *tr);
#endif /* CONFIG_TRACING */
#endif /* _LINUX_TRACE_H */
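
The prototypes added to trace.h let kernel code create or look up a named trace instance and print into it without going through tracefs. A hedged sketch of the flow, assuming the declarations above:

/* Sketch: emit one line into a dedicated "example" trace instance. */
static void trace_instance_example(void)
{
	struct trace_array *tr;

	tr = trace_array_get_by_name("example");	/* created if it does not exist */
	if (!tr)
		return;

	trace_printk_init_buffers();
	trace_array_printk(tr, _THIS_IP_, "hello from %s\n", __func__);
	trace_array_put(tr);
	/* trace_array_destroy(tr) would remove the instance entirely. */
}
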
diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h
index 30a8cdcfd4a4..6c7a10a6d71e 100644
--- a/include/linux/trace_events.h
+++ b/include/linux/trace_events.h
@@ -11,7 +11,7 @@
#include <linux/tracepoint.h>
struct trace_array;
-struct trace_buffer;
+struct array_buffer;
struct tracer;
struct dentry;
struct bpf_prog;
@@ -45,6 +45,11 @@ const char *trace_print_array_seq(struct trace_seq *p,
const void *buf, int count,
size_t el_size);
+const char *
+trace_print_hex_dump_seq(struct trace_seq *p, const char *prefix_str,
+ int prefix_type, int rowsize, int groupsize,
+ const void *buf, size_t len, bool ascii);
+
struct trace_iterator;
struct trace_event;
@@ -74,7 +79,7 @@ struct trace_entry {
struct trace_iterator {
struct trace_array *tr;
struct tracer *trace;
- struct trace_buffer *trace_buffer;
+ struct array_buffer *array_buffer;
void *private;
int cpu_file;
struct mutex mutex;
@@ -148,7 +153,7 @@ void tracing_generic_entry_update(struct trace_entry *entry,
struct trace_event_file;
struct ring_buffer_event *
-trace_event_buffer_lock_reserve(struct ring_buffer **current_buffer,
+trace_event_buffer_lock_reserve(struct trace_buffer **current_buffer,
struct trace_event_file *trace_file,
int type, unsigned long len,
unsigned long flags, int pc);
@@ -187,6 +192,22 @@ enum trace_reg {
struct trace_event_call;
+#define TRACE_FUNCTION_TYPE ((const char *)~0UL)
+
+struct trace_event_fields {
+ const char *type;
+ union {
+ struct {
+ const char *name;
+ const int size;
+ const int align;
+ const int is_signed;
+ const int filter_type;
+ };
+ int (*define_fields)(struct trace_event_call *);
+ };
+};
+
struct trace_event_class {
const char *system;
void *probe;
@@ -195,7 +216,7 @@ struct trace_event_class {
#endif
int (*reg)(struct trace_event_call *event,
enum trace_reg type, void *data);
- int (*define_fields)(struct trace_event_call *);
+ struct trace_event_fields *fields_array;
struct list_head *(*get_fields)(struct trace_event_call *);
struct list_head fields;
int (*raw_init)(struct trace_event_call *);
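With this change an event class carries a static table describing its fields rather than a define_fields() callback; the callback form survives as the union's second member for events that still build their fields at run time. A sketch of what a static table for a two-field event might look like (names are illustrative; FILTER_OTHER is the generic filter type already defined in this header):

static struct trace_event_fields example_event_fields[] = {
        {
                .type = "unsigned int",
                .name = "count",
                .size = sizeof(unsigned int),
                .align = __alignof__(unsigned int),
                .is_signed = 0,
                .filter_type = FILTER_OTHER,
        },
        {
                .type = "char[16]",
                .name = "tag",
                .size = 16,
                .align = 1,
                .is_signed = 0,
                .filter_type = FILTER_OTHER,
        },
        {}      /* terminator */
};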
@@ -205,12 +226,13 @@ extern int trace_event_reg(struct trace_event_call *event,
enum trace_reg type, void *data);
struct trace_event_buffer {
- struct ring_buffer *buffer;
+ struct trace_buffer *buffer;
struct ring_buffer_event *event;
struct trace_event_file *trace_file;
void *entry;
unsigned long flags;
int pc;
+ struct pt_regs *regs;
};
void *trace_event_buffer_reserve(struct trace_event_buffer *fbuffer,
@@ -343,6 +365,128 @@ enum {
EVENT_FILE_FL_WAS_ENABLED_BIT,
};
+extern struct trace_event_file *trace_get_event_file(const char *instance,
+ const char *system,
+ const char *event);
+extern void trace_put_event_file(struct trace_event_file *file);
+
+#define MAX_DYNEVENT_CMD_LEN (2048)
+
+enum dynevent_type {
+ DYNEVENT_TYPE_SYNTH = 1,
+ DYNEVENT_TYPE_KPROBE,
+ DYNEVENT_TYPE_NONE,
+};
+
+struct dynevent_cmd;
+
+typedef int (*dynevent_create_fn_t)(struct dynevent_cmd *cmd);
+
+struct dynevent_cmd {
+ struct seq_buf seq;
+ const char *event_name;
+ unsigned int n_fields;
+ enum dynevent_type type;
+ dynevent_create_fn_t run_command;
+ void *private_data;
+};
+
+extern int dynevent_create(struct dynevent_cmd *cmd);
+
+extern int synth_event_delete(const char *name);
+
+extern void synth_event_cmd_init(struct dynevent_cmd *cmd,
+ char *buf, int maxlen);
+
+extern int __synth_event_gen_cmd_start(struct dynevent_cmd *cmd,
+ const char *name,
+ struct module *mod, ...);
+
+#define synth_event_gen_cmd_start(cmd, name, mod, ...) \
+ __synth_event_gen_cmd_start(cmd, name, mod, ## __VA_ARGS__, NULL)
+
+struct synth_field_desc {
+ const char *type;
+ const char *name;
+};
+
+extern int synth_event_gen_cmd_array_start(struct dynevent_cmd *cmd,
+ const char *name,
+ struct module *mod,
+ struct synth_field_desc *fields,
+ unsigned int n_fields);
+extern int synth_event_create(const char *name,
+ struct synth_field_desc *fields,
+ unsigned int n_fields, struct module *mod);
+
+extern int synth_event_add_field(struct dynevent_cmd *cmd,
+ const char *type,
+ const char *name);
+extern int synth_event_add_field_str(struct dynevent_cmd *cmd,
+ const char *type_name);
+extern int synth_event_add_fields(struct dynevent_cmd *cmd,
+ struct synth_field_desc *fields,
+ unsigned int n_fields);
+
+#define synth_event_gen_cmd_end(cmd) \
+ dynevent_create(cmd)
+
+struct synth_event;
+
+struct synth_event_trace_state {
+ struct trace_event_buffer fbuffer;
+ struct synth_trace_event *entry;
+ struct trace_buffer *buffer;
+ struct synth_event *event;
+ unsigned int cur_field;
+ unsigned int n_u64;
+ bool disabled;
+ bool add_next;
+ bool add_name;
+};
+
+extern int synth_event_trace(struct trace_event_file *file,
+ unsigned int n_vals, ...);
+extern int synth_event_trace_array(struct trace_event_file *file, u64 *vals,
+ unsigned int n_vals);
+extern int synth_event_trace_start(struct trace_event_file *file,
+ struct synth_event_trace_state *trace_state);
+extern int synth_event_add_next_val(u64 val,
+ struct synth_event_trace_state *trace_state);
+extern int synth_event_add_val(const char *field_name, u64 val,
+ struct synth_event_trace_state *trace_state);
+extern int synth_event_trace_end(struct synth_event_trace_state *trace_state);
+
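Taken together, the command-building calls above let a module define a synthetic event at run time and then fire it. A compressed sketch of the define step (event and field names are illustrative; error unwinding is omitted):

static int example_create_synth_event(void)
{
        struct synth_field_desc fields[] = {
                { .type = "u64",   .name = "lat_ns" },
                { .type = "pid_t", .name = "pid"    },
        };
        struct dynevent_cmd cmd;
        char *buf;
        int ret;

        buf = kzalloc(MAX_DYNEVENT_CMD_LEN, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        synth_event_cmd_init(&cmd, buf, MAX_DYNEVENT_CMD_LEN);

        ret = synth_event_gen_cmd_array_start(&cmd, "example_latency", THIS_MODULE,
                                              fields, ARRAY_SIZE(fields));
        if (!ret)
                ret = synth_event_gen_cmd_end(&cmd);

        kfree(buf);
        return ret;
}

Once created, the event can be looked up with trace_get_event_file() and fired either in one shot with synth_event_trace(file, 2, lat, pid) or field by field via synth_event_trace_start()/synth_event_add_val()/synth_event_trace_end().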
+extern int kprobe_event_delete(const char *name);
+
+extern void kprobe_event_cmd_init(struct dynevent_cmd *cmd,
+ char *buf, int maxlen);
+
+#define kprobe_event_gen_cmd_start(cmd, name, loc, ...) \
+ __kprobe_event_gen_cmd_start(cmd, false, name, loc, ## __VA_ARGS__, NULL)
+
+#define kretprobe_event_gen_cmd_start(cmd, name, loc, ...) \
+ __kprobe_event_gen_cmd_start(cmd, true, name, loc, ## __VA_ARGS__, NULL)
+
+extern int __kprobe_event_gen_cmd_start(struct dynevent_cmd *cmd,
+ bool kretprobe,
+ const char *name,
+ const char *loc, ...);
+
+#define kprobe_event_add_fields(cmd, ...) \
+ __kprobe_event_add_fields(cmd, ## __VA_ARGS__, NULL)
+
+#define kprobe_event_add_field(cmd, field) \
+ __kprobe_event_add_fields(cmd, field, NULL)
+
+extern int __kprobe_event_add_fields(struct dynevent_cmd *cmd, ...);
+
+#define kprobe_event_gen_cmd_end(cmd) \
+ dynevent_create(cmd)
+
+#define kretprobe_event_gen_cmd_end(cmd) \
+ dynevent_create(cmd)
+
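The kprobe counterparts follow the same pattern: initialize a command buffer, generate the probe definition, then run it. A sketch (the probed symbol and the x86-64-flavoured fetch-arg strings are illustrative examples of the usual kprobe-event syntax, not anything this header mandates):

static int example_create_kprobe_event(void)
{
        struct dynevent_cmd cmd;
        char *buf;
        int ret;

        buf = kzalloc(MAX_DYNEVENT_CMD_LEN, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        kprobe_event_cmd_init(&cmd, buf, MAX_DYNEVENT_CMD_LEN);

        /* event "example_open" on do_sys_open(), recording two arguments */
        ret = kprobe_event_gen_cmd_start(&cmd, "example_open", "do_sys_open",
                                         "dfd=%di", "flags=%dx");
        if (!ret)
                ret = kprobe_event_gen_cmd_end(&cmd);

        kfree(buf);
        return ret;
}

Teardown mirrors the synthetic-event case: kprobe_event_delete("example_open") removes the probe once it is no longer in use.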
/*
* Event file flags:
* ENABLED - The event is enabled
@@ -550,7 +694,8 @@ extern int trace_event_get_offsets(struct trace_event_call *call);
int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set);
int trace_set_clr_event(const char *system, const char *event, int set);
-
+int trace_array_set_clr_event(struct trace_array *tr, const char *system,
+ const char *event, bool enable);
/*
* The double __builtin_constant_p is because gcc will give us an error
* if we try to allocate the static variable to fmt if it is not a
diff --git a/include/linux/trace_seq.h b/include/linux/trace_seq.h
index 6609b39a7232..6c30508fca19 100644
--- a/include/linux/trace_seq.h
+++ b/include/linux/trace_seq.h
@@ -92,6 +92,10 @@ extern int trace_seq_path(struct trace_seq *s, const struct path *path);
extern void trace_seq_bitmask(struct trace_seq *s, const unsigned long *maskp,
int nmaskbits);
+extern int trace_seq_hex_dump(struct trace_seq *s, const char *prefix_str,
+ int prefix_type, int rowsize, int groupsize,
+ const void *buf, size_t len, bool ascii);
+
#else /* CONFIG_TRACING */
static inline void trace_seq_printf(struct trace_seq *s, const char *fmt, ...)
{
diff --git a/include/linux/tracefs.h b/include/linux/tracefs.h
index 88d279c1b863..99912445974c 100644
--- a/include/linux/tracefs.h
+++ b/include/linux/tracefs.h
@@ -28,7 +28,6 @@ struct dentry *tracefs_create_file(const char *name, umode_t mode,
struct dentry *tracefs_create_dir(const char *name, struct dentry *parent);
void tracefs_remove(struct dentry *dentry);
-void tracefs_remove_recursive(struct dentry *dentry);
struct dentry *tracefs_create_instance_dir(const char *name, struct dentry *parent,
int (*mkdir)(const char *name),
diff --git a/include/linux/transport_class.h b/include/linux/transport_class.h
index a9c59761927b..63076fb835e3 100644
--- a/include/linux/transport_class.h
+++ b/include/linux/transport_class.h
@@ -62,16 +62,16 @@ struct transport_container {
container_of(x, struct transport_container, ac)
void transport_remove_device(struct device *);
-void transport_add_device(struct device *);
+int transport_add_device(struct device *);
void transport_setup_device(struct device *);
void transport_configure_device(struct device *);
void transport_destroy_device(struct device *);
-static inline void
+static inline int
transport_register_device(struct device *dev)
{
transport_setup_device(dev);
- transport_add_device(dev);
+ return transport_add_device(dev);
}
static inline void
diff --git a/include/linux/tty.h b/include/linux/tty.h
index bfa4e2ee94a9..bd5fe0e907e8 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -225,6 +225,8 @@ struct tty_port_client_operations {
void (*write_wakeup)(struct tty_port *port);
};
+extern const struct tty_port_client_operations tty_port_default_client_ops;
+
struct tty_port {
struct tty_bufhead buf; /* Locked internally */
struct tty_struct *tty; /* Back pointer */
diff --git a/include/linux/types.h b/include/linux/types.h
index 05030f608be3..d3021c879179 100644
--- a/include/linux/types.h
+++ b/include/linux/types.h
@@ -65,11 +65,6 @@ typedef __kernel_ssize_t ssize_t;
typedef __kernel_ptrdiff_t ptrdiff_t;
#endif
-#ifndef _TIME_T
-#define _TIME_T
-typedef __kernel_time_t time_t;
-#endif
-
#ifndef _CLOCK_T
#define _CLOCK_T
typedef __kernel_clock_t clock_t;
@@ -225,5 +220,10 @@ struct callback_head {
typedef void (*rcu_callback_t)(struct rcu_head *head);
typedef void (*call_rcu_func_t)(struct rcu_head *head, rcu_callback_t func);
+typedef void (*swap_func_t)(void *a, void *b, int size);
+
+typedef int (*cmp_r_func_t)(const void *a, const void *b, const void *priv);
+typedef int (*cmp_func_t)(const void *a, const void *b);
+
#endif /* __ASSEMBLY__ */
#endif /* _LINUX_TYPES_H */
diff --git a/include/linux/u64_stats_sync.h b/include/linux/u64_stats_sync.h
index a27604f99ed0..9de5c10293f5 100644
--- a/include/linux/u64_stats_sync.h
+++ b/include/linux/u64_stats_sync.h
@@ -40,8 +40,8 @@
* spin_lock_bh(...) or other synchronization to get exclusive access
* ...
* u64_stats_update_begin(&stats->syncp);
- * stats->bytes64 += len; // non atomic operation
- * stats->packets64++; // non atomic operation
+ * u64_stats_add(&stats->bytes64, len); // non atomic operation
+ * u64_stats_inc(&stats->packets64); // non atomic operation
* u64_stats_update_end(&stats->syncp);
*
* While a consumer (reader) should use following template to get consistent
@@ -52,8 +52,8 @@
*
* do {
* start = u64_stats_fetch_begin(&stats->syncp);
- * tbytes = stats->bytes64; // non atomic operation
- * tpackets = stats->packets64; // non atomic operation
+ * tbytes = u64_stats_read(&stats->bytes64); // non atomic operation
+ * tpackets = u64_stats_read(&stats->packets64); // non atomic operation
* } while (u64_stats_fetch_retry(&stats->syncp, start));
*
*
@@ -68,6 +68,49 @@ struct u64_stats_sync {
#endif
};
+#if BITS_PER_LONG == 64
+#include <asm/local64.h>
+
+typedef struct {
+ local64_t v;
+} u64_stats_t;
+} u64_stats_t;
+
+static inline u64 u64_stats_read(const u64_stats_t *p)
+{
+ return local64_read(&p->v);
+}
+
+static inline void u64_stats_add(u64_stats_t *p, unsigned long val)
+{
+ local64_add(val, &p->v);
+}
+
+static inline void u64_stats_inc(u64_stats_t *p)
+{
+ local64_inc(&p->v);
+}
+
+#else
+
+typedef struct {
+ u64 v;
+} u64_stats_t;
+
+static inline u64 u64_stats_read(const u64_stats_t *p)
+{
+ return p->v;
+}
+
+static inline void u64_stats_add(u64_stats_t *p, unsigned long val)
+{
+ p->v += val;
+}
+
+static inline void u64_stats_inc(u64_stats_t *p)
+{
+ p->v++;
+}
+#endif
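The u64_stats_t wrapper gives both templates above a single type: on 64-bit it is a local64_t that can be updated without touching the seqcount, while on 32-bit it falls back to a plain u64 protected by the surrounding u64_stats_update_begin()/end() pair. A minimal sketch of a stats block using it (struct and field names are illustrative):

struct example_pcpu_stats {
        u64_stats_t             rx_packets;
        u64_stats_t             rx_bytes;
        struct u64_stats_sync   syncp;
};

static void example_stats_update(struct example_pcpu_stats *s, unsigned int len)
{
        u64_stats_update_begin(&s->syncp);
        u64_stats_inc(&s->rx_packets);
        u64_stats_add(&s->rx_bytes, len);
        u64_stats_update_end(&s->syncp);
}

static void example_stats_read(struct example_pcpu_stats *s,
                               u64 *packets, u64 *bytes)
{
        unsigned int start;

        do {
                start = u64_stats_fetch_begin(&s->syncp);
                *packets = u64_stats_read(&s->rx_packets);
                *bytes   = u64_stats_read(&s->rx_bytes);
        } while (u64_stats_fetch_retry(&s->syncp, start));
}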
static inline void u64_stats_init(struct u64_stats_sync *syncp)
{
diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
index d4ee6e942562..67f016010aad 100644
--- a/include/linux/uaccess.h
+++ b/include/linux/uaccess.h
@@ -311,6 +311,7 @@ copy_struct_from_user(void *dst, size_t ksize, const void __user *src,
* happens, handle that and return -EFAULT.
*/
extern long probe_kernel_read(void *dst, const void *src, size_t size);
+extern long probe_kernel_read_strict(void *dst, const void *src, size_t size);
extern long __probe_kernel_read(void *dst, const void *src, size_t size);
/*
@@ -337,7 +338,22 @@ extern long __probe_user_read(void *dst, const void __user *src, size_t size);
extern long notrace probe_kernel_write(void *dst, const void *src, size_t size);
extern long notrace __probe_kernel_write(void *dst, const void *src, size_t size);
+/*
+ * probe_user_write(): safely attempt to write to a location in user space
+ * @dst: address to write to
+ * @src: pointer to the data that shall be written
+ * @size: size of the data chunk
+ *
+ * Safely write to address @dst from the buffer at @src. If a kernel fault
+ * happens, handle that and return -EFAULT.
+ */
+extern long notrace probe_user_write(void __user *dst, const void *src, size_t size);
+extern long notrace __probe_user_write(void __user *dst, const void *src, size_t size);
+
extern long strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count);
+extern long strncpy_from_unsafe_strict(char *dst, const void *unsafe_addr,
+ long count);
+extern long __strncpy_from_unsafe(char *dst, const void *unsafe_addr, long count);
extern long strncpy_from_unsafe_user(char *dst, const void __user *unsafe_addr,
long count);
extern long strnlen_unsafe_user(const void __user *unsafe_addr, long count);
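probe_user_write() pairs with probe_kernel_write() for the user-space side: it writes through a __user pointer with page faults disabled and reports -EFAULT instead of oopsing or sleeping. A minimal sketch (the destination pointer is assumed to come from a context, such as a tracing or BPF helper, where an ordinary copy_to_user() is not usable):

static int example_poke_user(void __user *uaddr)
{
        u32 val = 0xdeadbeef;

        /* Returns 0 on success, -EFAULT if the user page is absent or read-only. */
        return probe_user_write(uaddr, &val, sizeof(val));
}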
diff --git a/include/linux/uio.h b/include/linux/uio.h
index ab5f523bc0df..9576fd8158d7 100644
--- a/include/linux/uio.h
+++ b/include/linux/uio.h
@@ -45,8 +45,8 @@ struct iov_iter {
union {
unsigned long nr_segs;
struct {
- int idx;
- int start_idx;
+ unsigned int head;
+ unsigned int start_head;
};
};
};
diff --git a/include/linux/units.h b/include/linux/units.h
new file mode 100644
index 000000000000..aaf716364ec3
--- /dev/null
+++ b/include/linux/units.h
@@ -0,0 +1,84 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_UNITS_H
+#define _LINUX_UNITS_H
+
+#include <linux/kernel.h>
+
+#define ABSOLUTE_ZERO_MILLICELSIUS -273150
+
+static inline long milli_kelvin_to_millicelsius(long t)
+{
+ return t + ABSOLUTE_ZERO_MILLICELSIUS;
+}
+
+static inline long millicelsius_to_milli_kelvin(long t)
+{
+ return t - ABSOLUTE_ZERO_MILLICELSIUS;
+}
+
+#define MILLIDEGREE_PER_DEGREE 1000
+#define MILLIDEGREE_PER_DECIDEGREE 100
+
+static inline long kelvin_to_millicelsius(long t)
+{
+ return milli_kelvin_to_millicelsius(t * MILLIDEGREE_PER_DEGREE);
+}
+
+static inline long millicelsius_to_kelvin(long t)
+{
+ t = millicelsius_to_milli_kelvin(t);
+
+ return DIV_ROUND_CLOSEST(t, MILLIDEGREE_PER_DEGREE);
+}
+
+static inline long deci_kelvin_to_celsius(long t)
+{
+ t = milli_kelvin_to_millicelsius(t * MILLIDEGREE_PER_DECIDEGREE);
+
+ return DIV_ROUND_CLOSEST(t, MILLIDEGREE_PER_DEGREE);
+}
+
+static inline long celsius_to_deci_kelvin(long t)
+{
+ t = millicelsius_to_milli_kelvin(t * MILLIDEGREE_PER_DEGREE);
+
+ return DIV_ROUND_CLOSEST(t, MILLIDEGREE_PER_DECIDEGREE);
+}
+
+/**
+ * deci_kelvin_to_millicelsius_with_offset - convert decidegrees Kelvin to millidegrees Celsius
+ * @t: temperature value in decidegrees Kelvin
+ * @offset: difference between Kelvin and Celsius in millidegrees
+ *
+ * Return: temperature value in millidegrees Celsius
+ */
+static inline long deci_kelvin_to_millicelsius_with_offset(long t, long offset)
+{
+ return t * MILLIDEGREE_PER_DECIDEGREE - offset;
+}
+
+static inline long deci_kelvin_to_millicelsius(long t)
+{
+ return milli_kelvin_to_millicelsius(t * MILLIDEGREE_PER_DECIDEGREE);
+}
+
+static inline long millicelsius_to_deci_kelvin(long t)
+{
+ t = millicelsius_to_milli_kelvin(t);
+
+ return DIV_ROUND_CLOSEST(t, MILLIDEGREE_PER_DECIDEGREE);
+}
+
+static inline long kelvin_to_celsius(long t)
+{
+ return t + DIV_ROUND_CLOSEST(ABSOLUTE_ZERO_MILLICELSIUS,
+ MILLIDEGREE_PER_DEGREE);
+}
+
+static inline long celsius_to_kelvin(long t)
+{
+ return t - DIV_ROUND_CLOSEST(ABSOLUTE_ZERO_MILLICELSIUS,
+ MILLIDEGREE_PER_DEGREE);
+}
+
+#endif /* _LINUX_UNITS_H */
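These helpers centralize the temperature conversions that thermal and hwmon drivers otherwise open-code, e.g. ACPI reports decidegrees Kelvin while the thermal core wants millidegrees Celsius. A quick worked sketch:

#include <linux/units.h>

/* ACPI _TMP style value: 3031 dK == 303.1 K == 29.95 °C */
static long example_acpi_tmp_to_millicelsius(long deci_kelvin)
{
        return deci_kelvin_to_millicelsius(deci_kelvin);    /* 3031 -> 29950 */
}

static long example_millicelsius_to_acpi_tmp(long millicelsius)
{
        return millicelsius_to_deci_kelvin(millicelsius);   /* 29950 -> 3031 */
}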
diff --git a/include/linux/usb.h b/include/linux/usb.h
index e656e7b4b1e4..9f3c721c70dc 100644
--- a/include/linux/usb.h
+++ b/include/linux/usb.h
@@ -325,7 +325,7 @@ struct usb_interface_cache {
/* variable-length array of alternate settings for this interface,
* stored in no particular order */
- struct usb_host_interface altsetting[0];
+ struct usb_host_interface altsetting[];
};
#define ref_to_usb_interface_cache(r) \
container_of(r, struct usb_interface_cache, ref)
@@ -708,6 +708,7 @@ struct usb_device {
unsigned lpm_disable_count;
u16 hub_delay;
+ unsigned use_generic_driver:1;
};
#define to_usb_device(d) container_of(d, struct usb_device, dev)
@@ -1228,12 +1229,16 @@ struct usb_driver {
* @drvwrap: Driver-model core structure wrapper.
* @supports_autosuspend: if set to 0, the USB core will not allow autosuspend
* for devices bound to this driver.
+ * @generic_subclass: if set to 1, the generic USB driver's probe, disconnect,
+ * resume and suspend functions will be called in addition to the driver's
+ * own, so this part of the setup does not need to be replicated.
*
* USB drivers must provide all the fields listed above except drvwrap.
*/
struct usb_device_driver {
const char *name;
+ bool (*match) (struct usb_device *udev);
int (*probe) (struct usb_device *udev);
void (*disconnect) (struct usb_device *udev);
@@ -1241,7 +1246,9 @@ struct usb_device_driver {
int (*resume) (struct usb_device *udev, pm_message_t message);
const struct attribute_group **dev_groups;
struct usbdrv_wrap drvwrap;
+ const struct usb_device_id *id_table;
unsigned int supports_autosuspend:1;
+ unsigned int generic_subclass:1;
};
#define to_usb_device_driver(d) container_of(d, struct usb_device_driver, \
drvwrap.driver)
@@ -1582,7 +1589,7 @@ struct urb {
int error_count; /* (return) number of ISO errors */
void *context; /* (in) context for completion */
usb_complete_t complete; /* (in) completion routine */
- struct usb_iso_packet_descriptor iso_frame_desc[0];
+ struct usb_iso_packet_descriptor iso_frame_desc[];
/* (in) ISO ONLY */
};
diff --git a/include/linux/usb/audio-v2.h b/include/linux/usb/audio-v2.h
index ba4b3e3327ff..5e31740c7e40 100644
--- a/include/linux/usb/audio-v2.h
+++ b/include/linux/usb/audio-v2.h
@@ -153,7 +153,7 @@ struct uac2_feature_unit_descriptor {
__u8 bSourceID;
/* bmaControls is actually u32,
* but u8 is needed for the hybrid parser */
- __u8 bmaControls[0]; /* variable length */
+ __u8 bmaControls[]; /* variable length */
} __attribute__((packed));
/* 4.9.2 Class-Specific AS Interface Descriptor */
diff --git a/include/linux/usb/audio-v3.h b/include/linux/usb/audio-v3.h
index 6b708434b7f9..c69a6f2e6837 100644
--- a/include/linux/usb/audio-v3.h
+++ b/include/linux/usb/audio-v3.h
@@ -109,7 +109,7 @@ struct uac3_feature_unit_descriptor {
__u8 bSourceID;
/* bmaControls is actually u32,
* but u8 is needed for the hybrid parser */
- __u8 bmaControls[0]; /* variable length */
+ __u8 bmaControls[]; /* variable length */
/* wFeatureDescrStr omitted */
} __attribute__((packed));
diff --git a/include/linux/usb/ehci_def.h b/include/linux/usb/ehci_def.h
index a15ce99dfc2d..78e006355557 100644
--- a/include/linux/usb/ehci_def.h
+++ b/include/linux/usb/ehci_def.h
@@ -151,7 +151,7 @@ struct ehci_regs {
#define PORT_OWNER (1<<13) /* true: companion hc owns this port */
#define PORT_POWER (1<<12) /* true: has power (see PPC) */
#define PORT_USB11(x) (((x)&(3<<10)) == (1<<10)) /* USB 1.1 device */
-/* 11:10 for detecting lowspeed devices (reset vs release ownership) */
+#define PORT_LS_MASK (3<<10) /* Link status (SE0, K or J) */
/* 9 reserved */
#define PORT_LPM (1<<9) /* LPM transaction */
#define PORT_RESET (1<<8) /* reset port */
diff --git a/include/linux/usb/gadget.h b/include/linux/usb/gadget.h
index 124462d65eac..9411c08a5c7e 100644
--- a/include/linux/usb/gadget.h
+++ b/include/linux/usb/gadget.h
@@ -767,7 +767,7 @@ struct usb_gadget_strings {
struct usb_gadget_string_container {
struct list_head list;
- u8 *stash[0];
+ u8 *stash[];
};
/* put descriptor for string with that id into buf (buflen >= 256) */
diff --git a/include/linux/usb/gpio_vbus.h b/include/linux/usb/gpio_vbus.h
deleted file mode 100644
index 804fb06cf6d6..000000000000
--- a/include/linux/usb/gpio_vbus.h
+++ /dev/null
@@ -1,33 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * A simple GPIO VBUS sensing driver for B peripheral only devices
- * with internal transceivers.
- * Optionally D+ pullup can be controlled by a second GPIO.
- *
- * Copyright (c) 2008 Philipp Zabel <philipp.zabel@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- */
-
-/**
- * struct gpio_vbus_mach_info - configuration for gpio_vbus
- * @gpio_vbus: VBUS sensing GPIO
- * @gpio_pullup: optional D+ or D- pullup GPIO (else negative/invalid)
- * @gpio_vbus_inverted: true if gpio_vbus is active low
- * @gpio_pullup_inverted: true if gpio_pullup is active low
- * @wakeup: configure gpio_vbus as a wake-up source
- *
- * The VBUS sensing GPIO should have a pulldown, which will normally be
- * part of a resistor ladder turning a 4.0V-5.25V level on VBUS into a
- * value the GPIO detects as active. Some systems will use comparators.
- */
-struct gpio_vbus_mach_info {
- int gpio_vbus;
- int gpio_pullup;
- bool gpio_vbus_inverted;
- bool gpio_pullup_inverted;
- bool wakeup;
-};
diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h
index 712b2a603645..e12105ed3834 100644
--- a/include/linux/usb/hcd.h
+++ b/include/linux/usb/hcd.h
@@ -228,7 +228,7 @@ struct usb_hcd {
/* The HC driver's private data is stored at the end of
* this structure.
*/
- unsigned long hcd_priv[0]
+ unsigned long hcd_priv[]
__attribute__ ((aligned(sizeof(s64))));
};
diff --git a/include/linux/usb/irda.h b/include/linux/usb/irda.h
index 396d2b043e64..556a801efce3 100644
--- a/include/linux/usb/irda.h
+++ b/include/linux/usb/irda.h
@@ -119,11 +119,22 @@ struct usb_irda_cs_descriptor {
* 6 - 115200 bps
* 7 - 576000 bps
* 8 - 1.152 Mbps
- * 9 - 5 mbps
+ * 9 - 4 Mbps
* 10..15 - Reserved
*/
#define USB_IRDA_STATUS_LINK_SPEED 0x0f
+#define USB_IRDA_LS_NO_CHANGE 0
+#define USB_IRDA_LS_2400 1
+#define USB_IRDA_LS_9600 2
+#define USB_IRDA_LS_19200 3
+#define USB_IRDA_LS_38400 4
+#define USB_IRDA_LS_57600 5
+#define USB_IRDA_LS_115200 6
+#define USB_IRDA_LS_576000 7
+#define USB_IRDA_LS_1152000 8
+#define USB_IRDA_LS_4000000 9
+
/* The following is a 4-bit value used only for
* outbound header:
*
diff --git a/include/linux/usb/pd.h b/include/linux/usb/pd.h
index 145c38e351c2..a665d7f21142 100644
--- a/include/linux/usb/pd.h
+++ b/include/linux/usb/pd.h
@@ -45,7 +45,8 @@ enum pd_data_msg_type {
PD_DATA_BATT_STATUS = 5,
PD_DATA_ALERT = 6,
PD_DATA_GET_COUNTRY_INFO = 7,
- /* 8-14 Reserved */
+ PD_DATA_ENTER_USB = 8,
+ /* 9-14 Reserved */
PD_DATA_VENDOR_DEF = 15,
/* 16-31 Reserved */
};
@@ -418,6 +419,36 @@ static inline unsigned int rdo_max_power(u32 rdo)
return ((rdo >> RDO_BATT_MAX_PWR_SHIFT) & RDO_PWR_MASK) * 250;
}
+/* Enter_USB Data Object */
+#define EUDO_USB_MODE_MASK GENMASK(30, 28)
+#define EUDO_USB_MODE_SHIFT 28
+#define EUDO_USB_MODE_USB2 0
+#define EUDO_USB_MODE_USB3 1
+#define EUDO_USB_MODE_USB4 2
+#define EUDO_USB4_DRD BIT(26)
+#define EUDO_USB3_DRD BIT(25)
+#define EUDO_CABLE_SPEED_MASK GENMASK(23, 21)
+#define EUDO_CABLE_SPEED_SHIFT 21
+#define EUDO_CABLE_SPEED_USB2 0
+#define EUDO_CABLE_SPEED_USB3_GEN1 1
+#define EUDO_CABLE_SPEED_USB4_GEN2 2
+#define EUDO_CABLE_SPEED_USB4_GEN3 3
+#define EUDO_CABLE_TYPE_MASK GENMASK(20, 19)
+#define EUDO_CABLE_TYPE_SHIFT 19
+#define EUDO_CABLE_TYPE_PASSIVE 0
+#define EUDO_CABLE_TYPE_RE_TIMER 1
+#define EUDO_CABLE_TYPE_RE_DRIVER 2
+#define EUDO_CABLE_TYPE_OPTICAL 3
+#define EUDO_CABLE_CURRENT_MASK GENMASK(18, 17)
+#define EUDO_CABLE_CURRENT_SHIFT 17
+#define EUDO_CABLE_CURRENT_NOTSUPP 0
+#define EUDO_CABLE_CURRENT_3A 2
+#define EUDO_CABLE_CURRENT_5A 3
+#define EUDO_PCIE_SUPPORT BIT(16)
+#define EUDO_DP_SUPPORT BIT(15)
+#define EUDO_TBT_SUPPORT BIT(14)
+#define EUDO_HOST_PRESENT BIT(13)
+
/* USB PD timers and counters */
#define PD_T_NO_RESPONSE 5000 /* 4.5 - 5.5 seconds */
#define PD_T_DB_DETECT 10000 /* 10 - 15 seconds */
diff --git a/include/linux/usb/pd_vdo.h b/include/linux/usb/pd_vdo.h
index 781f4e98dd23..35b8e15efaa0 100644
--- a/include/linux/usb/pd_vdo.h
+++ b/include/linux/usb/pd_vdo.h
@@ -140,6 +140,38 @@
#define PD_PRODUCT_PID(vdo) (((vdo) >> 16) & 0xffff)
/*
+ * UFP VDO1
+ * --------
+ * <31:29> :: UFP VDO version
+ * <28> :: Reserved
+ * <27:24> :: Device capability
+ * <23:6> :: Reserved
+ * <5:3> :: Alternate modes
+ * <2:0> :: USB highest speed
+ */
+#define PD_VDO1_UFP_DEVCAP(vdo) (((vdo) & GENMASK(27, 24)) >> 24)
+
+#define DEV_USB2_CAPABLE BIT(0)
+#define DEV_USB2_BILLBOARD BIT(1)
+#define DEV_USB3_CAPABLE BIT(2)
+#define DEV_USB4_CAPABLE BIT(3)
+
+/*
+ * DFP VDO
+ * --------
+ * <31:29> :: DFP VDO version
+ * <28:27> :: Reserved
+ * <26:24> :: Host capability
+ * <23:5> :: Reserved
+ * <4:0> :: Port number
+ */
+#define PD_VDO_DFP_HOSTCAP(vdo) (((vdo) & GENMASK(26, 24)) >> 24)
+
+#define HOST_USB2_CAPABLE BIT(0)
+#define HOST_USB3_CAPABLE BIT(1)
+#define HOST_USB4_CAPABLE BIT(2)
+
+/*
* Cable VDO
* ---------
* <31:28> :: Cable HW version
diff --git a/include/linux/usb/quirks.h b/include/linux/usb/quirks.h
index a1be64c9940f..22c1f579afe3 100644
--- a/include/linux/usb/quirks.h
+++ b/include/linux/usb/quirks.h
@@ -69,4 +69,7 @@
/* Hub needs extra delay after resetting its port. */
#define USB_QUIRK_HUB_SLOW_RESET BIT(14)
+/* device has blacklisted endpoints */
+#define USB_QUIRK_ENDPOINT_BLACKLIST BIT(15)
+
#endif /* __LINUX_USB_QUIRKS_H */
diff --git a/include/linux/usb/renesas_usbhs.h b/include/linux/usb/renesas_usbhs.h
index 6914475bbc86..d418c55523a7 100644
--- a/include/linux/usb/renesas_usbhs.h
+++ b/include/linux/usb/renesas_usbhs.h
@@ -170,8 +170,6 @@ struct renesas_usbhs_driver_param {
*/
int pio_dma_border; /* default is 64byte */
- u32 enable_gpio;
-
/*
* option:
*/
diff --git a/include/linux/usb/role.h b/include/linux/usb/role.h
index 2d77f97df72d..0164fed31b06 100644
--- a/include/linux/usb/role.h
+++ b/include/linux/usb/role.h
@@ -13,8 +13,9 @@ enum usb_role {
USB_ROLE_DEVICE,
};
-typedef int (*usb_role_switch_set_t)(struct device *dev, enum usb_role role);
-typedef enum usb_role (*usb_role_switch_get_t)(struct device *dev);
+typedef int (*usb_role_switch_set_t)(struct usb_role_switch *sw,
+ enum usb_role role);
+typedef enum usb_role (*usb_role_switch_get_t)(struct usb_role_switch *sw);
/**
* struct usb_role_switch_desc - USB Role Switch Descriptor
@@ -25,6 +26,8 @@ typedef enum usb_role (*usb_role_switch_get_t)(struct device *dev);
* @set: Callback for setting the role
* @get: Callback for getting the role (optional)
* @allow_userspace_control: If true userspace may change the role through sysfs
+ * @driver_data: Private data pointer
+ * @name: Name for the switch (optional)
*
* @usb2_port and @usb3_port will point to the USB host port and @udc to the USB
* device controller behind the USB connector with the role switch. If
@@ -40,6 +43,8 @@ struct usb_role_switch_desc {
usb_role_switch_set_t set;
usb_role_switch_get_t get;
bool allow_userspace_control;
+ void *driver_data;
+ const char *name;
};
@@ -51,9 +56,15 @@ struct usb_role_switch *fwnode_usb_role_switch_get(struct fwnode_handle *node);
void usb_role_switch_put(struct usb_role_switch *sw);
struct usb_role_switch *
+usb_role_switch_find_by_fwnode(const struct fwnode_handle *fwnode);
+
+struct usb_role_switch *
usb_role_switch_register(struct device *parent,
const struct usb_role_switch_desc *desc);
void usb_role_switch_unregister(struct usb_role_switch *sw);
+
+void usb_role_switch_set_drvdata(struct usb_role_switch *sw, void *data);
+void *usb_role_switch_get_drvdata(struct usb_role_switch *sw);
#else
static inline int usb_role_switch_set_role(struct usb_role_switch *sw,
enum usb_role role)
@@ -87,6 +98,17 @@ usb_role_switch_register(struct device *parent,
}
static inline void usb_role_switch_unregister(struct usb_role_switch *sw) { }
+
+static inline void
+usb_role_switch_set_drvdata(struct usb_role_switch *sw, void *data)
+{
+}
+
+static inline void *usb_role_switch_get_drvdata(struct usb_role_switch *sw)
+{
+ return NULL;
+}
+
#endif
#endif /* __LINUX_USB_ROLE_H */
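With the set/get callbacks now taking the switch itself, a driver stashes its private state with usb_role_switch_set_drvdata() (or the new desc.driver_data field) rather than digging it out of the parent device. A minimal registration sketch; struct example_ctrl and its members are made up for illustration:

struct example_ctrl {
        struct device *dev;
        struct usb_role_switch *role_sw;
        enum usb_role role;
};

static int example_role_set(struct usb_role_switch *sw, enum usb_role role)
{
        struct example_ctrl *ctrl = usb_role_switch_get_drvdata(sw);

        ctrl->role = role;              /* program the mux/PHY here */
        return 0;
}

static int example_register_role_switch(struct example_ctrl *ctrl)
{
        struct usb_role_switch_desc desc = {
                .fwnode = dev_fwnode(ctrl->dev),
                .set = example_role_set,
                .driver_data = ctrl,
                .name = "example-role-switch",
                .allow_userspace_control = true,
        };

        ctrl->role_sw = usb_role_switch_register(ctrl->dev, &desc);
        return PTR_ERR_OR_ZERO(ctrl->role_sw);
}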
diff --git a/include/linux/usb/tcpm.h b/include/linux/usb/tcpm.h
index f516955a0cf4..e7979c01c351 100644
--- a/include/linux/usb/tcpm.h
+++ b/include/linux/usb/tcpm.h
@@ -46,45 +46,6 @@ enum tcpm_transmit_type {
TCPC_TX_BIST_MODE_2 = 7
};
-/**
- * struct tcpc_config - Port configuration
- * @src_pdo: PDO parameters sent to port partner as response to
- * PD_CTRL_GET_SOURCE_CAP message
- * @nr_src_pdo: Number of entries in @src_pdo
- * @snk_pdo: PDO parameters sent to partner as response to
- * PD_CTRL_GET_SINK_CAP message
- * @nr_snk_pdo: Number of entries in @snk_pdo
- * @operating_snk_mw:
- * Required operating sink power in mW
- * @type: Port type (TYPEC_PORT_DFP, TYPEC_PORT_UFP, or
- * TYPEC_PORT_DRP)
- * @default_role:
- * Default port role (TYPEC_SINK or TYPEC_SOURCE).
- * Set to TYPEC_NO_PREFERRED_ROLE if no default role.
- * @try_role_hw:True if try.{Src,Snk} is implemented in hardware
- * @alt_modes: List of supported alternate modes
- */
-struct tcpc_config {
- const u32 *src_pdo;
- unsigned int nr_src_pdo;
-
- const u32 *snk_pdo;
- unsigned int nr_snk_pdo;
-
- const u32 *snk_vdo;
- unsigned int nr_snk_vdo;
-
- unsigned int operating_snk_mw;
-
- enum typec_port_type type;
- enum typec_port_data data;
- enum typec_role default_role;
- bool try_role_hw; /* try.{src,snk} implemented in hardware */
- bool self_powered; /* port belongs to a self powered device */
-
- const struct typec_altmode_desc *alt_modes;
-};
-
/* Mux state attributes */
#define TCPC_MUX_USB_ENABLED BIT(0) /* USB enabled */
#define TCPC_MUX_DP_ENABLED BIT(1) /* DP enabled */
@@ -92,7 +53,6 @@ struct tcpc_config {
/**
* struct tcpc_dev - Port configuration and callback functions
- * @config: Pointer to port configuration
* @fwnode: Pointer to port fwnode
* @get_vbus: Called to read current VBUS state
* @get_current_limit:
@@ -121,7 +81,6 @@ struct tcpc_config {
* @mux: Pointer to multiplexer data
*/
struct tcpc_dev {
- const struct tcpc_config *config;
struct fwnode_handle *fwnode;
int (*init)(struct tcpc_dev *dev);
diff --git a/include/linux/usb/tegra_usb_phy.h b/include/linux/usb/tegra_usb_phy.h
index 0c5c3ea8b2d7..c29d1b4c9381 100644
--- a/include/linux/usb/tegra_usb_phy.h
+++ b/include/linux/usb/tegra_usb_phy.h
@@ -17,6 +17,7 @@
#define __TEGRA_USB_PHY_H
#include <linux/clk.h>
+#include <linux/gpio.h>
#include <linux/reset.h>
#include <linux/usb/otg.h>
@@ -76,8 +77,9 @@ struct tegra_usb_phy {
struct usb_phy u_phy;
bool is_legacy_phy;
bool is_ulpi_phy;
- int reset_gpio;
+ struct gpio_desc *reset_gpio;
struct reset_control *pad_rst;
+ bool powered_on;
};
void tegra_usb_phy_preresume(struct usb_phy *phy);
diff --git a/include/linux/usb/typec.h b/include/linux/usb/typec.h
index 7df4ecabc78a..b00a2642a9cd 100644
--- a/include/linux/usb/typec.h
+++ b/include/linux/usb/typec.h
@@ -9,6 +9,9 @@
#define USB_TYPEC_REV_1_0 0x100 /* 1.0 */
#define USB_TYPEC_REV_1_1 0x110 /* 1.1 */
#define USB_TYPEC_REV_1_2 0x120 /* 1.2 */
+#define USB_TYPEC_REV_1_3 0x130 /* 1.3 */
+#define USB_TYPEC_REV_1_4 0x140 /* 1.4 */
+#define USB_TYPEC_REV_2_0 0x200 /* 2.0 */
struct typec_partner;
struct typec_cable;
@@ -74,6 +77,7 @@ enum typec_orientation {
* @id_header: ID Header VDO
* @cert_stat: Cert Stat VDO
* @product: Product VDO
+ * @vdo: Product Type Specific VDOs
*
* USB power delivery Discover Identity command response data.
*
@@ -84,6 +88,7 @@ struct usb_pd_identity {
u32 id_header;
u32 cert_stat;
u32 product;
+ u32 vdo[3];
};
int typec_partner_set_identity(struct typec_partner *partner);
@@ -168,6 +173,23 @@ struct typec_partner_desc {
struct usb_pd_identity *identity;
};
+/**
+ * struct typec_operations - USB Type-C Port Operations
+ * @try_role: Set data role preference for DRP port
+ * @dr_set: Set Data Role
+ * @pr_set: Set Power Role
+ * @vconn_set: Source VCONN
+ * @port_type_set: Set port type
+ */
+struct typec_operations {
+ int (*try_role)(struct typec_port *port, int role);
+ int (*dr_set)(struct typec_port *port, enum typec_data_role role);
+ int (*pr_set)(struct typec_port *port, enum typec_role role);
+ int (*vconn_set)(struct typec_port *port, enum typec_role role);
+ int (*port_type_set)(struct typec_port *port,
+ enum typec_port_type type);
+};
+
/*
* struct typec_capability - USB Type-C Port Capabilities
* @type: Supported power role of the port
@@ -176,14 +198,9 @@ struct typec_partner_desc {
* @pd_revision: USB Power Delivery Specification revision if supported
* @prefer_role: Initial role preference (DRP ports).
* @accessory: Supported Accessory Modes
- * @sw: Cable plug orientation switch
- * @mux: Multiplexer switch for Alternate/Accessory Modes
* @fwnode: Optional fwnode of the port
- * @try_role: Set data role preference for DRP port
- * @dr_set: Set Data Role
- * @pr_set: Set Power Role
- * @vconn_set: Set VCONN Role
- * @port_type_set: Set port type
+ * @driver_data: Private pointer for driver specific info
+ * @ops: Port operations vector
*
* Static capabilities of a single USB Type-C port.
*/
@@ -194,22 +211,12 @@ struct typec_capability {
u16 pd_revision; /* 0300H = "3.0" */
int prefer_role;
enum typec_accessory accessory[TYPEC_MAX_ACCESSORY];
+ unsigned int orientation_aware:1;
- struct typec_switch *sw;
- struct typec_mux *mux;
struct fwnode_handle *fwnode;
+ void *driver_data;
- int (*try_role)(const struct typec_capability *,
- int role);
-
- int (*dr_set)(const struct typec_capability *,
- enum typec_data_role);
- int (*pr_set)(const struct typec_capability *,
- enum typec_role);
- int (*vconn_set)(const struct typec_capability *,
- enum typec_role);
- int (*port_type_set)(const struct typec_capability *,
- enum typec_port_type);
+ const struct typec_operations *ops;
};
/* Specific to try_role(). Indicates the user wants to clear the preference. */
@@ -227,6 +234,10 @@ struct typec_cable *typec_register_cable(struct typec_port *port,
struct typec_cable_desc *desc);
void typec_unregister_cable(struct typec_cable *cable);
+struct typec_cable *typec_cable_get(struct typec_port *port);
+void typec_cable_put(struct typec_cable *cable);
+int typec_cable_is_active(struct typec_cable *cable);
+
struct typec_plug *typec_register_plug(struct typec_cable *cable,
struct typec_plug_desc *desc);
void typec_unregister_plug(struct typec_plug *plug);
@@ -241,6 +252,8 @@ int typec_set_orientation(struct typec_port *port,
enum typec_orientation typec_get_orientation(struct typec_port *port);
int typec_set_mode(struct typec_port *port, int mode);
+void *typec_get_drvdata(struct typec_port *port);
+
int typec_find_port_power_role(const char *name);
int typec_find_power_role(const char *name);
int typec_find_port_data_role(const char *name);
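Port drivers now pass a constant typec_operations table plus a driver_data pointer instead of embedding callbacks in typec_capability, and fetch their state back with typec_get_drvdata(). A minimal registration sketch; struct example_port and the body of the dr_set handler are illustrative only:

struct example_port {
        struct device *dev;
        struct typec_port *port;
};

static int example_dr_set(struct typec_port *port, enum typec_data_role role)
{
        struct example_port *ep = typec_get_drvdata(port);

        /* reconfigure the controller owned by ep for host/device data role */
        return 0;
}

static const struct typec_operations example_typec_ops = {
        .dr_set = example_dr_set,
};

static int example_register_port(struct example_port *ep)
{
        struct typec_capability cap = {
                .type = TYPEC_PORT_DRP,
                .data = TYPEC_PORT_DRD,
                .revision = USB_TYPEC_REV_1_3,
                .prefer_role = TYPEC_NO_PREFERRED_ROLE,
                .driver_data = ep,
                .ops = &example_typec_ops,
        };

        ep->port = typec_register_port(ep->dev, &cap);
        return PTR_ERR_OR_ZERO(ep->port);
}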
diff --git a/include/linux/usb/typec_altmode.h b/include/linux/usb/typec_altmode.h
index 9a88c74a1d0d..d834e236c6df 100644
--- a/include/linux/usb/typec_altmode.h
+++ b/include/linux/usb/typec_altmode.h
@@ -55,7 +55,7 @@ static inline void *typec_altmode_get_drvdata(struct typec_altmode *altmode)
* @activate: User callback for Enter/Exit Mode
*/
struct typec_altmode_ops {
- int (*enter)(struct typec_altmode *altmode);
+ int (*enter)(struct typec_altmode *altmode, u32 *vdo);
int (*exit)(struct typec_altmode *altmode);
void (*attention)(struct typec_altmode *altmode, u32 vdo);
int (*vdm)(struct typec_altmode *altmode, const u32 hdr,
@@ -65,7 +65,7 @@ struct typec_altmode_ops {
int (*activate)(struct typec_altmode *altmode, int activate);
};
-int typec_altmode_enter(struct typec_altmode *altmode);
+int typec_altmode_enter(struct typec_altmode *altmode, u32 *vdo);
int typec_altmode_exit(struct typec_altmode *altmode);
void typec_altmode_attention(struct typec_altmode *altmode, u32 vdo);
int typec_altmode_vdm(struct typec_altmode *altmode,
@@ -101,6 +101,22 @@ enum {
TYPEC_MODE_DEBUG, /* Debug Accessory */
};
+/*
+ * USB4 also requires that the pins on the connector are repurposed, just like
+ * Alternate Modes. USB4 mode is however not entered with the Enter Mode Command
+ * like the Alternate Modes are, but instead with a special Enter_USB Message.
+ * The Enter_USB Message can also be used for setting the connector to operate in
+ * USB 3.2 or in USB 2.0 mode instead of USB4.
+ *
+ * The Enter_USB specific "USB Modes" are also supplied here as special modal
+ * state values, just like the Accessory Modes.
+ */
+enum {
+ TYPEC_MODE_USB2 = TYPEC_MODE_DEBUG, /* USB 2.0 mode */
+ TYPEC_MODE_USB3, /* USB 3.2 mode */
+ TYPEC_MODE_USB4 /* USB4 mode */
+};
+
#define TYPEC_MODAL_STATE(_state_) ((_state_) + TYPEC_STATE_MODAL)
struct typec_altmode *typec_altmode_get_plug(struct typec_altmode *altmode,
@@ -110,13 +126,6 @@ void typec_altmode_put_plug(struct typec_altmode *plug);
struct typec_altmode *typec_match_altmode(struct typec_altmode **altmodes,
size_t n, u16 svid, u8 mode);
-struct typec_altmode *
-typec_altmode_register_notifier(struct device *dev, u16 svid, u8 mode,
- struct notifier_block *nb);
-
-void typec_altmode_unregister_notifier(struct typec_altmode *adev,
- struct notifier_block *nb);
-
/**
* typec_altmode_get_orientation - Get cable plug orientation
* @altmode: Handle to the alternate mode
diff --git a/include/linux/usb/typec_mux.h b/include/linux/usb/typec_mux.h
index 873ace5b0cf8..a9d9957933dc 100644
--- a/include/linux/usb/typec_mux.h
+++ b/include/linux/usb/typec_mux.h
@@ -3,11 +3,13 @@
#ifndef __USB_TYPEC_MUX
#define __USB_TYPEC_MUX
+#include <linux/property.h>
#include <linux/usb/typec.h>
struct device;
struct typec_mux;
struct typec_switch;
+struct typec_altmode;
struct fwnode_handle;
typedef int (*typec_switch_set_fn_t)(struct typec_switch *sw,
@@ -16,11 +18,20 @@ typedef int (*typec_switch_set_fn_t)(struct typec_switch *sw,
struct typec_switch_desc {
struct fwnode_handle *fwnode;
typec_switch_set_fn_t set;
+ const char *name;
void *drvdata;
};
-struct typec_switch *typec_switch_get(struct device *dev);
+struct typec_switch *fwnode_typec_switch_get(struct fwnode_handle *fwnode);
void typec_switch_put(struct typec_switch *sw);
+int typec_switch_set(struct typec_switch *sw,
+ enum typec_orientation orientation);
+
+static inline struct typec_switch *typec_switch_get(struct device *dev)
+{
+ return fwnode_typec_switch_get(dev_fwnode(dev));
+}
+
struct typec_switch *
typec_switch_register(struct device *parent,
const struct typec_switch_desc *desc);
@@ -29,17 +40,33 @@ void typec_switch_unregister(struct typec_switch *sw);
void typec_switch_set_drvdata(struct typec_switch *sw, void *data);
void *typec_switch_get_drvdata(struct typec_switch *sw);
-typedef int (*typec_mux_set_fn_t)(struct typec_mux *mux, int state);
+struct typec_mux_state {
+ struct typec_altmode *alt;
+ unsigned long mode;
+ void *data;
+};
+
+typedef int (*typec_mux_set_fn_t)(struct typec_mux *mux,
+ struct typec_mux_state *state);
struct typec_mux_desc {
struct fwnode_handle *fwnode;
typec_mux_set_fn_t set;
+ const char *name;
void *drvdata;
};
-struct typec_mux *
-typec_mux_get(struct device *dev, const struct typec_altmode_desc *desc);
+struct typec_mux *fwnode_typec_mux_get(struct fwnode_handle *fwnode,
+ const struct typec_altmode_desc *desc);
void typec_mux_put(struct typec_mux *mux);
+int typec_mux_set(struct typec_mux *mux, struct typec_mux_state *state);
+
+static inline struct typec_mux *
+typec_mux_get(struct device *dev, const struct typec_altmode_desc *desc)
+{
+ return fwnode_typec_mux_get(dev_fwnode(dev), desc);
+}
+
struct typec_mux *
typec_mux_register(struct device *parent, const struct typec_mux_desc *desc);
void typec_mux_unregister(struct typec_mux *mux);
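The mux set callback now receives a typec_mux_state, so a mux driver can key its pin configuration off the active alternate mode rather than a bare integer. A minimal sketch of a mux driver's callback and registration (names are illustrative):

static int example_mux_set(struct typec_mux *mux, struct typec_mux_state *state)
{
        switch (state->mode) {
        case TYPEC_STATE_SAFE:
                /* put the high-speed/SBU lines into the safe state */
                break;
        case TYPEC_STATE_USB:
                /* plain USB operation */
                break;
        default:
                /* state->alt identifies the alternate mode that owns state->mode */
                break;
        }
        return 0;
}

static struct typec_mux *example_register_mux(struct device *dev)
{
        struct typec_mux_desc desc = {
                .fwnode = dev_fwnode(dev),
                .set = example_mux_set,
                .name = "example-mux",
        };

        return typec_mux_register(dev, &desc);
}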
diff --git a/include/linux/usb/typec_tbt.h b/include/linux/usb/typec_tbt.h
new file mode 100644
index 000000000000..47c2d501ddce
--- /dev/null
+++ b/include/linux/usb/typec_tbt.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __USB_TYPEC_TBT_H
+#define __USB_TYPEC_TBT_H
+
+#include <linux/usb/typec_altmode.h>
+
+#define USB_TYPEC_VENDOR_INTEL 0x8087
+/* Alias for convenience */
+#define USB_TYPEC_TBT_SID USB_TYPEC_VENDOR_INTEL
+
+/* Connector state for Thunderbolt3 */
+#define TYPEC_TBT_MODE TYPEC_STATE_MODAL
+
+/**
+ * struct typec_thunderbolt_data - Thunderbolt3 Alt Mode specific data
+ * @device_mode: Device Discover Mode VDO
+ * @cable_mode: Cable Discover Mode VDO
+ * @enter_vdo: Enter Mode VDO
+ */
+struct typec_thunderbolt_data {
+ u32 device_mode;
+ u32 cable_mode;
+ u32 enter_vdo;
+};
+
+/* TBT3 Device Discover Mode VDO bits */
+#define TBT_MODE BIT(0)
+#define TBT_ADAPTER(_vdo_) (((_vdo_) & BIT(16)) >> 16)
+#define TBT_ADAPTER_LEGACY 0
+#define TBT_ADAPTER_TBT3 1
+#define TBT_INTEL_SPECIFIC_B0 BIT(26)
+#define TBT_VENDOR_SPECIFIC_B0 BIT(30)
+#define TBT_VENDOR_SPECIFIC_B1 BIT(31)
+
+#define TBT_SET_ADAPTER(a) (((a) & 1) << 16)
+
+/* TBT3 Cable Discover Mode VDO bits */
+#define TBT_CABLE_SPEED(_vdo_) (((_vdo_) & GENMASK(18, 16)) >> 16)
+#define TBT_CABLE_USB3_GEN1 1
+#define TBT_CABLE_USB3_PASSIVE 2
+#define TBT_CABLE_10_AND_20GBPS 3
+#define TBT_CABLE_ROUNDED BIT(19)
+#define TBT_CABLE_OPTICAL BIT(21)
+#define TBT_CABLE_RETIMER BIT(22)
+#define TBT_CABLE_LINK_TRAINING BIT(23)
+
+#define TBT_SET_CABLE_SPEED(_s_) (((_s_) & GENMASK(2, 0)) << 16)
+
+/* TBT3 Device Enter Mode VDO bits */
+#define TBT_ENTER_MODE_CABLE_SPEED(s) TBT_SET_CABLE_SPEED(s)
+#define TBT_ENTER_MODE_ACTIVE_CABLE BIT(24)
+
+#endif /* __USB_TYPEC_TBT_H */
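The bit helpers above decode the Thunderbolt 3 Discover Mode VDOs. A short sketch of inspecting a cable VDO (the vdo value would come from the cable plug's Discover Mode response; the interpretation here is an illustration, not a statement of the TBT3 spec):

static bool example_cable_is_tbt3_40g(u32 cable_vdo)
{
        /* 10 Gb/s + 20 Gb/s rounded support == full Thunderbolt 3 rate */
        return TBT_CABLE_SPEED(cable_vdo) == TBT_CABLE_10_AND_20GBPS;
}

static bool example_cable_is_active(u32 cable_vdo)
{
        /*
         * Cables that advertise a retimer or link training are active;
         * such cables typically also need TBT_ENTER_MODE_ACTIVE_CABLE set
         * in the Enter Mode VDO, while passive cables do not.
         */
        return cable_vdo & (TBT_CABLE_RETIMER | TBT_CABLE_LINK_TRAINING);
}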
diff --git a/include/linux/usb/ulpi.h b/include/linux/usb/ulpi.h
index c515765adab7..36c2982780ad 100644
--- a/include/linux/usb/ulpi.h
+++ b/include/linux/usb/ulpi.h
@@ -55,12 +55,23 @@
#if IS_ENABLED(CONFIG_USB_ULPI)
struct usb_phy *otg_ulpi_create(struct usb_phy_io_ops *ops,
unsigned int flags);
+
+struct usb_phy *devm_otg_ulpi_create(struct device *dev,
+ struct usb_phy_io_ops *ops,
+ unsigned int flags);
#else
static inline struct usb_phy *otg_ulpi_create(struct usb_phy_io_ops *ops,
unsigned int flags)
{
return NULL;
}
+
+static inline struct usb_phy *devm_otg_ulpi_create(struct device *dev,
+ struct usb_phy_io_ops *ops,
+ unsigned int flags)
+{
+ return NULL;
+}
#endif
#ifdef CONFIG_USB_ULPI_VIEWPORT
diff --git a/include/linux/usb/usb_phy_generic.h b/include/linux/usb/usb_phy_generic.h
index 7408cf52c710..cd9e70a552a0 100644
--- a/include/linux/usb/usb_phy_generic.h
+++ b/include/linux/usb/usb_phy_generic.h
@@ -3,18 +3,6 @@
#define __LINUX_USB_NOP_XCEIV_H
#include <linux/usb/otg.h>
-#include <linux/gpio/consumer.h>
-
-struct usb_phy_generic_platform_data {
- enum usb_phy_type type;
- unsigned long clk_rate;
-
- /* if set fails with -EPROBE_DEFER if can't get regulator */
- unsigned int needs_vcc:1;
- unsigned int needs_reset:1; /* deprecated */
- int gpio_reset;
- struct gpio_desc *gpiod_vbus;
-};
#if IS_ENABLED(CONFIG_NOP_USB_XCEIV)
/* sometimes transceivers are accessed only through e.g. ULPI */
diff --git a/include/linux/usb/usbnet.h b/include/linux/usb/usbnet.h
index d8860f2d0976..b0bff3083278 100644
--- a/include/linux/usb/usbnet.h
+++ b/include/linux/usb/usbnet.h
@@ -253,7 +253,7 @@ extern int usbnet_open(struct net_device *net);
extern int usbnet_stop(struct net_device *net);
extern netdev_tx_t usbnet_start_xmit(struct sk_buff *skb,
struct net_device *net);
-extern void usbnet_tx_timeout(struct net_device *net);
+extern void usbnet_tx_timeout(struct net_device *net, unsigned int txqueue);
extern int usbnet_change_mtu(struct net_device *net, int new_mtu);
extern int usbnet_get_endpoints(struct usbnet *, struct usb_interface *);
diff --git a/include/linux/usb_usual.h b/include/linux/usb_usual.h
index 000a5954b2e8..4a19ac3f24d0 100644
--- a/include/linux/usb_usual.h
+++ b/include/linux/usb_usual.h
@@ -92,6 +92,6 @@ enum { US_DO_ALL_FLAGS };
#include <linux/usb/storage.h>
extern int usb_usual_ignore_device(struct usb_interface *intf);
-extern struct usb_device_id usb_storage_usb_ids[];
+extern const struct usb_device_id usb_storage_usb_ids[];
#endif /* __LINUX_USB_USUAL_H */
diff --git a/include/linux/usbdevice_fs.h b/include/linux/usbdevice_fs.h
index 79aab0065ec8..14ea197ce37f 100644
--- a/include/linux/usbdevice_fs.h
+++ b/include/linux/usbdevice_fs.h
@@ -69,7 +69,7 @@ struct usbdevfs_urb32 {
compat_int_t error_count;
compat_uint_t signr;
compat_caddr_t usercontext; /* unused */
- struct usbdevfs_iso_packet_desc iso_frame_desc[0];
+ struct usbdevfs_iso_packet_desc iso_frame_desc[];
};
struct usbdevfs_ioctl32 {
diff --git a/include/linux/user_namespace.h b/include/linux/user_namespace.h
index fb9f4f799554..6ef1c7109fc4 100644
--- a/include/linux/user_namespace.h
+++ b/include/linux/user_namespace.h
@@ -45,6 +45,7 @@ enum ucount_type {
UCOUNT_NET_NAMESPACES,
UCOUNT_MNT_NAMESPACES,
UCOUNT_CGROUP_NAMESPACES,
+ UCOUNT_TIME_NAMESPACES,
#ifdef CONFIG_INOTIFY_USER
UCOUNT_INOTIFY_INSTANCES,
UCOUNT_INOTIFY_WATCHES,
diff --git a/include/linux/uuid.h b/include/linux/uuid.h
index 0c631e2a73b6..d41b0d3e9474 100644
--- a/include/linux/uuid.h
+++ b/include/linux/uuid.h
@@ -43,6 +43,16 @@ static inline void guid_copy(guid_t *dst, const guid_t *src)
memcpy(dst, src, sizeof(guid_t));
}
+static inline void import_guid(guid_t *dst, const __u8 *src)
+{
+ memcpy(dst, src, sizeof(guid_t));
+}
+
+static inline void export_guid(__u8 *dst, const guid_t *src)
+{
+ memcpy(dst, src, sizeof(guid_t));
+}
+
static inline bool guid_is_null(const guid_t *guid)
{
return guid_equal(guid, &guid_null);
@@ -58,12 +68,23 @@ static inline void uuid_copy(uuid_t *dst, const uuid_t *src)
memcpy(dst, src, sizeof(uuid_t));
}
+static inline void import_uuid(uuid_t *dst, const __u8 *src)
+{
+ memcpy(dst, src, sizeof(uuid_t));
+}
+
+static inline void export_uuid(__u8 *dst, const uuid_t *src)
+{
+ memcpy(dst, src, sizeof(uuid_t));
+}
+
static inline bool uuid_is_null(const uuid_t *uuid)
{
return uuid_equal(uuid, &uuid_null);
}
void generate_random_uuid(unsigned char uuid[16]);
+void generate_random_guid(unsigned char guid[16]);
extern void guid_gen(guid_t *u);
extern void uuid_gen(uuid_t *u);
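import_guid()/export_guid() and their uuid_t counterparts move raw 16-byte identifiers between wire/firmware buffers and the typed representations without open-coded memcpy() calls. A small sketch:

#include <linux/uuid.h>

static bool example_id_matches(const __u8 raw[16], const guid_t *expected)
{
        guid_t id;

        import_guid(&id, raw);          /* copy the 16 raw bytes into the typed guid_t */
        return guid_equal(&id, expected);
}

static void example_id_to_buffer(__u8 raw[16])
{
        uuid_t id;

        uuid_gen(&id);                  /* random v4 UUID */
        export_uuid(raw, &id);          /* copy the typed uuid_t out to the raw buffer */
}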
@@ -77,7 +98,6 @@ int guid_parse(const char *uuid, guid_t *u);
int uuid_parse(const char *uuid, uuid_t *u);
/* backwards compatibility, don't use in new code */
-#define uuid_le_gen(u) guid_gen(u)
#define uuid_le_to_bin(guid, u) guid_parse(guid, u)
static inline int uuid_le_cmp(const guid_t u1, const guid_t u2)
diff --git a/include/linux/virtio_vsock.h b/include/linux/virtio_vsock.h
index 07875ccc7bb5..71c81e0dc8f2 100644
--- a/include/linux/virtio_vsock.h
+++ b/include/linux/virtio_vsock.h
@@ -7,9 +7,6 @@
#include <net/sock.h>
#include <net/af_vsock.h>
-#define VIRTIO_VSOCK_DEFAULT_MIN_BUF_SIZE 128
-#define VIRTIO_VSOCK_DEFAULT_BUF_SIZE (1024 * 256)
-#define VIRTIO_VSOCK_DEFAULT_MAX_BUF_SIZE (1024 * 256)
#define VIRTIO_VSOCK_DEFAULT_RX_BUF_SIZE (1024 * 4)
#define VIRTIO_VSOCK_MAX_BUF_SIZE 0xFFFFFFFFUL
#define VIRTIO_VSOCK_MAX_PKT_BUF_SIZE (1024 * 64)
@@ -25,11 +22,6 @@ enum {
struct virtio_vsock_sock {
struct vsock_sock *vsk;
- /* Protected by lock_sock(sk_vsock(trans->vsk)) */
- u32 buf_size;
- u32 buf_size_min;
- u32 buf_size_max;
-
spinlock_t tx_lock;
spinlock_t rx_lock;
@@ -92,12 +84,6 @@ s64 virtio_transport_stream_has_space(struct vsock_sock *vsk);
int virtio_transport_do_socket_init(struct vsock_sock *vsk,
struct vsock_sock *psk);
-u64 virtio_transport_get_buffer_size(struct vsock_sock *vsk);
-u64 virtio_transport_get_min_buffer_size(struct vsock_sock *vsk);
-u64 virtio_transport_get_max_buffer_size(struct vsock_sock *vsk);
-void virtio_transport_set_buffer_size(struct vsock_sock *vsk, u64 val);
-void virtio_transport_set_min_buffer_size(struct vsock_sock *vsk, u64 val);
-void virtio_transport_set_max_buffer_size(struct vsock_sock *vs, u64 val);
int
virtio_transport_notify_poll_in(struct vsock_sock *vsk,
size_t target,
@@ -124,6 +110,7 @@ int virtio_transport_notify_send_pre_enqueue(struct vsock_sock *vsk,
struct vsock_transport_send_notify_data *data);
int virtio_transport_notify_send_post_enqueue(struct vsock_sock *vsk,
ssize_t written, struct vsock_transport_send_notify_data *data);
+void virtio_transport_notify_buffer_size(struct vsock_sock *vsk, u64 *val);
u64 virtio_transport_stream_rcvhiwat(struct vsock_sock *vsk);
bool virtio_transport_stream_is_active(struct vsock_sock *vsk);
@@ -150,7 +137,8 @@ virtio_transport_dgram_enqueue(struct vsock_sock *vsk,
void virtio_transport_destruct(struct vsock_sock *vsk);
-void virtio_transport_recv_pkt(struct virtio_vsock_pkt *pkt);
+void virtio_transport_recv_pkt(struct virtio_transport *t,
+ struct virtio_vsock_pkt *pkt);
void virtio_transport_free_pkt(struct virtio_vsock_pkt *pkt);
void virtio_transport_inc_tx_pkt(struct virtio_vsock_sock *vvs, struct virtio_vsock_pkt *pkt);
u32 virtio_transport_get_credit(struct virtio_vsock_sock *vvs, u32 wanted);
diff --git a/include/linux/vm_sockets.h b/include/linux/vm_sockets.h
deleted file mode 100644
index 33f1a2ecd905..000000000000
--- a/include/linux/vm_sockets.h
+++ /dev/null
@@ -1,15 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * VMware vSockets Driver
- *
- * Copyright (C) 2007-2013 VMware, Inc. All rights reserved.
- */
-
-#ifndef _VM_SOCKETS_H
-#define _VM_SOCKETS_H
-
-#include <uapi/linux/vm_sockets.h>
-
-int vm_sockets_get_local_cid(void);
-
-#endif /* _VM_SOCKETS_H */
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index 4e7809408073..0507a162ccd0 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -10,6 +10,8 @@
#include <linux/rbtree.h>
#include <linux/overflow.h>
+#include <asm/vmalloc.h>
+
struct vm_area_struct; /* vma defining user mapping in mm_types.h */
struct notifier_block; /* in notifier.h */
@@ -22,6 +24,18 @@ struct notifier_block; /* in notifier.h */
#define VM_UNINITIALIZED 0x00000020 /* vm_struct is not fully initialized */
#define VM_NO_GUARD 0x00000040 /* don't add guard page */
#define VM_KASAN 0x00000080 /* has allocated kasan shadow memory */
+
+/*
+ * VM_KASAN is used slightly differently depending on CONFIG_KASAN_VMALLOC.
+ *
+ * If IS_ENABLED(CONFIG_KASAN_VMALLOC), VM_KASAN is set on a vm_struct after
+ * shadow memory has been mapped. It's used to handle allocation errors so that
+ * we don't try to poison shadow on free if it was never allocated.
+ *
+ * Otherwise, VM_KASAN is set for kasan_module_alloc() allocations and used to
+ * determine which allocations need the module shadow freed.
+ */
+
/*
* Memory with VM_FLUSH_RESET_PERMS cannot be freed in an interrupt or with
* vfree_atomic().
@@ -93,6 +107,7 @@ extern void *vzalloc(unsigned long size);
extern void *vmalloc_user(unsigned long size);
extern void *vmalloc_node(unsigned long size, int node);
extern void *vzalloc_node(unsigned long size, int node);
+extern void *vmalloc_user_node_flags(unsigned long size, int node, gfp_t flags);
extern void *vmalloc_exec(unsigned long size);
extern void *vmalloc_32(unsigned long size);
extern void *vmalloc_32_user(unsigned long size);
@@ -126,8 +141,9 @@ extern int remap_vmalloc_range_partial(struct vm_area_struct *vma,
extern int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
unsigned long pgoff);
-void vmalloc_sync_all(void);
-
+void vmalloc_sync_mappings(void);
+void vmalloc_sync_unmappings(void);
+
/*
* Lowlevel-APIs (not for driver use!)
*/
diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
index bdeda4b079fe..292485f3d24d 100644
--- a/include/linux/vmstat.h
+++ b/include/linux/vmstat.h
@@ -31,6 +31,12 @@ struct reclaim_stat {
unsigned nr_unmap_fail;
};
+enum writeback_stat_item {
+ NR_DIRTY_THRESHOLD,
+ NR_DIRTY_BG_THRESHOLD,
+ NR_VM_WRITEBACK_STAT_ITEMS,
+};
+
#ifdef CONFIG_VM_EVENT_COUNTERS
/*
* Light weight per cpu counter implementation.
@@ -381,4 +387,48 @@ static inline void __mod_zone_freepage_state(struct zone *zone, int nr_pages,
extern const char * const vmstat_text[];
+static inline const char *zone_stat_name(enum zone_stat_item item)
+{
+ return vmstat_text[item];
+}
+
+#ifdef CONFIG_NUMA
+static inline const char *numa_stat_name(enum numa_stat_item item)
+{
+ return vmstat_text[NR_VM_ZONE_STAT_ITEMS +
+ item];
+}
+#endif /* CONFIG_NUMA */
+
+static inline const char *node_stat_name(enum node_stat_item item)
+{
+ return vmstat_text[NR_VM_ZONE_STAT_ITEMS +
+ NR_VM_NUMA_STAT_ITEMS +
+ item];
+}
+
+static inline const char *lru_list_name(enum lru_list lru)
+{
+ return node_stat_name(NR_LRU_BASE + lru) + 3; // skip "nr_"
+}
+
+static inline const char *writeback_stat_name(enum writeback_stat_item item)
+{
+ return vmstat_text[NR_VM_ZONE_STAT_ITEMS +
+ NR_VM_NUMA_STAT_ITEMS +
+ NR_VM_NODE_STAT_ITEMS +
+ item];
+}
+
+#if defined(CONFIG_VM_EVENT_COUNTERS) || defined(CONFIG_MEMCG)
+static inline const char *vm_event_name(enum vm_event_item item)
+{
+ return vmstat_text[NR_VM_ZONE_STAT_ITEMS +
+ NR_VM_NUMA_STAT_ITEMS +
+ NR_VM_NODE_STAT_ITEMS +
+ NR_VM_WRITEBACK_STAT_ITEMS +
+ item];
+}
+#endif /* CONFIG_VM_EVENT_COUNTERS || CONFIG_MEMCG */
+
#endif /* _LINUX_VMSTAT_H */
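The stat-name helpers above index into vmstat_text with the same offsets that /proc/vmstat uses, so callers no longer hand-compute them. A small sketch that prints every per-node counter with its canonical name:

static void example_dump_node_stats(struct pglist_data *pgdat)
{
        enum node_stat_item i;

        for (i = 0; i < NR_VM_NODE_STAT_ITEMS; i++)
                pr_info("%s: %lu\n", node_stat_name(i),
                        node_page_state(pgdat, i));
}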
diff --git a/include/linux/vmw_vmci_api.h b/include/linux/vmw_vmci_api.h
index acd9fafe4fc6..f28907345c80 100644
--- a/include/linux/vmw_vmci_api.h
+++ b/include/linux/vmw_vmci_api.h
@@ -19,6 +19,7 @@
struct msghdr;
typedef void (vmci_device_shutdown_fn) (void *device_registration,
void *user_data);
+typedef void (*vmci_vsock_cb) (bool is_host);
int vmci_datagram_create_handle(u32 resource_id, u32 flags,
vmci_datagram_recv_cb recv_cb,
@@ -37,6 +38,7 @@ int vmci_doorbell_destroy(struct vmci_handle handle);
int vmci_doorbell_notify(struct vmci_handle handle, u32 priv_flags);
u32 vmci_get_context_id(void);
bool vmci_is_context_owner(u32 context_id, kuid_t uid);
+int vmci_register_vsock_callback(vmci_vsock_cb callback);
int vmci_event_subscribe(u32 event,
vmci_event_cb callback, void *callback_data,
diff --git a/include/linux/vtime.h b/include/linux/vtime.h
index a26ed10a4eac..2cdeca062db3 100644
--- a/include/linux/vtime.h
+++ b/include/linux/vtime.h
@@ -11,11 +11,15 @@
struct task_struct;
/*
- * vtime_accounting_cpu_enabled() definitions/declarations
+ * vtime_accounting_enabled_this_cpu() definitions/declarations
*/
#if defined(CONFIG_VIRT_CPU_ACCOUNTING_NATIVE)
-static inline bool vtime_accounting_cpu_enabled(void) { return true; }
+
+static inline bool vtime_accounting_enabled_this_cpu(void) { return true; }
+extern void vtime_task_switch(struct task_struct *prev);
+
#elif defined(CONFIG_VIRT_CPU_ACCOUNTING_GEN)
+
/*
* Checks if vtime is enabled on some CPU. Cputime readers want to be careful
* in that case and compute the tickless cputime.
@@ -24,46 +28,43 @@ static inline bool vtime_accounting_cpu_enabled(void) { return true; }
*/
static inline bool vtime_accounting_enabled(void)
{
- return context_tracking_is_enabled();
+ return context_tracking_enabled();
}
-static inline bool vtime_accounting_cpu_enabled(void)
+static inline bool vtime_accounting_enabled_cpu(int cpu)
{
- if (vtime_accounting_enabled()) {
- if (context_tracking_cpu_is_enabled())
- return true;
- }
-
- return false;
+ return context_tracking_enabled_cpu(cpu);
}
-#else /* !CONFIG_VIRT_CPU_ACCOUNTING */
-static inline bool vtime_accounting_cpu_enabled(void) { return false; }
-#endif
+static inline bool vtime_accounting_enabled_this_cpu(void)
+{
+ return context_tracking_enabled_this_cpu();
+}
-/*
- * Common vtime APIs
- */
-#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+extern void vtime_task_switch_generic(struct task_struct *prev);
-#ifdef __ARCH_HAS_VTIME_TASK_SWITCH
-extern void vtime_task_switch(struct task_struct *prev);
-#else
-extern void vtime_common_task_switch(struct task_struct *prev);
static inline void vtime_task_switch(struct task_struct *prev)
{
- if (vtime_accounting_cpu_enabled())
- vtime_common_task_switch(prev);
+ if (vtime_accounting_enabled_this_cpu())
+ vtime_task_switch_generic(prev);
}
-#endif /* __ARCH_HAS_VTIME_TASK_SWITCH */
-
-extern void vtime_account_system(struct task_struct *tsk);
-extern void vtime_account_idle(struct task_struct *tsk);
#else /* !CONFIG_VIRT_CPU_ACCOUNTING */
+static inline bool vtime_accounting_enabled_cpu(int cpu) { return false; }
+static inline bool vtime_accounting_enabled_this_cpu(void) { return false; }
static inline void vtime_task_switch(struct task_struct *prev) { }
-static inline void vtime_account_system(struct task_struct *tsk) { }
+
+#endif
+
+/*
+ * Common vtime APIs
+ */
+#ifdef CONFIG_VIRT_CPU_ACCOUNTING
+extern void vtime_account_kernel(struct task_struct *tsk);
+extern void vtime_account_idle(struct task_struct *tsk);
+#else /* !CONFIG_VIRT_CPU_ACCOUNTING */
+static inline void vtime_account_kernel(struct task_struct *tsk) { }
#endif /* !CONFIG_VIRT_CPU_ACCOUNTING */
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_GEN
@@ -86,7 +87,7 @@ extern void vtime_account_irq_enter(struct task_struct *tsk);
static inline void vtime_account_irq_exit(struct task_struct *tsk)
{
/* On hard|softirq exit we always account to hard|softirq cputime */
- vtime_account_system(tsk);
+ vtime_account_kernel(tsk);
}
extern void vtime_flush(struct task_struct *tsk);
#else /* !CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
diff --git a/include/linux/w1.h b/include/linux/w1.h
index 7da0c7588e04..cebf3464bc03 100644
--- a/include/linux/w1.h
+++ b/include/linux/w1.h
@@ -262,6 +262,7 @@ struct w1_family_ops {
* @family_entry: family linked list
* @fid: 8 bit family identifier
* @fops: operations for this family
+ * @of_match_table: open firmware match table
* @refcnt: reference counter
*/
struct w1_family {
diff --git a/include/linux/wait.h b/include/linux/wait.h
index 3eb7cae8206c..feeb6be5cad6 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -20,6 +20,7 @@ int default_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int
#define WQ_FLAG_EXCLUSIVE 0x01
#define WQ_FLAG_WOKEN 0x02
#define WQ_FLAG_BOOKMARK 0x04
+#define WQ_FLAG_CUSTOM 0x08
/*
* A single wait-queue entry structure:
@@ -201,9 +202,10 @@ void __wake_up(struct wait_queue_head *wq_head, unsigned int mode, int nr, void
void __wake_up_locked_key(struct wait_queue_head *wq_head, unsigned int mode, void *key);
void __wake_up_locked_key_bookmark(struct wait_queue_head *wq_head,
unsigned int mode, void *key, wait_queue_entry_t *bookmark);
-void __wake_up_sync_key(struct wait_queue_head *wq_head, unsigned int mode, int nr, void *key);
+void __wake_up_sync_key(struct wait_queue_head *wq_head, unsigned int mode, void *key);
+void __wake_up_locked_sync_key(struct wait_queue_head *wq_head, unsigned int mode, void *key);
void __wake_up_locked(struct wait_queue_head *wq_head, unsigned int mode, int nr);
-void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode, int nr);
+void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode);
#define wake_up(x) __wake_up(x, TASK_NORMAL, 1, NULL)
#define wake_up_nr(x, nr) __wake_up(x, TASK_NORMAL, nr, NULL)
@@ -214,7 +216,7 @@ void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode, int nr);
#define wake_up_interruptible(x) __wake_up(x, TASK_INTERRUPTIBLE, 1, NULL)
#define wake_up_interruptible_nr(x, nr) __wake_up(x, TASK_INTERRUPTIBLE, nr, NULL)
#define wake_up_interruptible_all(x) __wake_up(x, TASK_INTERRUPTIBLE, 0, NULL)
-#define wake_up_interruptible_sync(x) __wake_up_sync((x), TASK_INTERRUPTIBLE, 1)
+#define wake_up_interruptible_sync(x) __wake_up_sync((x), TASK_INTERRUPTIBLE)
/*
* Wakeup macros to be used to report events to the targets.
@@ -228,7 +230,9 @@ void __wake_up_sync(struct wait_queue_head *wq_head, unsigned int mode, int nr);
#define wake_up_interruptible_poll(x, m) \
__wake_up(x, TASK_INTERRUPTIBLE, 1, poll_to_key(m))
#define wake_up_interruptible_sync_poll(x, m) \
- __wake_up_sync_key((x), TASK_INTERRUPTIBLE, 1, poll_to_key(m))
+ __wake_up_sync_key((x), TASK_INTERRUPTIBLE, poll_to_key(m))
+#define wake_up_interruptible_sync_poll_locked(x, m) \
+ __wake_up_locked_sync_key((x), TASK_INTERRUPTIBLE, poll_to_key(m))
#define ___wait_cond_timeout(condition) \
({ \
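A minimal sketch (not part of the patch) of the reworked sync wakeup helpers:
the nr_exclusive argument is gone, and a _locked variant is available for
callers that already hold the wait-queue lock. The wait-queue head and the
EPOLLIN mask below are illustrative.

static void notify_readable(struct wait_queue_head *wq)
{
	/* Synchronous wakeup of one interruptible waiter polling for input. */
	wake_up_interruptible_sync_poll(wq, EPOLLIN);
}

static void notify_readable_locked(struct wait_queue_head *wq)
{
	/* Caller already holds wq->lock, so use the _locked variant. */
	wake_up_interruptible_sync_poll_locked(wq, EPOLLIN);
}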
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 4261d1c6e87b..e48554e6526c 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -487,6 +487,19 @@ extern void wq_worker_comm(char *buf, size_t size, struct task_struct *task);
*
* We queue the work to the CPU on which it was submitted, but if the CPU dies
* it can be processed by another CPU.
+ *
+ * Memory-ordering properties: If it returns %true, guarantees that all stores
+ * preceding the call to queue_work() in the program order will be visible from
+ * the CPU which will execute @work by the time such work executes, e.g.,
+ *
+ * { x is initially 0 }
+ *
+ * CPU0 CPU1
+ *
+ * WRITE_ONCE(x, 1); [ @work is being executed ]
+ * r0 = queue_work(wq, work); r1 = READ_ONCE(x);
+ *
+ * Forbids: r0 == true && r1 == 0
*/
static inline bool queue_work(struct workqueue_struct *wq,
struct work_struct *work)
@@ -546,6 +559,9 @@ static inline bool schedule_work_on(int cpu, struct work_struct *work)
* This puts a job in the kernel-global workqueue if it was not already
* queued and leaves it in the same position on the kernel-global
* workqueue otherwise.
+ *
+ * Shares the same memory-ordering properties as queue_work(), cf. the
+ * DocBook header of queue_work().
*/
static inline bool schedule_work(struct work_struct *work)
{
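A minimal sketch (not part of the patch) of the ordering guarantee documented
above: a store made before a successful queue_work() is visible to the CPU
that runs the work. The names shared_x, my_wq and my_work_fn are illustrative.

static int shared_x;
static struct workqueue_struct *my_wq;

static void my_work_fn(struct work_struct *work)
{
	/* If the matching queue_work() returned true, this cannot read 0. */
	pr_info("x = %d\n", READ_ONCE(shared_x));
}
static DECLARE_WORK(my_work, my_work_fn);

static void producer(void)
{
	WRITE_ONCE(shared_x, 1);
	queue_work(my_wq, &my_work);	/* orders the store above */
}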
diff --git a/include/linux/ww_mutex.h b/include/linux/ww_mutex.h
index 3af7c0e03be5..d7554252404c 100644
--- a/include/linux/ww_mutex.h
+++ b/include/linux/ww_mutex.h
@@ -182,7 +182,7 @@ static inline void ww_acquire_done(struct ww_acquire_ctx *ctx)
static inline void ww_acquire_fini(struct ww_acquire_ctx *ctx)
{
#ifdef CONFIG_DEBUG_MUTEXES
- mutex_release(&ctx->dep_map, 0, _THIS_IP_);
+ mutex_release(&ctx->dep_map, _THIS_IP_);
DEBUG_LOCKS_WARN_ON(ctx->acquired);
if (!IS_ENABLED(CONFIG_PROVE_LOCKING))
diff --git a/include/linux/xarray.h b/include/linux/xarray.h
index 86eecbd98e84..f73e1775ded0 100644
--- a/include/linux/xarray.h
+++ b/include/linux/xarray.h
@@ -417,6 +417,36 @@ static inline bool xa_marked(const struct xarray *xa, xa_mark_t mark)
}
/**
+ * xa_for_each_range() - Iterate over a portion of an XArray.
+ * @xa: XArray.
+ * @index: Index of @entry.
+ * @entry: Entry retrieved from array.
+ * @start: First index to retrieve from array.
+ * @last: Last index to retrieve from array.
+ *
+ * During the iteration, @entry will have the value of the entry stored
+ * in @xa at @index. You may modify @index during the iteration if you
+ * want to skip or reprocess indices. It is safe to modify the array
+ * during the iteration. At the end of the iteration, @entry will be set
+ * to NULL and @index will have a value less than or equal to @last.
+ *
+ * xa_for_each_range() is O(n.log(n)) while xas_for_each() is O(n). You have
+ * to handle your own locking with xas_for_each(), and if you have to unlock
+ * after each iteration, it will also end up being O(n.log(n)).
+ * xa_for_each_range() will spin if it hits a retry entry; if you intend to
+ * see retry entries, you should use the xas_for_each() iterator instead.
+ * The xas_for_each() iterator will expand into more inline code than
+ * xa_for_each_range().
+ *
+ * Context: Any context. Takes and releases the RCU lock.
+ */
+#define xa_for_each_range(xa, index, entry, start, last) \
+ for (index = start, \
+ entry = xa_find(xa, &index, last, XA_PRESENT); \
+ entry; \
+ entry = xa_find_after(xa, &index, last, XA_PRESENT))
+
+/**
* xa_for_each_start() - Iterate over a portion of an XArray.
* @xa: XArray.
* @index: Index of @entry.
@@ -439,11 +469,8 @@ static inline bool xa_marked(const struct xarray *xa, xa_mark_t mark)
*
* Context: Any context. Takes and releases the RCU lock.
*/
-#define xa_for_each_start(xa, index, entry, start) \
- for (index = start, \
- entry = xa_find(xa, &index, ULONG_MAX, XA_PRESENT); \
- entry; \
- entry = xa_find_after(xa, &index, ULONG_MAX, XA_PRESENT))
+#define xa_for_each_start(xa, index, entry, start) \
+ xa_for_each_range(xa, index, entry, start, ULONG_MAX)
/**
* xa_for_each() - Iterate over present entries in an XArray.
@@ -508,6 +535,14 @@ static inline bool xa_marked(const struct xarray *xa, xa_mark_t mark)
spin_lock_irqsave(&(xa)->xa_lock, flags)
#define xa_unlock_irqrestore(xa, flags) \
spin_unlock_irqrestore(&(xa)->xa_lock, flags)
+#define xa_lock_nested(xa, subclass) \
+ spin_lock_nested(&(xa)->xa_lock, subclass)
+#define xa_lock_bh_nested(xa, subclass) \
+ spin_lock_bh_nested(&(xa)->xa_lock, subclass)
+#define xa_lock_irq_nested(xa, subclass) \
+ spin_lock_irq_nested(&(xa)->xa_lock, subclass)
+#define xa_lock_irqsave_nested(xa, flags, subclass) \
+ spin_lock_irqsave_nested(&(xa)->xa_lock, flags, subclass)
/*
* Versions of the normal API which require the caller to hold the
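A minimal sketch (not part of the patch) showing the new xa_for_each_range()
iterator: only entries whose index falls in the requested range are visited,
with the RCU lock taken and released internally. The array and the index
bounds are illustrative.

static void dump_slice(struct xarray *xa)
{
	unsigned long index;
	void *entry;

	/* Visit present entries at indices 16..31 only. */
	xa_for_each_range(xa, index, entry, 16, 31)
		pr_info("index %lu -> %p\n", index, entry);
}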
diff --git a/include/linux/zlib.h b/include/linux/zlib.h
index 92dbbd3f6c75..c757d848a758 100644
--- a/include/linux/zlib.h
+++ b/include/linux/zlib.h
@@ -191,6 +191,12 @@ extern int zlib_deflate_workspacesize (int windowBits, int memLevel);
exceed those passed here.
*/
+extern int zlib_deflate_dfltcc_enabled (void);
+/*
+ Returns 1 if Deflate-Conversion facility is installed and enabled,
+ otherwise 0.
+*/
+
/*
extern int deflateInit (z_streamp strm, int level);
diff --git a/include/linux/zorro.h b/include/linux/zorro.h
index 63fbba0740c2..e2e4de188d84 100644
--- a/include/linux/zorro.h
+++ b/include/linux/zorro.h
@@ -41,13 +41,6 @@ struct zorro_dev {
/*
- * Zorro bus
- */
-
-extern struct bus_type zorro_bus_type;
-
-
- /*
* Zorro device drivers
*/
@@ -70,11 +63,6 @@ struct zorro_driver {
/* New-style probing */
extern int zorro_register_driver(struct zorro_driver *);
extern void zorro_unregister_driver(struct zorro_driver *);
-extern const struct zorro_device_id *zorro_match_device(const struct zorro_device_id *ids, const struct zorro_dev *z);
-static inline struct zorro_driver *zorro_dev_driver(const struct zorro_dev *z)
-{
- return z->driver;
-}
extern unsigned int zorro_num_autocon; /* # of autoconfig devices found */
diff --git a/include/media/cec-notifier.h b/include/media/cec-notifier.h
index f161f8a493ac..38956969fd12 100644
--- a/include/media/cec-notifier.h
+++ b/include/media/cec-notifier.h
@@ -20,37 +20,14 @@ struct cec_notifier;
#if IS_REACHABLE(CONFIG_CEC_CORE) && IS_ENABLED(CONFIG_CEC_NOTIFIER)
/**
- * cec_notifier_get_conn - find or create a new cec_notifier for the given
- * device and connector tuple.
- * @dev: device that sends the events.
- * @conn: the connector name from which the event occurs
- *
- * If a notifier for device @dev already exists, then increase the refcount
- * and return that notifier.
- *
- * If it doesn't exist, then allocate a new notifier struct and return a
- * pointer to that new struct.
- *
- * Return NULL if the memory could not be allocated.
- */
-struct cec_notifier *cec_notifier_get_conn(struct device *dev,
- const char *conn);
-
-/**
- * cec_notifier_put - decrease refcount and delete when the refcount reaches 0.
- * @n: notifier
- */
-void cec_notifier_put(struct cec_notifier *n);
-
-/**
* cec_notifier_conn_register - find or create a new cec_notifier for the given
* HDMI device and connector tuple.
* @hdmi_dev: HDMI device that sends the events.
- * @conn_name: the connector name from which the event occurs. May be NULL
+ * @port_name: the connector name from which the event occurs. May be NULL
* if there is always only one HDMI connector created by the HDMI device.
* @conn_info: the connector info from which the event occurs (may be NULL)
*
- * If a notifier for device @dev and connector @conn_name already exists, then
+ * If a notifier for @hdmi_dev and connector @port_name already exists, then
* increase the refcount and return that notifier.
*
* If it doesn't exist, then allocate a new notifier struct and return a
@@ -59,7 +36,7 @@ void cec_notifier_put(struct cec_notifier *n);
* Return NULL if the memory could not be allocated.
*/
struct cec_notifier *
-cec_notifier_conn_register(struct device *hdmi_dev, const char *conn_name,
+cec_notifier_conn_register(struct device *hdmi_dev, const char *port_name,
const struct cec_connector_info *conn_info);
/**
@@ -73,11 +50,11 @@ void cec_notifier_conn_unregister(struct cec_notifier *n);
* cec_notifier_cec_adap_register - find or create a new cec_notifier for the
* given device.
* @hdmi_dev: HDMI device that sends the events.
- * @conn_name: the connector name from which the event occurs. May be NULL
+ * @port_name: the connector name from which the event occurs. May be NULL
* if there is always only one HDMI connector created by the HDMI device.
* @adap: the cec adapter that registered this notifier.
*
- * If a notifier for device @dev and connector @conn_name already exists, then
+ * If a notifier for @hdmi_dev and connector @port_name already exists, then
* increase the refcount and return that notifier.
*
* If it doesn't exist, then allocate a new notifier struct and return a
@@ -86,15 +63,17 @@ void cec_notifier_conn_unregister(struct cec_notifier *n);
* Return NULL if the memory could not be allocated.
*/
struct cec_notifier *
-cec_notifier_cec_adap_register(struct device *hdmi_dev, const char *conn_name,
+cec_notifier_cec_adap_register(struct device *hdmi_dev, const char *port_name,
struct cec_adapter *adap);
/**
* cec_notifier_cec_adap_unregister - decrease refcount and delete when the
* refcount reaches 0.
* @n: notifier. If NULL, then this function does nothing.
+ * @adap: the cec adapter that registered this notifier.
*/
-void cec_notifier_cec_adap_unregister(struct cec_notifier *n);
+void cec_notifier_cec_adap_unregister(struct cec_notifier *n,
+ struct cec_adapter *adap);
/**
* cec_notifier_set_phys_addr - set a new physical address.
@@ -129,19 +108,9 @@ void cec_notifier_set_phys_addr_from_edid(struct cec_notifier *n,
struct device *cec_notifier_parse_hdmi_phandle(struct device *dev);
#else
-static inline struct cec_notifier *cec_notifier_get_conn(struct device *dev,
- const char *conn)
-{
- /* A non-NULL pointer is expected on success */
- return (struct cec_notifier *)0xdeadfeed;
-}
-
-static inline void cec_notifier_put(struct cec_notifier *n)
-{
-}
static inline struct cec_notifier *
-cec_notifier_conn_register(struct device *hdmi_dev, const char *conn_name,
+cec_notifier_conn_register(struct device *hdmi_dev, const char *port_name,
const struct cec_connector_info *conn_info)
{
/* A non-NULL pointer is expected on success */
@@ -153,14 +122,15 @@ static inline void cec_notifier_conn_unregister(struct cec_notifier *n)
}
static inline struct cec_notifier *
-cec_notifier_cec_adap_register(struct device *hdmi_dev, const char *conn_name,
+cec_notifier_cec_adap_register(struct device *hdmi_dev, const char *port_name,
struct cec_adapter *adap)
{
/* A non-NULL pointer is expected on success */
return (struct cec_notifier *)0xdeadfeed;
}
-static inline void cec_notifier_cec_adap_unregister(struct cec_notifier *n)
+static inline void cec_notifier_cec_adap_unregister(struct cec_notifier *n,
+ struct cec_adapter *adap)
{
}
@@ -181,23 +151,6 @@ static inline struct device *cec_notifier_parse_hdmi_phandle(struct device *dev)
#endif
/**
- * cec_notifier_get - find or create a new cec_notifier for the given device.
- * @dev: device that sends the events.
- *
- * If a notifier for device @dev already exists, then increase the refcount
- * and return that notifier.
- *
- * If it doesn't exist, then allocate a new notifier struct and return a
- * pointer to that new struct.
- *
- * Return NULL if the memory could not be allocated.
- */
-static inline struct cec_notifier *cec_notifier_get(struct device *dev)
-{
- return cec_notifier_get_conn(dev, NULL);
-}
-
-/**
* cec_notifier_phys_addr_invalidate() - set the physical address to INVALID
*
* @n: the CEC notifier
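A minimal sketch (not part of the patch) of the reworked notifier API from an
HDMI driver's point of view: registration takes an optional port name and the
CEC adapter, and unregistration now takes the adapter as well. The struct
my_hdmi_priv and the helper names are hypothetical.

struct my_hdmi_priv {
	struct cec_adapter *adap;
	struct cec_notifier *notifier;
};

static int my_hdmi_register_notifier(struct device *hdmi_dev,
				     struct my_hdmi_priv *priv)
{
	/* NULL port name: the device exposes a single HDMI connector. */
	priv->notifier = cec_notifier_cec_adap_register(hdmi_dev, NULL,
							priv->adap);
	if (!priv->notifier)
		return -ENOMEM;
	return 0;
}

static void my_hdmi_unregister_notifier(struct my_hdmi_priv *priv)
{
	cec_notifier_cec_adap_unregister(priv->notifier, priv->adap);
}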
diff --git a/include/media/cec-pin.h b/include/media/cec-pin.h
index 604e79cb6cbf..88c8b016eb09 100644
--- a/include/media/cec-pin.h
+++ b/include/media/cec-pin.h
@@ -29,8 +29,11 @@
* an error if negative. If NULL or -ENOTTY is returned,
* then this is not supported.
*
- * These operations are used by the cec pin framework to manipulate
- * the CEC pin.
+ * @received: optional. High-level CEC message callback. Allows the driver
+ * to process CEC messages.
+ *
+ * These operations (except for the @received op) are used by the
+ * cec pin framework to manipulate the CEC pin.
*/
struct cec_pin_ops {
bool (*read)(struct cec_adapter *adap);
@@ -42,6 +45,9 @@ struct cec_pin_ops {
void (*status)(struct cec_adapter *adap, struct seq_file *file);
int (*read_hpd)(struct cec_adapter *adap);
int (*read_5v)(struct cec_adapter *adap);
+
+ /* High-level CEC message callback */
+ int (*received)(struct cec_adapter *adap, struct cec_msg *msg);
};
/**
diff --git a/include/media/cec.h b/include/media/cec.h
index 4d59387bc61b..972bc8cd4384 100644
--- a/include/media/cec.h
+++ b/include/media/cec.h
@@ -18,9 +18,6 @@
#include <linux/cec-funcs.h>
#include <media/rc-core.h>
-/* CEC_ADAP_G_CONNECTOR_INFO is available */
-#define CEC_CAP_CONNECTOR_INFO (1 << 8)
-
#define CEC_CAP_DEFAULTS (CEC_CAP_LOG_ADDRS | CEC_CAP_TRANSMIT | \
CEC_CAP_PASSTHROUGH | CEC_CAP_RC)
@@ -147,34 +144,6 @@ struct cec_adap_ops {
*/
#define CEC_MAX_MSG_TX_QUEUE_SZ (18 * 1)
-/**
- * struct cec_drm_connector_info - tells which drm connector is
- * associated with the CEC adapter.
- * @card_no: drm card number
- * @connector_id: drm connector ID
- */
-struct cec_drm_connector_info {
- __u32 card_no;
- __u32 connector_id;
-};
-
-#define CEC_CONNECTOR_TYPE_NO_CONNECTOR 0
-#define CEC_CONNECTOR_TYPE_DRM 1
-
-/**
- * struct cec_connector_info - tells if and which connector is
- * associated with the CEC adapter.
- * @type: connector type (if any)
- * @drm: drm connector info
- */
-struct cec_connector_info {
- __u32 type;
- union {
- struct cec_drm_connector_info drm;
- __u32 raw[16];
- };
-};
-
struct cec_adapter {
struct module *owner;
char name[32];
@@ -417,52 +386,6 @@ cec_fill_conn_info_from_drm(struct cec_connector_info *conn_info,
#endif
-#if IS_REACHABLE(CONFIG_CEC_CORE) && IS_ENABLED(CONFIG_CEC_NOTIFIER)
-
-/**
- * cec_notifier_register - register a callback with the notifier
- * @n: the CEC notifier
- * @adap: the CEC adapter, passed as argument to the callback function
- * @callback: the callback function
- */
-void cec_notifier_register(struct cec_notifier *n,
- struct cec_adapter *adap,
- void (*callback)(struct cec_adapter *adap, u16 pa));
-
-/**
- * cec_notifier_unregister - unregister the callback from the notifier.
- * @n: the CEC notifier
- */
-void cec_notifier_unregister(struct cec_notifier *n);
-
-/**
- * cec_register_cec_notifier - register the notifier with the cec adapter.
- * @adap: the CEC adapter
- * @notifier: the CEC notifier
- */
-void cec_register_cec_notifier(struct cec_adapter *adap,
- struct cec_notifier *notifier);
-
-#else
-
-static inline void
-cec_notifier_register(struct cec_notifier *n,
- struct cec_adapter *adap,
- void (*callback)(struct cec_adapter *adap, u16 pa))
-{
-}
-
-static inline void cec_notifier_unregister(struct cec_notifier *n)
-{
-}
-
-static inline void cec_register_cec_notifier(struct cec_adapter *adap,
- struct cec_notifier *notifier)
-{
-}
-
-#endif
-
/**
* cec_phys_addr_invalidate() - set the physical address to INVALID
*
diff --git a/include/media/dvb-usb-ids.h b/include/media/dvb-usb-ids.h
index 7ce4e8332421..800d473b03c4 100644
--- a/include/media/dvb-usb-ids.h
+++ b/include/media/dvb-usb-ids.h
@@ -180,6 +180,7 @@
#define USB_PID_TERRATEC_CINERGY_T_STICK_RC 0x0097
#define USB_PID_TERRATEC_CINERGY_T_STICK_DUAL_RC 0x0099
#define USB_PID_TERRATEC_CINERGY_T_STICK_BLACK_REV1 0x00a9
+#define USB_PID_TERRATEC_CINERGY_TC2_STICK 0x10b2
#define USB_PID_TWINHAN_VP7041_COLD 0x3201
#define USB_PID_TWINHAN_VP7041_WARM 0x3202
#define USB_PID_TWINHAN_VP7020_COLD 0x3203
@@ -389,6 +390,7 @@
#define USB_PID_MYGICA_T230 0xc688
#define USB_PID_MYGICA_T230C 0xc689
#define USB_PID_MYGICA_T230C2 0xc68a
+#define USB_PID_MYGICA_T230C_LITE 0xc699
#define USB_PID_ELGATO_EYETV_DIVERSITY 0x0011
#define USB_PID_ELGATO_EYETV_DTT 0x0021
#define USB_PID_ELGATO_EYETV_DTT_2 0x003f
@@ -424,4 +426,5 @@
#define USB_PID_EVOLVEO_XTRATV_STICK 0xa115
#define USB_PID_HAMA_DVBT_HYBRID 0x2758
#define USB_PID_XBOX_ONE_TUNER 0x02d5
+#define USB_PID_PROLECTRIX_DV107669 0xd803
#endif
diff --git a/include/media/h264-ctrls.h b/include/media/h264-ctrls.h
index e877bf1d537c..1c6ff7d63bca 100644
--- a/include/media/h264-ctrls.h
+++ b/include/media/h264-ctrls.h
@@ -185,6 +185,8 @@ struct v4l2_ctrl_h264_slice_params {
#define V4L2_H264_DPB_ENTRY_FLAG_VALID 0x01
#define V4L2_H264_DPB_ENTRY_FLAG_ACTIVE 0x02
#define V4L2_H264_DPB_ENTRY_FLAG_LONG_TERM 0x04
+#define V4L2_H264_DPB_ENTRY_FLAG_FIELD 0x08
+#define V4L2_H264_DPB_ENTRY_FLAG_BOTTOM_FIELD 0x10
struct v4l2_h264_dpb_entry {
__u64 reference_ts;
diff --git a/include/media/hevc-ctrls.h b/include/media/hevc-ctrls.h
new file mode 100644
index 000000000000..1009cf0891cc
--- /dev/null
+++ b/include/media/hevc-ctrls.h
@@ -0,0 +1,212 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * These are the HEVC state controls for use with stateless HEVC
+ * codec drivers.
+ *
+ * It turns out that these structs are not stable yet and will undergo
+ * more changes. So keep them private until they are stable and ready to
+ * become part of the official public API.
+ */
+
+#ifndef _HEVC_CTRLS_H_
+#define _HEVC_CTRLS_H_
+
+#include <linux/videodev2.h>
+
+/* The pixel format isn't stable at the moment and will likely be renamed. */
+#define V4L2_PIX_FMT_HEVC_SLICE v4l2_fourcc('S', '2', '6', '5') /* HEVC parsed slices */
+
+#define V4L2_CID_MPEG_VIDEO_HEVC_SPS (V4L2_CID_MPEG_BASE + 1008)
+#define V4L2_CID_MPEG_VIDEO_HEVC_PPS (V4L2_CID_MPEG_BASE + 1009)
+#define V4L2_CID_MPEG_VIDEO_HEVC_SLICE_PARAMS (V4L2_CID_MPEG_BASE + 1010)
+#define V4L2_CID_MPEG_VIDEO_HEVC_DECODE_MODE (V4L2_CID_MPEG_BASE + 1015)
+#define V4L2_CID_MPEG_VIDEO_HEVC_START_CODE (V4L2_CID_MPEG_BASE + 1016)
+
+/* enum v4l2_ctrl_type type values */
+#define V4L2_CTRL_TYPE_HEVC_SPS 0x0120
+#define V4L2_CTRL_TYPE_HEVC_PPS 0x0121
+#define V4L2_CTRL_TYPE_HEVC_SLICE_PARAMS 0x0122
+
+enum v4l2_mpeg_video_hevc_decode_mode {
+ V4L2_MPEG_VIDEO_HEVC_DECODE_MODE_SLICE_BASED,
+ V4L2_MPEG_VIDEO_HEVC_DECODE_MODE_FRAME_BASED,
+};
+
+enum v4l2_mpeg_video_hevc_start_code {
+ V4L2_MPEG_VIDEO_HEVC_START_CODE_NONE,
+ V4L2_MPEG_VIDEO_HEVC_START_CODE_ANNEX_B,
+};
+
+#define V4L2_HEVC_SLICE_TYPE_B 0
+#define V4L2_HEVC_SLICE_TYPE_P 1
+#define V4L2_HEVC_SLICE_TYPE_I 2
+
+#define V4L2_HEVC_SPS_FLAG_SEPARATE_COLOUR_PLANE (1ULL << 0)
+#define V4L2_HEVC_SPS_FLAG_SCALING_LIST_ENABLED (1ULL << 1)
+#define V4L2_HEVC_SPS_FLAG_AMP_ENABLED (1ULL << 2)
+#define V4L2_HEVC_SPS_FLAG_SAMPLE_ADAPTIVE_OFFSET (1ULL << 3)
+#define V4L2_HEVC_SPS_FLAG_PCM_ENABLED (1ULL << 4)
+#define V4L2_HEVC_SPS_FLAG_PCM_LOOP_FILTER_DISABLED (1ULL << 5)
+#define V4L2_HEVC_SPS_FLAG_LONG_TERM_REF_PICS_PRESENT (1ULL << 6)
+#define V4L2_HEVC_SPS_FLAG_SPS_TEMPORAL_MVP_ENABLED (1ULL << 7)
+#define V4L2_HEVC_SPS_FLAG_STRONG_INTRA_SMOOTHING_ENABLED (1ULL << 8)
+
+/* The controls are not stable at the moment and will likely be reworked. */
+struct v4l2_ctrl_hevc_sps {
+ /* ISO/IEC 23008-2, ITU-T Rec. H.265: Sequence parameter set */
+ __u16 pic_width_in_luma_samples;
+ __u16 pic_height_in_luma_samples;
+ __u8 bit_depth_luma_minus8;
+ __u8 bit_depth_chroma_minus8;
+ __u8 log2_max_pic_order_cnt_lsb_minus4;
+ __u8 sps_max_dec_pic_buffering_minus1;
+ __u8 sps_max_num_reorder_pics;
+ __u8 sps_max_latency_increase_plus1;
+ __u8 log2_min_luma_coding_block_size_minus3;
+ __u8 log2_diff_max_min_luma_coding_block_size;
+ __u8 log2_min_luma_transform_block_size_minus2;
+ __u8 log2_diff_max_min_luma_transform_block_size;
+ __u8 max_transform_hierarchy_depth_inter;
+ __u8 max_transform_hierarchy_depth_intra;
+ __u8 pcm_sample_bit_depth_luma_minus1;
+ __u8 pcm_sample_bit_depth_chroma_minus1;
+ __u8 log2_min_pcm_luma_coding_block_size_minus3;
+ __u8 log2_diff_max_min_pcm_luma_coding_block_size;
+ __u8 num_short_term_ref_pic_sets;
+ __u8 num_long_term_ref_pics_sps;
+ __u8 chroma_format_idc;
+
+ __u8 padding;
+
+ __u64 flags;
+};
+
+#define V4L2_HEVC_PPS_FLAG_DEPENDENT_SLICE_SEGMENT (1ULL << 0)
+#define V4L2_HEVC_PPS_FLAG_OUTPUT_FLAG_PRESENT (1ULL << 1)
+#define V4L2_HEVC_PPS_FLAG_SIGN_DATA_HIDING_ENABLED (1ULL << 2)
+#define V4L2_HEVC_PPS_FLAG_CABAC_INIT_PRESENT (1ULL << 3)
+#define V4L2_HEVC_PPS_FLAG_CONSTRAINED_INTRA_PRED (1ULL << 4)
+#define V4L2_HEVC_PPS_FLAG_TRANSFORM_SKIP_ENABLED (1ULL << 5)
+#define V4L2_HEVC_PPS_FLAG_CU_QP_DELTA_ENABLED (1ULL << 6)
+#define V4L2_HEVC_PPS_FLAG_PPS_SLICE_CHROMA_QP_OFFSETS_PRESENT (1ULL << 7)
+#define V4L2_HEVC_PPS_FLAG_WEIGHTED_PRED (1ULL << 8)
+#define V4L2_HEVC_PPS_FLAG_WEIGHTED_BIPRED (1ULL << 9)
+#define V4L2_HEVC_PPS_FLAG_TRANSQUANT_BYPASS_ENABLED (1ULL << 10)
+#define V4L2_HEVC_PPS_FLAG_TILES_ENABLED (1ULL << 11)
+#define V4L2_HEVC_PPS_FLAG_ENTROPY_CODING_SYNC_ENABLED (1ULL << 12)
+#define V4L2_HEVC_PPS_FLAG_LOOP_FILTER_ACROSS_TILES_ENABLED (1ULL << 13)
+#define V4L2_HEVC_PPS_FLAG_PPS_LOOP_FILTER_ACROSS_SLICES_ENABLED (1ULL << 14)
+#define V4L2_HEVC_PPS_FLAG_DEBLOCKING_FILTER_OVERRIDE_ENABLED (1ULL << 15)
+#define V4L2_HEVC_PPS_FLAG_PPS_DISABLE_DEBLOCKING_FILTER (1ULL << 16)
+#define V4L2_HEVC_PPS_FLAG_LISTS_MODIFICATION_PRESENT (1ULL << 17)
+#define V4L2_HEVC_PPS_FLAG_SLICE_SEGMENT_HEADER_EXTENSION_PRESENT (1ULL << 18)
+
+struct v4l2_ctrl_hevc_pps {
+ /* ISO/IEC 23008-2, ITU-T Rec. H.265: Picture parameter set */
+ __u8 num_extra_slice_header_bits;
+ __s8 init_qp_minus26;
+ __u8 diff_cu_qp_delta_depth;
+ __s8 pps_cb_qp_offset;
+ __s8 pps_cr_qp_offset;
+ __u8 num_tile_columns_minus1;
+ __u8 num_tile_rows_minus1;
+ __u8 column_width_minus1[20];
+ __u8 row_height_minus1[22];
+ __s8 pps_beta_offset_div2;
+ __s8 pps_tc_offset_div2;
+ __u8 log2_parallel_merge_level_minus2;
+
+ __u8 padding[4];
+ __u64 flags;
+};
+
+#define V4L2_HEVC_DPB_ENTRY_RPS_ST_CURR_BEFORE 0x01
+#define V4L2_HEVC_DPB_ENTRY_RPS_ST_CURR_AFTER 0x02
+#define V4L2_HEVC_DPB_ENTRY_RPS_LT_CURR 0x03
+
+#define V4L2_HEVC_DPB_ENTRIES_NUM_MAX 16
+
+struct v4l2_hevc_dpb_entry {
+ __u64 timestamp;
+ __u8 rps;
+ __u8 field_pic;
+ __u16 pic_order_cnt[2];
+ __u8 padding[2];
+};
+
+struct v4l2_hevc_pred_weight_table {
+ __s8 delta_luma_weight_l0[V4L2_HEVC_DPB_ENTRIES_NUM_MAX];
+ __s8 luma_offset_l0[V4L2_HEVC_DPB_ENTRIES_NUM_MAX];
+ __s8 delta_chroma_weight_l0[V4L2_HEVC_DPB_ENTRIES_NUM_MAX][2];
+ __s8 chroma_offset_l0[V4L2_HEVC_DPB_ENTRIES_NUM_MAX][2];
+
+ __s8 delta_luma_weight_l1[V4L2_HEVC_DPB_ENTRIES_NUM_MAX];
+ __s8 luma_offset_l1[V4L2_HEVC_DPB_ENTRIES_NUM_MAX];
+ __s8 delta_chroma_weight_l1[V4L2_HEVC_DPB_ENTRIES_NUM_MAX][2];
+ __s8 chroma_offset_l1[V4L2_HEVC_DPB_ENTRIES_NUM_MAX][2];
+
+ __u8 padding[6];
+
+ __u8 luma_log2_weight_denom;
+ __s8 delta_chroma_log2_weight_denom;
+};
+
+#define V4L2_HEVC_SLICE_PARAMS_FLAG_SLICE_SAO_LUMA (1ULL << 0)
+#define V4L2_HEVC_SLICE_PARAMS_FLAG_SLICE_SAO_CHROMA (1ULL << 1)
+#define V4L2_HEVC_SLICE_PARAMS_FLAG_SLICE_TEMPORAL_MVP_ENABLED (1ULL << 2)
+#define V4L2_HEVC_SLICE_PARAMS_FLAG_MVD_L1_ZERO (1ULL << 3)
+#define V4L2_HEVC_SLICE_PARAMS_FLAG_CABAC_INIT (1ULL << 4)
+#define V4L2_HEVC_SLICE_PARAMS_FLAG_COLLOCATED_FROM_L0 (1ULL << 5)
+#define V4L2_HEVC_SLICE_PARAMS_FLAG_USE_INTEGER_MV (1ULL << 6)
+#define V4L2_HEVC_SLICE_PARAMS_FLAG_SLICE_DEBLOCKING_FILTER_DISABLED (1ULL << 7)
+#define V4L2_HEVC_SLICE_PARAMS_FLAG_SLICE_LOOP_FILTER_ACROSS_SLICES_ENABLED (1ULL << 8)
+
+struct v4l2_ctrl_hevc_slice_params {
+ __u32 bit_size;
+ __u32 data_bit_offset;
+
+ /* ISO/IEC 23008-2, ITU-T Rec. H.265: NAL unit header */
+ __u8 nal_unit_type;
+ __u8 nuh_temporal_id_plus1;
+
+ /* ISO/IEC 23008-2, ITU-T Rec. H.265: General slice segment header */
+ __u8 slice_type;
+ __u8 colour_plane_id;
+ __u16 slice_pic_order_cnt;
+ __u8 num_ref_idx_l0_active_minus1;
+ __u8 num_ref_idx_l1_active_minus1;
+ __u8 collocated_ref_idx;
+ __u8 five_minus_max_num_merge_cand;
+ __s8 slice_qp_delta;
+ __s8 slice_cb_qp_offset;
+ __s8 slice_cr_qp_offset;
+ __s8 slice_act_y_qp_offset;
+ __s8 slice_act_cb_qp_offset;
+ __s8 slice_act_cr_qp_offset;
+ __s8 slice_beta_offset_div2;
+ __s8 slice_tc_offset_div2;
+
+ /* ISO/IEC 23008-2, ITU-T Rec. H.265: Picture timing SEI message */
+ __u8 pic_struct;
+
+ /* ISO/IEC 23008-2, ITU-T Rec. H.265: General slice segment header */
+ __u8 num_active_dpb_entries;
+ __u8 ref_idx_l0[V4L2_HEVC_DPB_ENTRIES_NUM_MAX];
+ __u8 ref_idx_l1[V4L2_HEVC_DPB_ENTRIES_NUM_MAX];
+
+ __u8 num_rps_poc_st_curr_before;
+ __u8 num_rps_poc_st_curr_after;
+ __u8 num_rps_poc_lt_curr;
+
+ __u8 padding;
+
+ /* ISO/IEC 23008-2, ITU-T Rec. H.265: General slice segment header */
+ struct v4l2_hevc_dpb_entry dpb[V4L2_HEVC_DPB_ENTRIES_NUM_MAX];
+
+ /* ISO/IEC 23008-2, ITU-T Rec. H.265: Weighted prediction parameter */
+ struct v4l2_hevc_pred_weight_table pred_weight_table;
+
+ __u64 flags;
+};
+
+#endif
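A minimal sketch (not part of the patch) of how a stateless decoder might
inspect the HEVC SPS control payload defined above; the check_sps() helper and
the rejection of scaling lists are purely illustrative.

static int check_sps(const struct v4l2_ctrl_hevc_sps *sps)
{
	/* Assume, for the sketch, that scaling lists are unsupported. */
	if (sps->flags & V4L2_HEVC_SPS_FLAG_SCALING_LIST_ENABLED)
		return -EINVAL;

	pr_debug("HEVC %ux%u, %u-bit luma\n",
		 sps->pic_width_in_luma_samples,
		 sps->pic_height_in_luma_samples,
		 sps->bit_depth_luma_minus8 + 8);
	return 0;
}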
diff --git a/include/media/i2c/smiapp.h b/include/media/i2c/smiapp.h
deleted file mode 100644
index d6ccc859bfcd..000000000000
--- a/include/media/i2c/smiapp.h
+++ /dev/null
@@ -1,64 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * include/media/i2c/smiapp.h
- *
- * Generic driver for SMIA/SMIA++ compliant camera modules
- *
- * Copyright (C) 2011--2012 Nokia Corporation
- * Contact: Sakari Ailus <sakari.ailus@iki.fi>
- */
-
-#ifndef __SMIAPP_H_
-#define __SMIAPP_H_
-
-#include <media/v4l2-subdev.h>
-
-#define SMIAPP_NAME "smiapp"
-
-#define SMIAPP_DFL_I2C_ADDR (0x20 >> 1) /* Default I2C Address */
-#define SMIAPP_ALT_I2C_ADDR (0x6e >> 1) /* Alternate I2C Address */
-
-#define SMIAPP_CSI_SIGNALLING_MODE_CCP2_DATA_CLOCK 0
-#define SMIAPP_CSI_SIGNALLING_MODE_CCP2_DATA_STROBE 1
-#define SMIAPP_CSI_SIGNALLING_MODE_CSI2 2
-
-/*
- * Sometimes due to board layout considerations the camera module can be
- * mounted rotated. The typical rotation used is 180 degrees which can be
- * corrected by giving a default H-FLIP and V-FLIP in the sensor readout.
- * FIXME: rotation also changes the bayer pattern.
- */
-enum smiapp_module_board_orient {
- SMIAPP_MODULE_BOARD_ORIENT_0 = 0,
- SMIAPP_MODULE_BOARD_ORIENT_180,
-};
-
-struct smiapp_flash_strobe_parms {
- u8 mode;
- u32 strobe_width_high_us;
- u16 strobe_delay;
- u16 stobe_start_point;
- u8 trigger;
-};
-
-struct smiapp_hwconfig {
- /*
- * Change the cci address if i2c_addr_alt is set.
- * Both default and alternate cci addr need to be present
- */
- unsigned short i2c_addr_dfl; /* Default i2c addr */
- unsigned short i2c_addr_alt; /* Alternate i2c addr */
-
- uint32_t nvm_size; /* bytes */
- uint32_t ext_clk; /* sensor external clk */
-
- unsigned int lanes; /* Number of CSI-2 lanes */
- uint32_t csi_signalling_mode; /* SMIAPP_CSI_SIGNALLING_MODE_* */
- uint64_t *op_sys_clock;
-
- enum smiapp_module_board_orient module_board_orient;
-
- struct smiapp_flash_strobe_parms *strobe_setup;
-};
-
-#endif /* __SMIAPP_H_ */
diff --git a/include/media/rc-core.h b/include/media/rc-core.h
index 1f695d9c200a..d3f85df64bb2 100644
--- a/include/media/rc-core.h
+++ b/include/media/rc-core.h
@@ -192,7 +192,7 @@ struct rc_dev {
struct timer_list timer_repeat;
u32 last_keycode;
enum rc_proto last_protocol;
- u32 last_scancode;
+ u64 last_scancode;
u8 last_toggle;
u32 timeout;
u32 min_timeout;
@@ -284,12 +284,12 @@ int devm_rc_register_device(struct device *parent, struct rc_dev *dev);
void rc_unregister_device(struct rc_dev *dev);
void rc_repeat(struct rc_dev *dev);
-void rc_keydown(struct rc_dev *dev, enum rc_proto protocol, u32 scancode,
+void rc_keydown(struct rc_dev *dev, enum rc_proto protocol, u64 scancode,
u8 toggle);
void rc_keydown_notimeout(struct rc_dev *dev, enum rc_proto protocol,
- u32 scancode, u8 toggle);
+ u64 scancode, u8 toggle);
void rc_keyup(struct rc_dev *dev);
-u32 rc_g_keycode_from_table(struct rc_dev *dev, u32 scancode);
+u32 rc_g_keycode_from_table(struct rc_dev *dev, u64 scancode);
/*
* From rc-raw.c
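A minimal sketch (not part of the patch) of what the u32 -> u64 widening
enables: protocols whose scancodes no longer fit in 32 bits can be reported
unchanged. The scancode value and the helper name are illustrative.

static void report_long_scancode(struct rc_dev *dev)
{
	u64 scancode = 0x800f0400deadULL;	/* wider than 32 bits */
	u8 toggle = 0;

	rc_keydown(dev, RC_PROTO_UNKNOWN, scancode, toggle);
	rc_keyup(dev);
}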
diff --git a/include/media/rc-map.h b/include/media/rc-map.h
index afd2ab31bdf2..0ce896f10202 100644
--- a/include/media/rc-map.h
+++ b/include/media/rc-map.h
@@ -85,11 +85,11 @@
/**
* struct rc_map_table - represents a scancode/keycode pair
*
- * @scancode: scan code (u32)
+ * @scancode: scan code (u64)
* @keycode: Linux input keycode
*/
struct rc_map_table {
- u32 scancode;
+ u64 scancode;
u32 keycode;
};
@@ -159,21 +159,22 @@ struct rc_map *rc_map_get(const char *name);
#define RC_MAP_ASUS_PS3_100 "rc-asus-ps3-100"
#define RC_MAP_ATI_TV_WONDER_HD_600 "rc-ati-tv-wonder-hd-600"
#define RC_MAP_ATI_X10 "rc-ati-x10"
+#define RC_MAP_AVERMEDIA "rc-avermedia"
#define RC_MAP_AVERMEDIA_A16D "rc-avermedia-a16d"
#define RC_MAP_AVERMEDIA_CARDBUS "rc-avermedia-cardbus"
#define RC_MAP_AVERMEDIA_DVBT "rc-avermedia-dvbt"
#define RC_MAP_AVERMEDIA_M135A "rc-avermedia-m135a"
#define RC_MAP_AVERMEDIA_M733A_RM_K6 "rc-avermedia-m733a-rm-k6"
#define RC_MAP_AVERMEDIA_RM_KS "rc-avermedia-rm-ks"
-#define RC_MAP_AVERMEDIA "rc-avermedia"
#define RC_MAP_AVERTV_303 "rc-avertv-303"
#define RC_MAP_AZUREWAVE_AD_TU700 "rc-azurewave-ad-tu700"
-#define RC_MAP_BEHOLD_COLUMBUS "rc-behold-columbus"
+#define RC_MAP_BEELINK_GS1 "rc-beelink-gs1"
#define RC_MAP_BEHOLD "rc-behold"
+#define RC_MAP_BEHOLD_COLUMBUS "rc-behold-columbus"
#define RC_MAP_BUDGET_CI_OLD "rc-budget-ci-old"
#define RC_MAP_CEC "rc-cec"
-#define RC_MAP_CINERGY_1400 "rc-cinergy-1400"
#define RC_MAP_CINERGY "rc-cinergy"
+#define RC_MAP_CINERGY_1400 "rc-cinergy-1400"
#define RC_MAP_D680_DMB "rc-d680-dmb"
#define RC_MAP_DELOCK_61959 "rc-delock-61959"
#define RC_MAP_DIB0700_NEC_TABLE "rc-dib0700-nec"
@@ -181,17 +182,17 @@ struct rc_map *rc_map_get(const char *name);
#define RC_MAP_DIGITALNOW_TINYTWIN "rc-digitalnow-tinytwin"
#define RC_MAP_DIGITTRADE "rc-digittrade"
#define RC_MAP_DM1105_NEC "rc-dm1105-nec"
-#define RC_MAP_DNTV_LIVE_DVBT_PRO "rc-dntv-live-dvbt-pro"
#define RC_MAP_DNTV_LIVE_DVB_T "rc-dntv-live-dvb-t"
+#define RC_MAP_DNTV_LIVE_DVBT_PRO "rc-dntv-live-dvbt-pro"
#define RC_MAP_DTT200U "rc-dtt200u"
#define RC_MAP_DVBSKY "rc-dvbsky"
#define RC_MAP_DVICO_MCE "rc-dvico-mce"
#define RC_MAP_DVICO_PORTABLE "rc-dvico-portable"
#define RC_MAP_EMPTY "rc-empty"
#define RC_MAP_EM_TERRATEC "rc-em-terratec"
+#define RC_MAP_ENCORE_ENLTV "rc-encore-enltv"
#define RC_MAP_ENCORE_ENLTV2 "rc-encore-enltv2"
#define RC_MAP_ENCORE_ENLTV_FM53 "rc-encore-enltv-fm53"
-#define RC_MAP_ENCORE_ENLTV "rc-encore-enltv"
#define RC_MAP_EVGA_INDTUBE "rc-evga-indtube"
#define RC_MAP_EZTV "rc-eztv"
#define RC_MAP_FLYDVB "rc-flydvb"
@@ -201,6 +202,7 @@ struct rc_map *rc_map_get(const char *name);
#define RC_MAP_GEEKBOX "rc-geekbox"
#define RC_MAP_GENIUS_TVGO_A11MCE "rc-genius-tvgo-a11mce"
#define RC_MAP_GOTVIEW7135 "rc-gotview7135"
+#define RC_MAP_HAUPPAUGE "rc-hauppauge"
#define RC_MAP_HAUPPAUGE_NEW "rc-hauppauge"
#define RC_MAP_HISI_POPLAR "rc-hisi-poplar"
#define RC_MAP_HISI_TV_DEMO "rc-hisi-tv-demo"
@@ -223,8 +225,8 @@ struct rc_map *rc_map_get(const char *name);
#define RC_MAP_MEDION_X10_OR2X "rc-medion-x10-or2x"
#define RC_MAP_MSI_DIGIVOX_II "rc-msi-digivox-ii"
#define RC_MAP_MSI_DIGIVOX_III "rc-msi-digivox-iii"
-#define RC_MAP_MSI_TVANYWHERE_PLUS "rc-msi-tvanywhere-plus"
#define RC_MAP_MSI_TVANYWHERE "rc-msi-tvanywhere"
+#define RC_MAP_MSI_TVANYWHERE_PLUS "rc-msi-tvanywhere-plus"
#define RC_MAP_NEBULA "rc-nebula"
#define RC_MAP_NEC_TERRATEC_CINERGY_XS "rc-nec-terratec-cinergy-xs"
#define RC_MAP_NORWOOD "rc-norwood"
@@ -234,21 +236,21 @@ struct rc_map *rc_map_get(const char *name);
#define RC_MAP_PINNACLE_COLOR "rc-pinnacle-color"
#define RC_MAP_PINNACLE_GREY "rc-pinnacle-grey"
#define RC_MAP_PINNACLE_PCTV_HD "rc-pinnacle-pctv-hd"
-#define RC_MAP_PIXELVIEW_NEW "rc-pixelview-new"
#define RC_MAP_PIXELVIEW "rc-pixelview"
-#define RC_MAP_PIXELVIEW_002T "rc-pixelview-002t"
+#define RC_MAP_PIXELVIEW_002T "rc-pixelview-002t"
#define RC_MAP_PIXELVIEW_MK12 "rc-pixelview-mk12"
+#define RC_MAP_PIXELVIEW_NEW "rc-pixelview-new"
#define RC_MAP_POWERCOLOR_REAL_ANGEL "rc-powercolor-real-angel"
#define RC_MAP_PROTEUS_2309 "rc-proteus-2309"
#define RC_MAP_PURPLETV "rc-purpletv"
#define RC_MAP_PV951 "rc-pv951"
-#define RC_MAP_HAUPPAUGE "rc-hauppauge"
#define RC_MAP_RC5_TV "rc-rc5-tv"
#define RC_MAP_RC6_MCE "rc-rc6-mce"
#define RC_MAP_REAL_AUDIO_220_32_KEYS "rc-real-audio-220-32-keys"
#define RC_MAP_REDDO "rc-reddo"
#define RC_MAP_SNAPSTREAM_FIREFLY "rc-snapstream-firefly"
#define RC_MAP_STREAMZAP "rc-streamzap"
+#define RC_MAP_SU3000 "rc-su3000"
#define RC_MAP_TANGO "rc-tango"
#define RC_MAP_TANIX_TX3MINI "rc-tanix-tx3mini"
#define RC_MAP_TANIX_TX5MAX "rc-tanix-tx5max"
@@ -268,16 +270,17 @@ struct rc_map *rc_map_get(const char *name);
#define RC_MAP_TT_1500 "rc-tt-1500"
#define RC_MAP_TWINHAN_DTV_CAB_CI "rc-twinhan-dtv-cab-ci"
#define RC_MAP_TWINHAN_VP1027_DVBS "rc-twinhan1027"
+#define RC_MAP_VEGA_S9X "rc-vega-s9x"
#define RC_MAP_VIDEOMATE_K100 "rc-videomate-k100"
#define RC_MAP_VIDEOMATE_S350 "rc-videomate-s350"
#define RC_MAP_VIDEOMATE_TV_PVR "rc-videomate-tv-pvr"
+#define RC_MAP_KII_PRO "rc-videostrong-kii-pro"
#define RC_MAP_WETEK_HUB "rc-wetek-hub"
#define RC_MAP_WETEK_PLAY2 "rc-wetek-play2"
#define RC_MAP_WINFAST "rc-winfast"
#define RC_MAP_WINFAST_USBII_DELUXE "rc-winfast-usbii-deluxe"
-#define RC_MAP_SU3000 "rc-su3000"
-#define RC_MAP_XBOX_DVD "rc-xbox-dvd"
#define RC_MAP_X96MAX "rc-x96max"
+#define RC_MAP_XBOX_DVD "rc-xbox-dvd"
#define RC_MAP_ZX_IRDEC "rc-zx-irdec"
/*
diff --git a/include/media/v4l2-common.h b/include/media/v4l2-common.h
index c070d8ae11e5..150ee16ebd81 100644
--- a/include/media/v4l2-common.h
+++ b/include/media/v4l2-common.h
@@ -14,6 +14,7 @@
#ifndef V4L2_COMMON_H_
#define V4L2_COMMON_H_
+#include <linux/time.h>
#include <media/v4l2-dev.h>
/* Common printk constructs for v4l-i2c drivers. These macros create a unique
@@ -457,8 +458,24 @@ int v4l2_s_parm_cap(struct video_device *vdev,
/* Pixel format and FourCC helpers */
/**
+ * enum v4l2_pixel_encoding - specifies the pixel encoding value
+ *
+ * @V4L2_PIXEL_ENC_UNKNOWN: Pixel encoding is unknown/un-initialized
+ * @V4L2_PIXEL_ENC_YUV: Pixel encoding is YUV
+ * @V4L2_PIXEL_ENC_RGB: Pixel encoding is RGB
+ * @V4L2_PIXEL_ENC_BAYER: Pixel encoding is Bayer
+ */
+enum v4l2_pixel_encoding {
+ V4L2_PIXEL_ENC_UNKNOWN = 0,
+ V4L2_PIXEL_ENC_YUV = 1,
+ V4L2_PIXEL_ENC_RGB = 2,
+ V4L2_PIXEL_ENC_BAYER = 3,
+};
+
+/**
* struct v4l2_format_info - information about a V4L2 format
* @format: 4CC format identifier (V4L2_PIX_FMT_*)
+ * @pixel_enc: Pixel encoding (see enum v4l2_pixel_encoding above)
* @mem_planes: Number of memory planes, which includes the alpha plane (1 to 4).
* @comp_planes: Number of component planes, which includes the alpha plane (1 to 4).
* @bpp: Array of per-plane bytes per pixel
@@ -469,6 +486,7 @@ int v4l2_s_parm_cap(struct video_device *vdev,
*/
struct v4l2_format_info {
u32 format;
+ u8 pixel_enc;
u8 mem_planes;
u8 comp_planes;
u8 bpp[4];
@@ -478,8 +496,22 @@ struct v4l2_format_info {
u8 block_h[4];
};
-const struct v4l2_format_info *v4l2_format_info(u32 format);
+static inline bool v4l2_is_format_rgb(const struct v4l2_format_info *f)
+{
+ return f && f->pixel_enc == V4L2_PIXEL_ENC_RGB;
+}
+
+static inline bool v4l2_is_format_yuv(const struct v4l2_format_info *f)
+{
+ return f && f->pixel_enc == V4L2_PIXEL_ENC_YUV;
+}
+static inline bool v4l2_is_format_bayer(const struct v4l2_format_info *f)
+{
+ return f && f->pixel_enc == V4L2_PIXEL_ENC_BAYER;
+}
+
+const struct v4l2_format_info *v4l2_format_info(u32 format);
void v4l2_apply_frmsize_constraints(u32 *width, u32 *height,
const struct v4l2_frmsize_stepwise *frmsize);
int v4l2_fill_pixfmt(struct v4l2_pix_format *pixfmt, u32 pixelformat,
@@ -487,4 +519,24 @@ int v4l2_fill_pixfmt(struct v4l2_pix_format *pixfmt, u32 pixelformat,
int v4l2_fill_pixfmt_mp(struct v4l2_pix_format_mplane *pixfmt, u32 pixelformat,
u32 width, u32 height);
+static inline u64 v4l2_buffer_get_timestamp(const struct v4l2_buffer *buf)
+{
+ /*
+ * When the timestamp comes from 32-bit user space, there may be
+ * uninitialized data in tv_usec, so cast it to u32.
+ * Otherwise allow invalid input for backwards compatibility.
+ */
+ return buf->timestamp.tv_sec * NSEC_PER_SEC +
+ (u32)buf->timestamp.tv_usec * NSEC_PER_USEC;
+}
+
+static inline void v4l2_buffer_set_timestamp(struct v4l2_buffer *buf,
+ u64 timestamp)
+{
+ struct timespec64 ts = ns_to_timespec64(timestamp);
+
+ buf->timestamp.tv_sec = ts.tv_sec;
+ buf->timestamp.tv_usec = ts.tv_nsec / NSEC_PER_USEC;
+}
+
#endif /* V4L2_COMMON_H_ */
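A minimal sketch (not part of the patch) combining the new helpers: the pixel
encoding predicates classify a format, and v4l2_buffer_get_timestamp() turns
the buffer's timeval into nanoseconds. The NV12 fourcc is illustrative.

static void inspect_buffer(const struct v4l2_buffer *buf)
{
	const struct v4l2_format_info *info = v4l2_format_info(V4L2_PIX_FMT_NV12);
	u64 ns = v4l2_buffer_get_timestamp(buf);

	if (v4l2_is_format_yuv(info))
		pr_debug("YUV buffer queued at %llu ns\n", ns);
}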
diff --git a/include/media/v4l2-ctrls.h b/include/media/v4l2-ctrls.h
index 570ff4b0205a..7db9e719a583 100644
--- a/include/media/v4l2-ctrls.h
+++ b/include/media/v4l2-ctrls.h
@@ -21,6 +21,7 @@
#include <media/fwht-ctrls.h>
#include <media/h264-ctrls.h>
#include <media/vp8-ctrls.h>
+#include <media/hevc-ctrls.h>
/* forward references */
struct file;
@@ -50,7 +51,12 @@ struct poll_table_struct;
* @p_h264_slice_params: Pointer to a struct v4l2_ctrl_h264_slice_params.
* @p_h264_decode_params: Pointer to a struct v4l2_ctrl_h264_decode_params.
* @p_vp8_frame_header: Pointer to a VP8 frame header structure.
+ * @p_hevc_sps: Pointer to an HEVC sequence parameter set structure.
+ * @p_hevc_pps: Pointer to an HEVC picture parameter set structure.
+ * @p_hevc_slice_params: Pointer to an HEVC slice parameters structure.
+ * @p_area: Pointer to an area.
* @p: Pointer to a compound value.
+ * @p_const: Pointer to a constant compound value.
*/
union v4l2_ctrl_ptr {
s32 *p_s32;
@@ -68,10 +74,27 @@ union v4l2_ctrl_ptr {
struct v4l2_ctrl_h264_slice_params *p_h264_slice_params;
struct v4l2_ctrl_h264_decode_params *p_h264_decode_params;
struct v4l2_ctrl_vp8_frame_header *p_vp8_frame_header;
+ struct v4l2_ctrl_hevc_sps *p_hevc_sps;
+ struct v4l2_ctrl_hevc_pps *p_hevc_pps;
+ struct v4l2_ctrl_hevc_slice_params *p_hevc_slice_params;
+ struct v4l2_area *p_area;
void *p;
+ const void *p_const;
};
/**
+ * v4l2_ctrl_ptr_create() - Helper function to return a v4l2_ctrl_ptr from a
+ * void pointer
+ * @ptr: The void pointer
+ */
+static inline union v4l2_ctrl_ptr v4l2_ctrl_ptr_create(void *ptr)
+{
+ union v4l2_ctrl_ptr p = { .p = ptr };
+
+ return p;
+}
+
+/**
* struct v4l2_ctrl_ops - The control operations that the driver has to provide.
*
* @g_volatile_ctrl: Get a new value for this control. Generally only relevant
@@ -200,6 +223,9 @@ typedef void (*v4l2_ctrl_notify_fnc)(struct v4l2_ctrl *ctrl, void *priv);
* not freed when the control is deleted. Should this be needed
* then a new internal bitfield can be added to tell the framework
* to free this pointer.
+ * @p_def: The control's default value represented via a union which
+ * provides a standard way of accessing control types
+ * through a pointer (for compound controls only).
* @p_cur: The control's current value represented via a union which
* provides a standard way of accessing control types
* through a pointer.
@@ -254,6 +280,7 @@ struct v4l2_ctrl {
s32 val;
} cur;
+ union v4l2_ctrl_ptr p_def;
union v4l2_ctrl_ptr p_new;
union v4l2_ctrl_ptr p_cur;
};
@@ -357,6 +384,7 @@ struct v4l2_ctrl_handler {
* @max: The control's maximum value.
* @step: The control's step value for non-menu controls.
* @def: The control's default value.
+ * @p_def: The control's default value for compound controls.
* @dims: The size of each dimension.
* @elem_size: The size in bytes of the control.
* @flags: The control's flags.
@@ -385,6 +413,7 @@ struct v4l2_ctrl_config {
s64 max;
u64 step;
s64 def;
+ union v4l2_ctrl_ptr p_def;
u32 dims[V4L2_CTRL_MAX_DIMS];
u32 elem_size;
u32 flags;
@@ -647,6 +676,24 @@ struct v4l2_ctrl *v4l2_ctrl_new_std_menu_items(struct v4l2_ctrl_handler *hdl,
const char * const *qmenu);
/**
+ * v4l2_ctrl_new_std_compound() - Allocate and initialize a new standard V4L2
+ * compound control.
+ *
+ * @hdl: The control handler.
+ * @ops: The control ops.
+ * @id: The control ID.
+ * @p_def: The control's default value.
+ *
+ * Same as v4l2_ctrl_new_std(), but with support for compound controls, thanks
+ * to the @p_def field.
+ *
+ */
+struct v4l2_ctrl *v4l2_ctrl_new_std_compound(struct v4l2_ctrl_handler *hdl,
+ const struct v4l2_ctrl_ops *ops,
+ u32 id,
+ const union v4l2_ctrl_ptr p_def);
+
+/**
* v4l2_ctrl_new_int_menu() - Create a new standard V4L2 integer menu control.
*
* @hdl: The control handler.
@@ -1065,6 +1112,46 @@ static inline int v4l2_ctrl_s_ctrl_string(struct v4l2_ctrl *ctrl, const char *s)
return rval;
}
+/**
+ * __v4l2_ctrl_s_ctrl_area() - Unlocked variant of v4l2_ctrl_s_ctrl_area().
+ *
+ * @ctrl: The control.
+ * @area: The new area.
+ *
+ * This sets the control's new area safely by going through the control
+ * framework. This function assumes the control's handler is already locked,
+ * allowing it to be used from within the &v4l2_ctrl_ops functions.
+ *
+ * This function is for area type controls only.
+ */
+int __v4l2_ctrl_s_ctrl_area(struct v4l2_ctrl *ctrl,
+ const struct v4l2_area *area);
+
+/**
+ * v4l2_ctrl_s_ctrl_area() - Helper function to set a control's area value
+ * from within a driver.
+ *
+ * @ctrl: The control.
+ * @area: The new area.
+ *
+ * This sets the control's new area safely by going through the control
+ * framework. This function will lock the control's handler, so it cannot be
+ * used from within the &v4l2_ctrl_ops functions.
+ *
+ * This function is for area type controls only.
+ */
+static inline int v4l2_ctrl_s_ctrl_area(struct v4l2_ctrl *ctrl,
+ const struct v4l2_area *area)
+{
+ int rval;
+
+ v4l2_ctrl_lock(ctrl);
+ rval = __v4l2_ctrl_s_ctrl_area(ctrl, area);
+ v4l2_ctrl_unlock(ctrl);
+
+ return rval;
+}
+
/* Internal helper functions that deal with control events. */
extern const struct v4l2_subscribed_event_ops v4l2_ctrl_sub_ev_ops;
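A minimal sketch (not part of the patch) of registering a compound control
with a default value through the two new helpers. The handler, ops and the
HEVC SPS default are illustrative; casting away const is needed because
v4l2_ctrl_ptr_create() takes a void pointer.

static const struct v4l2_ctrl_hevc_sps default_sps = {
	.chroma_format_idc = 1,		/* 4:2:0 */
};

static int register_sps_ctrl(struct v4l2_ctrl_handler *hdl,
			     const struct v4l2_ctrl_ops *ops)
{
	v4l2_ctrl_new_std_compound(hdl, ops, V4L2_CID_MPEG_VIDEO_HEVC_SPS,
				   v4l2_ctrl_ptr_create((void *)&default_sps));
	return hdl->error;
}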
diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h
index 48531e57cc5a..4602c15ff878 100644
--- a/include/media/v4l2-dev.h
+++ b/include/media/v4l2-dev.h
@@ -24,7 +24,7 @@
/**
* enum vfl_devnode_type - type of V4L2 device node
*
- * @VFL_TYPE_GRABBER: for video input/output devices
+ * @VFL_TYPE_VIDEO: for video input/output devices
* @VFL_TYPE_VBI: for vertical blank data (i.e. closed captions, teletext)
* @VFL_TYPE_RADIO: for radio tuners
* @VFL_TYPE_SUBDEV: for V4L2 subdevices
@@ -33,7 +33,7 @@
* @VFL_TYPE_MAX: number of VFL types, must always be last in the enum
*/
enum vfl_devnode_type {
- VFL_TYPE_GRABBER = 0,
+ VFL_TYPE_VIDEO,
VFL_TYPE_VBI,
VFL_TYPE_RADIO,
VFL_TYPE_SUBDEV,
diff --git a/include/media/v4l2-device.h b/include/media/v4l2-device.h
index e0b8f2602670..7c912b7d2870 100644
--- a/include/media/v4l2-device.h
+++ b/include/media/v4l2-device.h
@@ -72,7 +72,7 @@ static inline void v4l2_device_get(struct v4l2_device *v4l2_dev)
}
/**
- * v4l2_device_put - putss a V4L2 device reference
+ * v4l2_device_put - puts a V4L2 device reference
*
* @v4l2_dev: pointer to struct &v4l2_device
*
@@ -240,7 +240,7 @@ static inline bool v4l2_device_supports_requests(struct v4l2_device *v4l2_dev)
* @f: operation function that will be called if @cond matches.
* The operation functions are defined in groups, according to
* each element at &struct v4l2_subdev_ops.
- * @args...: arguments for @f.
+ * @args: arguments for @f.
*
* Ignore any errors.
*
@@ -265,7 +265,7 @@ static inline bool v4l2_device_supports_requests(struct v4l2_device *v4l2_dev)
* @f: operation function that will be called if @cond matches.
* The operation functions are defined in groups, according to
* each element at &struct v4l2_subdev_ops.
- * @args...: arguments for @f.
+ * @args: arguments for @f.
*
* Ignore any errors.
*
@@ -293,7 +293,7 @@ static inline bool v4l2_device_supports_requests(struct v4l2_device *v4l2_dev)
* @f: operation function that will be called if @cond matches.
* The operation functions are defined in groups, according to
* each element at &struct v4l2_subdev_ops.
- * @args...: arguments for @f.
+ * @args: arguments for @f.
*
* Return:
*
@@ -328,7 +328,7 @@ static inline bool v4l2_device_supports_requests(struct v4l2_device *v4l2_dev)
* @f: operation function that will be called if @cond matches.
* The operation functions are defined in groups, according to
* each element at &struct v4l2_subdev_ops.
- * @args...: arguments for @f.
+ * @args: arguments for @f.
*
* Return:
*
@@ -359,7 +359,7 @@ static inline bool v4l2_device_supports_requests(struct v4l2_device *v4l2_dev)
* @f: operation function that will be called if @cond matches.
* The operation functions are defined in groups, according to
* each element at &struct v4l2_subdev_ops.
- * @args...: arguments for @f.
+ * @args: arguments for @f.
*
* Ignore any errors.
*
@@ -371,7 +371,7 @@ static inline bool v4l2_device_supports_requests(struct v4l2_device *v4l2_dev)
struct v4l2_subdev *__sd; \
\
__v4l2_device_call_subdevs_p(v4l2_dev, __sd, \
- !(grpid) || __sd->grp_id == (grpid), o, f , \
+ (grpid) == 0 || __sd->grp_id == (grpid), o, f , \
##args); \
} while (0)
@@ -388,7 +388,7 @@ static inline bool v4l2_device_supports_requests(struct v4l2_device *v4l2_dev)
* @f: operation function that will be called if @cond matches.
* The operation functions are defined in groups, according to
* each element at &struct v4l2_subdev_ops.
- * @args...: arguments for @f.
+ * @args: arguments for @f.
*
* Return:
*
@@ -403,7 +403,7 @@ static inline bool v4l2_device_supports_requests(struct v4l2_device *v4l2_dev)
({ \
struct v4l2_subdev *__sd; \
__v4l2_device_call_subdevs_until_err_p(v4l2_dev, __sd, \
- !(grpid) || __sd->grp_id == (grpid), o, f , \
+ (grpid) == 0 || __sd->grp_id == (grpid), o, f , \
##args); \
})
@@ -419,7 +419,7 @@ static inline bool v4l2_device_supports_requests(struct v4l2_device *v4l2_dev)
* @f: operation function that will be called if @cond matches.
* The operation functions are defined in groups, according to
* each element at &struct v4l2_subdev_ops.
- * @args...: arguments for @f.
+ * @args: arguments for @f.
*
* Ignore any errors.
*
@@ -431,8 +431,8 @@ static inline bool v4l2_device_supports_requests(struct v4l2_device *v4l2_dev)
struct v4l2_subdev *__sd; \
\
__v4l2_device_call_subdevs_p(v4l2_dev, __sd, \
- !(grpmsk) || (__sd->grp_id & (grpmsk)), o, f , \
- ##args); \
+ (grpmsk) == 0 || (__sd->grp_id & (grpmsk)), o, \
+ f , ##args); \
} while (0)
/**
@@ -447,7 +447,7 @@ static inline bool v4l2_device_supports_requests(struct v4l2_device *v4l2_dev)
* @f: operation function that will be called if @cond matches.
* The operation functions are defined in groups, according to
* each element at &struct v4l2_subdev_ops.
- * @args...: arguments for @f.
+ * @args: arguments for @f.
*
* Return:
*
@@ -462,8 +462,8 @@ static inline bool v4l2_device_supports_requests(struct v4l2_device *v4l2_dev)
({ \
struct v4l2_subdev *__sd; \
__v4l2_device_call_subdevs_until_err_p(v4l2_dev, __sd, \
- !(grpmsk) || (__sd->grp_id & (grpmsk)), o, f , \
- ##args); \
+ (grpmsk) == 0 || (__sd->grp_id & (grpmsk)), o, \
+ f , ##args); \
})
diff --git a/include/media/v4l2-fwnode.h b/include/media/v4l2-fwnode.h
index f6a7bcd13197..dd82d6d9764e 100644
--- a/include/media/v4l2-fwnode.h
+++ b/include/media/v4l2-fwnode.h
@@ -113,14 +113,75 @@ struct v4l2_fwnode_endpoint {
* struct v4l2_fwnode_link - a link between two endpoints
* @local_node: pointer to device_node of this endpoint
* @local_port: identifier of the port this endpoint belongs to
+ * @local_id: identifier of the id this endpoint belongs to
* @remote_node: pointer to device_node of the remote endpoint
* @remote_port: identifier of the port the remote endpoint belongs to
+ * @remote_id: identifier of the id the remote endpoint belongs to
*/
struct v4l2_fwnode_link {
struct fwnode_handle *local_node;
unsigned int local_port;
+ unsigned int local_id;
struct fwnode_handle *remote_node;
unsigned int remote_port;
+ unsigned int remote_id;
+};
+
+/**
+ * enum v4l2_connector_type - connector type
+ * @V4L2_CONN_UNKNOWN: unknown connector type, no V4L2 connector configuration
+ * @V4L2_CONN_COMPOSITE: analog composite connector
+ * @V4L2_CONN_SVIDEO: analog svideo connector
+ */
+enum v4l2_connector_type {
+ V4L2_CONN_UNKNOWN,
+ V4L2_CONN_COMPOSITE,
+ V4L2_CONN_SVIDEO,
+};
+
+/**
+ * struct v4l2_connector_link - connector link data structure
+ * @head: structure to be used to add the link to the
+ * &struct v4l2_fwnode_connector
+ * @fwnode_link: &struct v4l2_fwnode_link link between the connector and the
+ * device the connector belongs to.
+ */
+struct v4l2_connector_link {
+ struct list_head head;
+ struct v4l2_fwnode_link fwnode_link;
+};
+
+/**
+ * struct v4l2_fwnode_connector_analog - analog connector data structure
+ * @sdtv_stds: sdtv standards this connector supports, set to V4L2_STD_ALL
+ * if no restrictions are specified.
+ */
+struct v4l2_fwnode_connector_analog {
+ v4l2_std_id sdtv_stds;
+};
+
+/**
+ * struct v4l2_fwnode_connector - the connector data structure
+ * @name: the connector device name
+ * @label: optional connector label
+ * @type: connector type
+ * @links: list of all connector &struct v4l2_connector_link links
+ * @nr_of_links: total number of links
+ * @connector: connector configuration
+ * @connector.analog: analog connector configuration
+ * &struct v4l2_fwnode_connector_analog
+ */
+struct v4l2_fwnode_connector {
+ const char *name;
+ const char *label;
+ enum v4l2_connector_type type;
+ struct list_head links;
+ unsigned int nr_of_links;
+
+ union {
+ struct v4l2_fwnode_connector_analog analog;
+ /* future connectors */
+ } connector;
};
/**
@@ -234,6 +295,66 @@ int v4l2_fwnode_parse_link(struct fwnode_handle *fwnode,
void v4l2_fwnode_put_link(struct v4l2_fwnode_link *link);
/**
+ * v4l2_fwnode_connector_free() - free the V4L2 connector acquired memory
+ * @connector: the V4L2 connector resources of which are to be released
+ *
+ * Free all allocated memory and put all links acquired by
+ * v4l2_fwnode_connector_parse() and v4l2_fwnode_connector_add_link().
+ *
+ * It is safe to call this function with NULL argument or on a V4L2 connector
+ * the parsing of which failed.
+ */
+void v4l2_fwnode_connector_free(struct v4l2_fwnode_connector *connector);
+
+/**
+ * v4l2_fwnode_connector_parse() - initialize the 'struct v4l2_fwnode_connector'
+ * @fwnode: pointer to the fwnode handle of the subdev endpoint to which the
+ * connector is connected, or to the connector endpoint fwnode handle.
+ * @connector: pointer to the V4L2 fwnode connector data structure
+ *
+ * Fill the &struct v4l2_fwnode_connector with the connector type, label and
+ * all &enum v4l2_connector_type specific connector data. The label is optional
+ * so it is set to %NULL if none is found. The function initializes the link
+ * list to be empty. Adding links to the connector is done by calling
+ * v4l2_fwnode_connector_add_link().
+ *
+ * The memory allocated for the label must be freed when no longer needed.
+ * Freeing the memory is done by v4l2_fwnode_connector_free().
+ *
+ * Return:
+ * * %0 on success or a negative error code on failure:
+ * * %-EINVAL if @fwnode is invalid
+ * * %-ENOTCONN if connector type is unknown or connector device can't be found
+ */
+int v4l2_fwnode_connector_parse(struct fwnode_handle *fwnode,
+ struct v4l2_fwnode_connector *connector);
+
+/**
+ * v4l2_fwnode_connector_add_link - add a link between a connector node and
+ * a v4l2-subdev node.
+ * @fwnode: pointer to the fwnode handle of the subdev endpoint to which the
+ * connector is connected
+ * @connector: pointer to the V4L2 fwnode connector data structure
+ *
+ * Add a new &struct v4l2_connector_link link to the
+ * &struct v4l2_fwnode_connector connector links list. The link local_node
+ * points to the connector node, the remote_node to the host v4l2 (sub)dev.
+ *
+ * The taken references to remote_node and local_node must be dropped and the
+ * allocated memory must be freed when no longer needed. Both are done by calling
+ * v4l2_fwnode_connector_free().
+ *
+ * Return:
+ * * %0 on success or a negative error code on failure:
+ * * %-EINVAL if @fwnode or @connector is invalid or @connector type is unknown
+ * * %-ENOMEM on link memory allocation failure
+ * * %-ENOTCONN if remote connector device can't be found
+ * * %-ENOLINK if link parsing between v4l2 (sub)dev and connector fails
+ */
+int v4l2_fwnode_connector_add_link(struct fwnode_handle *fwnode,
+ struct v4l2_fwnode_connector *connector);
+
+/**
* typedef parse_endpoint_func - Driver's callback function to be called on
* each V4L2 fwnode endpoint.
*
@@ -406,4 +527,26 @@ v4l2_async_register_fwnode_subdev(struct v4l2_subdev *sd,
unsigned int num_ports,
parse_endpoint_func parse_endpoint);
+/* Helper macros to access the connector links. */
+
+/** v4l2_connector_first_link - Helper macro to get the first
+ * &struct v4l2_fwnode_connector link
+ * @v4l2c: &struct v4l2_fwnode_connector owning the connector links
+ *
+ * This macro returns the first added &struct v4l2_connector_link connector
+ * link or @NULL if the connector has no links.
+ */
+#define v4l2_connector_first_link(v4l2c) \
+ list_first_entry_or_null(&(v4l2c)->links, \
+ struct v4l2_connector_link, head)
+
+/** v4l2_connector_last_link - Helper macro to get the last
+ * &struct v4l2_fwnode_connector link
+ * @v4l2c: &struct v4l2_fwnode_connector owning the connector links
+ *
+ * This macro returns the last added &struct v4l2_connector_link connector link.
+ */
+#define v4l2_connector_last_link(v4l2c) \
+ list_last_entry(&(v4l2c)->links, struct v4l2_connector_link, head)
+
#endif /* _V4L2_FWNODE_H */
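A minimal sketch (not part of the patch) of the connector parsing flow: parse
the connector behind a subdev endpoint, record the link, look at the first
link, then release everything. The endpoint fwnode is assumed to have been
obtained elsewhere.

static int parse_connector(struct fwnode_handle *ep_fwnode)
{
	struct v4l2_fwnode_connector conn = {};
	struct v4l2_connector_link *link;
	int ret;

	ret = v4l2_fwnode_connector_parse(ep_fwnode, &conn);
	if (ret)
		return ret;

	ret = v4l2_fwnode_connector_add_link(ep_fwnode, &conn);
	if (ret)
		goto out;

	link = v4l2_connector_first_link(&conn);
	if (link)
		pr_debug("connector '%s', remote port %u\n", conn.name,
			 link->fwnode_link.remote_port);
out:
	v4l2_fwnode_connector_free(&conn);	/* safe even after failure */
	return ret;
}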
diff --git a/include/media/v4l2-ioctl.h b/include/media/v4l2-ioctl.h
index 4bba65a59d46..86878fba332b 100644
--- a/include/media/v4l2-ioctl.h
+++ b/include/media/v4l2-ioctl.h
@@ -724,4 +724,59 @@ long int video_usercopy(struct file *file, unsigned int cmd,
long int video_ioctl2(struct file *file,
unsigned int cmd, unsigned long int arg);
+/*
+ * The user space interpretation of the 'v4l2_event' differs
+ * based on the 'time_t' definition on 32-bit architectures, so
+ * the kernel has to handle both.
+ * This is the old version for 32-bit architectures.
+ */
+struct v4l2_event_time32 {
+ __u32 type;
+ union {
+ struct v4l2_event_vsync vsync;
+ struct v4l2_event_ctrl ctrl;
+ struct v4l2_event_frame_sync frame_sync;
+ struct v4l2_event_src_change src_change;
+ struct v4l2_event_motion_det motion_det;
+ __u8 data[64];
+ } u;
+ __u32 pending;
+ __u32 sequence;
+ struct old_timespec32 timestamp;
+ __u32 id;
+ __u32 reserved[8];
+};
+
+#define VIDIOC_DQEVENT_TIME32 _IOR('V', 89, struct v4l2_event_time32)
+
+struct v4l2_buffer_time32 {
+ __u32 index;
+ __u32 type;
+ __u32 bytesused;
+ __u32 flags;
+ __u32 field;
+ struct old_timeval32 timestamp;
+ struct v4l2_timecode timecode;
+ __u32 sequence;
+
+ /* memory location */
+ __u32 memory;
+ union {
+ __u32 offset;
+ unsigned long userptr;
+ struct v4l2_plane *planes;
+ __s32 fd;
+ } m;
+ __u32 length;
+ __u32 reserved2;
+ union {
+ __s32 request_fd;
+ __u32 reserved;
+ };
+};
+#define VIDIOC_QUERYBUF_TIME32 _IOWR('V', 9, struct v4l2_buffer_time32)
+#define VIDIOC_QBUF_TIME32 _IOWR('V', 15, struct v4l2_buffer_time32)
+#define VIDIOC_DQBUF_TIME32 _IOWR('V', 17, struct v4l2_buffer_time32)
+#define VIDIOC_PREPARE_BUF_TIME32 _IOWR('V', 93, struct v4l2_buffer_time32)
+
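A hedged sketch of how a compat handler might widen the 32-bit timestamp from these structures before reusing the native ioctl paths; only the field copy is shown and the surrounding copy_from_user() handling is assumed:

	struct v4l2_event_time32 ev32;
	struct timespec64 ts;

	/* ev32 has been copied from user space beforehand */
	ts.tv_sec  = ev32.timestamp.tv_sec;
	ts.tv_nsec = ev32.timestamp.tv_nsec;
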
#endif /* _V4L2_IOCTL_H */
diff --git a/include/media/v4l2-mc.h b/include/media/v4l2-mc.h
index 384960249f01..5e73eb8e28f6 100644
--- a/include/media/v4l2-mc.h
+++ b/include/media/v4l2-mc.h
@@ -86,23 +86,30 @@ int v4l_vb2q_enable_media_source(struct vb2_queue *q);
/**
- * v4l2_pipeline_pm_use - Update the use count of an entity
- * @entity: The entity
- * @use: Use (1) or stop using (0) the entity
+ * v4l2_pipeline_pm_get - Increase the use count of a pipeline
+ * @entity: The root entity of a pipeline
*
- * Update the use count of all entities in the pipeline and power entities on or
- * off accordingly.
+ * Update the use count of all entities in the pipeline and power entities on.
*
- * This function is intended to be called in video node open (use ==
- * 1) and release (use == 0). It uses struct media_entity.use_count to
- * track the power status. The use of this function should be paired
- * with v4l2_pipeline_link_notify().
+ * This function is intended to be called in video node open. It uses
+ * struct media_entity.use_count to track the power status. The use
+ * of this function should be paired with v4l2_pipeline_link_notify().
*
- * Return 0 on success or a negative error code on failure. Powering entities
- * off is assumed to never fail. No failure can occur when the use parameter is
- * set to 0.
+ * Return 0 on success or a negative error code on failure.
*/
-int v4l2_pipeline_pm_use(struct media_entity *entity, int use);
+int v4l2_pipeline_pm_get(struct media_entity *entity);
+
+/**
+ * v4l2_pipeline_pm_put - Decrease the use count of a pipeline
+ * @entity: The root entity of a pipeline
+ *
+ * Update the use count of all entities in the pipeline and power entities off.
+ *
+ * This function is intended to be called in video node release. It uses
+ * struct media_entity.use_count to track the power status. The use
+ * of this function should be paired with v4l2_pipeline_link_notify().
+ */
+void v4l2_pipeline_pm_put(struct media_entity *entity);
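A minimal sketch (not from the patch) of the intended get/put pairing in a hypothetical video device's open and release handlers; "demo_open"/"demo_release" and the surrounding error handling are illustrative only:

	static int demo_open(struct file *file)
	{
		struct video_device *vdev = video_devdata(file);
		int ret;

		ret = v4l2_pipeline_pm_get(&vdev->entity);
		if (ret)
			return ret;
		/* ... */
		return 0;
	}

	static int demo_release(struct file *file)
	{
		struct video_device *vdev = video_devdata(file);

		v4l2_pipeline_pm_put(&vdev->entity);
		return 0;
	}
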
/**
@@ -114,7 +121,7 @@ int v4l2_pipeline_pm_use(struct media_entity *entity, int use);
* React to link management on powered pipelines by updating the use count of
* all entities in the source and sink sides of the link. Entities are powered
* on or off accordingly. The use of this function should be paired
- * with v4l2_pipeline_pm_use().
+ * with v4l2_pipeline_pm_{get,put}().
*
* Return 0 on success or a negative error code on failure. Powering entities
* off is assumed to never fail. This function will not fail for disconnection
@@ -144,11 +151,14 @@ static inline int v4l_vb2q_enable_media_source(struct vb2_queue *q)
return 0;
}
-static inline int v4l2_pipeline_pm_use(struct media_entity *entity, int use)
+static inline int v4l2_pipeline_pm_get(struct media_entity *entity)
{
return 0;
}
+static inline void v4l2_pipeline_pm_put(struct media_entity *entity)
+{}
+
static inline int v4l2_pipeline_link_notify(struct media_link *link, u32 flags,
unsigned int notification)
{
diff --git a/include/media/v4l2-mem2mem.h b/include/media/v4l2-mem2mem.h
index 0b9c3a287061..98753f00df7e 100644
--- a/include/media/v4l2-mem2mem.h
+++ b/include/media/v4l2-mem2mem.h
@@ -21,7 +21,8 @@
* callback.
* The job does NOT have to end before this callback returns
* (and it will be the usual case). When the job finishes,
- * v4l2_m2m_job_finish() has to be called.
+ * v4l2_m2m_job_finish() or v4l2_m2m_buf_done_and_job_finish()
+ * has to be called.
* @job_ready: optional. Should return 0 if the driver does not have a job
* fully prepared to run yet (i.e. it will not be able to finish a
* transaction without sleeping). If not provided, it will be
@@ -33,7 +34,8 @@
* stop the device safely; e.g. in the next interrupt handler),
* even if the transaction would not have been finished by then.
* After the driver performs the necessary steps, it has to call
- * v4l2_m2m_job_finish() (as if the transaction ended normally).
+ * v4l2_m2m_job_finish() or v4l2_m2m_buf_done_and_job_finish() as
+ * if the transaction ended normally.
* This function does not have to (and will usually not) wait
* until the device enters a state when it can be stopped.
*/
@@ -73,6 +75,15 @@ struct v4l2_m2m_queue_ctx {
* struct v4l2_m2m_ctx - Memory to memory context structure
*
* @q_lock: struct &mutex lock
+ * @new_frame: valid in the device_run callback: if true, then this
+ * starts a new frame; if false, then this is a new slice
+ * for an existing frame. This is always true unless
+ * V4L2_BUF_CAP_SUPPORTS_M2M_HOLD_CAPTURE_BUF is set, which
+ * indicates slicing support.
+ * @is_draining: indicates the device is in the draining phase
+ * @last_src_buf: indicates the last source buffer for draining
+ * @next_buf_last: next queued capture buffer will be tagged as last
+ * @has_stopped: indicates the device has been stopped
* @m2m_dev: opaque pointer to the internal data to handle M2M context
* @cap_q_ctx: Capture (output to memory) queue context
* @out_q_ctx: Output (input from memory) queue context
@@ -89,6 +100,13 @@ struct v4l2_m2m_ctx {
/* optional cap/out vb2 queues lock */
struct mutex *q_lock;
+ bool new_frame;
+
+ bool is_draining;
+ struct vb2_v4l2_buffer *last_src_buf;
+ bool next_buf_last;
+ bool has_stopped;
+
/* internal use only */
struct v4l2_m2m_dev *m2m_dev;
@@ -173,6 +191,33 @@ void v4l2_m2m_try_schedule(struct v4l2_m2m_ctx *m2m_ctx);
void v4l2_m2m_job_finish(struct v4l2_m2m_dev *m2m_dev,
struct v4l2_m2m_ctx *m2m_ctx);
+/**
+ * v4l2_m2m_buf_done_and_job_finish() - return the source/destination buffers
+ * with the given state, inform the framework that the job has finished and
+ * let it clean up
+ *
+ * @m2m_dev: opaque pointer to the internal data to handle M2M context
+ * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
+ * @state: vb2 buffer state passed to v4l2_m2m_buf_done().
+ *
+ * Drivers that set V4L2_BUF_CAP_SUPPORTS_M2M_HOLD_CAPTURE_BUF must use this
+ * function instead of job_finish() to take held buffers into account. It is
+ * optional for other drivers.
+ *
+ * This function removes the source buffer from the ready list and returns
+ * it with the given state. The same is done for the destination buffer, unless
+ * it is marked 'held'. In that case the buffer is kept on the ready list.
+ *
+ * After that the job is finished (see job_finish()).
+ *
+ * This allows for multiple output buffers to be used to fill in a single
+ * capture buffer. This is typically used by stateless decoders where
+ * multiple e.g. H.264 slices contribute to a single decoded frame.
+ */
+void v4l2_m2m_buf_done_and_job_finish(struct v4l2_m2m_dev *m2m_dev,
+ struct v4l2_m2m_ctx *m2m_ctx,
+ enum vb2_buffer_state state);
+
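A sketch of a completion interrupt using the new helper; "struct demo_dev" and "struct demo_ctx" (a driver context embedding a struct v4l2_fh) are hypothetical:

	static irqreturn_t demo_irq(int irq, void *priv)
	{
		struct demo_dev *dev = priv;
		struct demo_ctx *ctx = v4l2_m2m_get_curr_priv(dev->m2m_dev);

		/* Returns the src buffer, and the dst buffer unless it is held. */
		v4l2_m2m_buf_done_and_job_finish(dev->m2m_dev, ctx->fh.m2m_ctx,
						 VB2_BUF_STATE_DONE);
		return IRQ_HANDLED;
	}
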
static inline void
v4l2_m2m_buf_done(struct vb2_v4l2_buffer *buf, enum vb2_buffer_state state)
{
@@ -180,6 +225,86 @@ v4l2_m2m_buf_done(struct vb2_v4l2_buffer *buf, enum vb2_buffer_state state)
}
/**
+ * v4l2_m2m_clear_state() - clear encoding/decoding state
+ *
+ * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
+ */
+static inline void
+v4l2_m2m_clear_state(struct v4l2_m2m_ctx *m2m_ctx)
+{
+ m2m_ctx->next_buf_last = false;
+ m2m_ctx->is_draining = false;
+ m2m_ctx->has_stopped = false;
+}
+
+/**
+ * v4l2_m2m_mark_stopped() - set current encoding/decoding state as stopped
+ *
+ * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
+ */
+static inline void
+v4l2_m2m_mark_stopped(struct v4l2_m2m_ctx *m2m_ctx)
+{
+ m2m_ctx->next_buf_last = false;
+ m2m_ctx->is_draining = false;
+ m2m_ctx->has_stopped = true;
+}
+
+/**
+ * v4l2_m2m_dst_buf_is_last() - return whether the next queued capture buffer
+ * of the current encoding/decoding session is the last one
+ *
+ * This last capture buffer should be tagged with V4L2_BUF_FLAG_LAST to signal
+ * the end of the capture session.
+ *
+ * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
+ */
+static inline bool
+v4l2_m2m_dst_buf_is_last(struct v4l2_m2m_ctx *m2m_ctx)
+{
+ return m2m_ctx->is_draining && m2m_ctx->next_buf_last;
+}
+
+/**
+ * v4l2_m2m_has_stopped() - return the current encoding/decoding session
+ * stopped state
+ *
+ * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
+ */
+static inline bool
+v4l2_m2m_has_stopped(struct v4l2_m2m_ctx *m2m_ctx)
+{
+ return m2m_ctx->has_stopped;
+}
+
+/**
+ * v4l2_m2m_is_last_draining_src_buf() - return whether the given output buffer
+ * is the last one to drain in the current encoding/decoding session
+ *
+ * This identifies the last output buffer queued before a session stop was
+ * requested; once this buffer has been processed, the encoding/decoding
+ * session actually stops.
+ *
+ * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
+ * @vbuf: pointer to struct &v4l2_buffer
+ */
+static inline bool
+v4l2_m2m_is_last_draining_src_buf(struct v4l2_m2m_ctx *m2m_ctx,
+ struct vb2_v4l2_buffer *vbuf)
+{
+ return m2m_ctx->is_draining && vbuf == m2m_ctx->last_src_buf;
+}
+
+/**
+ * v4l2_m2m_last_buffer_done() - mark the buffer with the LAST flag and return
+ * it in the DONE state
+ *
+ * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
+ * @vbuf: pointer to struct &v4l2_buffer
+ */
+void v4l2_m2m_last_buffer_done(struct v4l2_m2m_ctx *m2m_ctx,
+ struct vb2_v4l2_buffer *vbuf);
+
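A sketch combining the draining helpers at the end of processing; "ctx" is a hypothetical driver context embedding a struct v4l2_fh:

	struct vb2_v4l2_buffer *src, *dst;

	src = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
	dst = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);

	if (v4l2_m2m_is_last_draining_src_buf(ctx->fh.m2m_ctx, src))
		v4l2_m2m_last_buffer_done(ctx->fh.m2m_ctx, dst);
	else
		v4l2_m2m_buf_done(dst, VB2_BUF_STATE_DONE);
	v4l2_m2m_buf_done(src, VB2_BUF_STATE_DONE);
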
+/**
* v4l2_m2m_reqbufs() - multi-queue-aware REQBUFS multiplexer
*
* @file: pointer to struct &file
@@ -277,6 +402,46 @@ int v4l2_m2m_streamoff(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
enum v4l2_buf_type type);
/**
+ * v4l2_m2m_update_start_streaming_state() - update the encoding/decoding
+ * session state when streaming on a video queue is started
+ *
+ * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
+ * @q: queue
+ */
+void v4l2_m2m_update_start_streaming_state(struct v4l2_m2m_ctx *m2m_ctx,
+ struct vb2_queue *q);
+
+/**
+ * v4l2_m2m_update_stop_streaming_state() - update the encoding/decoding
+ * session state when streaming on a video queue is stopped
+ *
+ * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
+ * @q: queue
+ */
+void v4l2_m2m_update_stop_streaming_state(struct v4l2_m2m_ctx *m2m_ctx,
+ struct vb2_queue *q);
+
+/**
+ * v4l2_m2m_encoder_cmd() - execute an encoder command
+ *
+ * @file: pointer to struct &file
+ * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
+ * @ec: pointer to the encoder command
+ */
+int v4l2_m2m_encoder_cmd(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
+ struct v4l2_encoder_cmd *ec);
+
+/**
+ * v4l2_m2m_decoder_cmd() - execute a decoder command
+ *
+ * @file: pointer to struct &file
+ * @m2m_ctx: m2m context assigned to the instance given by struct &v4l2_m2m_ctx
+ * @dc: pointer to the decoder command
+ */
+int v4l2_m2m_decoder_cmd(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
+ struct v4l2_decoder_cmd *dc);
+
+/**
* v4l2_m2m_poll() - poll replacement, for destination buffers only
*
* @file: pointer to struct &file
@@ -668,10 +833,18 @@ int v4l2_m2m_ioctl_streamon(struct file *file, void *fh,
enum v4l2_buf_type type);
int v4l2_m2m_ioctl_streamoff(struct file *file, void *fh,
enum v4l2_buf_type type);
+int v4l2_m2m_ioctl_encoder_cmd(struct file *file, void *fh,
+ struct v4l2_encoder_cmd *ec);
+int v4l2_m2m_ioctl_decoder_cmd(struct file *file, void *fh,
+ struct v4l2_decoder_cmd *dc);
int v4l2_m2m_ioctl_try_encoder_cmd(struct file *file, void *fh,
struct v4l2_encoder_cmd *ec);
int v4l2_m2m_ioctl_try_decoder_cmd(struct file *file, void *fh,
struct v4l2_decoder_cmd *dc);
+int v4l2_m2m_ioctl_stateless_try_decoder_cmd(struct file *file, void *fh,
+ struct v4l2_decoder_cmd *dc);
+int v4l2_m2m_ioctl_stateless_decoder_cmd(struct file *file, void *priv,
+ struct v4l2_decoder_cmd *dc);
int v4l2_m2m_fop_mmap(struct file *file, struct vm_area_struct *vma);
__poll_t v4l2_m2m_fop_poll(struct file *file, poll_table *wait);
diff --git a/include/media/v4l2-rect.h b/include/media/v4l2-rect.h
index c86474dc7b55..8800a640c224 100644
--- a/include/media/v4l2-rect.h
+++ b/include/media/v4l2-rect.h
@@ -63,10 +63,10 @@ static inline void v4l2_rect_map_inside(struct v4l2_rect *r,
r->left = boundary->left;
if (r->top < boundary->top)
r->top = boundary->top;
- if (r->left + r->width > boundary->width)
- r->left = boundary->width - r->width;
- if (r->top + r->height > boundary->height)
- r->top = boundary->height - r->height;
+ if (r->left + r->width > boundary->left + boundary->width)
+ r->left = boundary->left + boundary->width - r->width;
+ if (r->top + r->height > boundary->top + boundary->height)
+ r->top = boundary->top + boundary->height - r->height;
}
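As a worked example of the fix above: with a boundary of left=100 and width=200 (right edge at 300) and a rectangle of left=280 and width=50, the old code clamped left to 200 - 50 = 150, while the corrected code clamps it to 100 + 200 - 50 = 250, keeping the rectangle flush against the boundary's right edge instead of over-shifting it.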
/**
diff --git a/include/media/v4l2-subdev.h b/include/media/v4l2-subdev.h
index 71f1f2f0da53..a4848de59852 100644
--- a/include/media/v4l2-subdev.h
+++ b/include/media/v4l2-subdev.h
@@ -1090,10 +1090,10 @@ extern const struct v4l2_subdev_ops v4l2_subdev_call_wrappers;
* @sd: pointer to the &struct v4l2_subdev
* @o: name of the element at &struct v4l2_subdev_ops that contains @f.
* Each element there groups a set of callbacks functions.
- * @f: callback function that will be called if @cond matches.
+ * @f: callback function to be called.
* The callback functions are defined in groups, according to
* each element at &struct v4l2_subdev_ops.
- * @args...: arguments for @f.
+ * @args: arguments for @f.
*
* Example: err = v4l2_subdev_call(sd, video, s_std, norm);
*/
diff --git a/include/media/videobuf2-core.h b/include/media/videobuf2-core.h
index 640aabe69450..f11b96514cf7 100644
--- a/include/media/videobuf2-core.h
+++ b/include/media/videobuf2-core.h
@@ -505,10 +505,15 @@ struct vb2_buf_ops {
* @buf_ops: callbacks to deliver buffer information.
* between user-space and kernel-space.
* @drv_priv: driver private data.
+ * @subsystem_flags: Flags specific to the subsystem (V4L2/DVB/etc.). Not used
+ * by the vb2 core.
* @buf_struct_size: size of the driver-specific buffer structure;
* "0" indicates the driver doesn't want to use a custom buffer
- * structure type. for example, ``sizeof(struct vb2_v4l2_buffer)``
- * will be used for v4l2.
+ * structure type. In that case a subsystem-specific struct
+ * will be used (in the case of V4L2 that is
+ * ``sizeof(struct vb2_v4l2_buffer)``). The first field of the
+ * driver-specific buffer structure must be the subsystem-specific
+ * struct (vb2_v4l2_buffer in the case of V4L2).
* @timestamp_flags: Timestamp flags; ``V4L2_BUF_FLAG_TIMESTAMP_*`` and
* ``V4L2_BUF_FLAG_TSTAMP_SRC_*``
* @gfp_flags: additional gfp flags used when allocating the buffers.
@@ -571,6 +576,7 @@ struct vb2_queue {
const struct vb2_buf_ops *buf_ops;
void *drv_priv;
+ u32 subsystem_flags;
unsigned int buf_struct_size;
u32 timestamp_flags;
gfp_t gfp_flags;
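An illustrative sketch of the buf_struct_size rule described above for a hypothetical V4L2 driver: the driver-specific buffer structure must begin with the subsystem buffer struct.

	struct demo_buffer {
		struct vb2_v4l2_buffer vb;	/* must be the first member */
		struct list_head list;
	};

	/* in queue setup: */
	q->buf_struct_size = sizeof(struct demo_buffer);
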
diff --git a/include/media/videobuf2-v4l2.h b/include/media/videobuf2-v4l2.h
index 8a10889dc2fd..59bf33a12648 100644
--- a/include/media/videobuf2-v4l2.h
+++ b/include/media/videobuf2-v4l2.h
@@ -33,6 +33,7 @@
* @timecode: frame timecode.
* @sequence: sequence count of this frame.
* @request_fd: the request_fd associated with this buffer
+ * @is_held: if true, then this capture buffer was held
* @planes: plane information (userptr/fd, length, bytesused, data_offset).
*
* Should contain enough information to be able to cover all the fields
@@ -46,9 +47,13 @@ struct vb2_v4l2_buffer {
struct v4l2_timecode timecode;
__u32 sequence;
__s32 request_fd;
+ bool is_held;
struct vb2_plane planes[VB2_MAX_PLANES];
};
+/* VB2 V4L2 flags as set in vb2_queue.subsystem_flags */
+#define VB2_V4L2_FL_SUPPORTS_M2M_HOLD_CAPTURE_BUF (1 << 0)
+
/*
* to_vb2_v4l2_buffer() - cast struct vb2_buffer * to struct vb2_v4l2_buffer *
*/
diff --git a/include/net/act_api.h b/include/net/act_api.h
index b18c699681ca..71347a90a9d1 100644
--- a/include/net/act_api.h
+++ b/include/net/act_api.h
@@ -23,7 +23,6 @@ struct tc_action_ops;
struct tc_action {
const struct tc_action_ops *ops;
__u32 type; /* for backward compat(TCA_OLD_COMPAT) */
- __u32 order;
struct tcf_idrinfo *idrinfo;
u32 tcfa_index;
@@ -41,6 +40,7 @@ struct tc_action {
struct gnet_stats_queue __percpu *cpu_qstats;
struct tc_cookie __rcu *act_cookie;
struct tcf_chain __rcu *goto_chain;
+ u32 tcfa_flags;
};
#define tcf_index common.tcfa_index
#define tcf_refcnt common.tcfa_refcnt
@@ -94,7 +94,7 @@ struct tc_action_ops {
int (*init)(struct net *net, struct nlattr *nla,
struct nlattr *est, struct tc_action **act, int ovr,
int bind, bool rtnl_held, struct tcf_proto *tp,
- struct netlink_ext_ack *extack);
+ u32 flags, struct netlink_ext_ack *extack);
int (*walk)(struct net *, struct sk_buff *,
struct netlink_callback *, int,
const struct tc_action_ops *,
@@ -154,7 +154,11 @@ int tcf_generic_walker(struct tc_action_net *tn, struct sk_buff *skb,
int tcf_idr_search(struct tc_action_net *tn, struct tc_action **a, u32 index);
int tcf_idr_create(struct tc_action_net *tn, u32 index, struct nlattr *est,
struct tc_action **a, const struct tc_action_ops *ops,
- int bind, bool cpustats);
+ int bind, bool cpustats, u32 flags);
+int tcf_idr_create_from_flags(struct tc_action_net *tn, u32 index,
+ struct nlattr *est, struct tc_action **a,
+ const struct tc_action_ops *ops, int bind,
+ u32 flags);
void tcf_idr_insert(struct tc_action_net *tn, struct tc_action *a);
void tcf_idr_cleanup(struct tc_action_net *tn, u32 index);
@@ -186,6 +190,43 @@ int tcf_action_dump(struct sk_buff *skb, struct tc_action *actions[], int bind,
int ref);
int tcf_action_dump_old(struct sk_buff *skb, struct tc_action *a, int, int);
int tcf_action_dump_1(struct sk_buff *skb, struct tc_action *a, int, int);
+
+static inline void tcf_action_update_bstats(struct tc_action *a,
+ struct sk_buff *skb)
+{
+ if (likely(a->cpu_bstats)) {
+ bstats_cpu_update(this_cpu_ptr(a->cpu_bstats), skb);
+ return;
+ }
+ spin_lock(&a->tcfa_lock);
+ bstats_update(&a->tcfa_bstats, skb);
+ spin_unlock(&a->tcfa_lock);
+}
+
+static inline void tcf_action_inc_drop_qstats(struct tc_action *a)
+{
+ if (likely(a->cpu_qstats)) {
+ qstats_drop_inc(this_cpu_ptr(a->cpu_qstats));
+ return;
+ }
+ spin_lock(&a->tcfa_lock);
+ qstats_drop_inc(&a->tcfa_qstats);
+ spin_unlock(&a->tcfa_lock);
+}
+
+static inline void tcf_action_inc_overlimit_qstats(struct tc_action *a)
+{
+ if (likely(a->cpu_qstats)) {
+ qstats_overlimit_inc(this_cpu_ptr(a->cpu_qstats));
+ return;
+ }
+ spin_lock(&a->tcfa_lock);
+ qstats_overlimit_inc(&a->tcfa_qstats);
+ spin_unlock(&a->tcfa_lock);
+}
+
+void tcf_action_update_stats(struct tc_action *a, u64 bytes, u32 packets,
+ bool drop, bool hw);
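A sketch of how a hypothetical action's .act() callback might use the stats helpers above; struct tcf_demo, to_demo() and demo_should_pass() are placeholders, not existing kernel symbols:

	struct tcf_demo {
		struct tc_action common;
	};
	#define to_demo(a) ((struct tcf_demo *)a)

	static int tcf_demo_act(struct sk_buff *skb, const struct tc_action *a,
				struct tcf_result *res)
	{
		struct tcf_demo *d = to_demo(a);

		tcf_action_update_bstats(&d->common, skb);
		if (!demo_should_pass(skb)) {
			tcf_action_inc_drop_qstats(&d->common);
			return TC_ACT_SHOT;
		}
		return TC_ACT_PIPE;
	}
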
int tcf_action_copy_stats(struct sk_buff *, struct tc_action *, int);
int tcf_action_check_ctrlact(int action, struct tcf_proto *tp,
diff --git a/include/net/addrconf.h b/include/net/addrconf.h
index 3f62b347b04a..a088349dd94f 100644
--- a/include/net/addrconf.h
+++ b/include/net/addrconf.h
@@ -202,11 +202,11 @@ u32 ipv6_addr_label(struct net *net, const struct in6_addr *addr,
/*
* multicast prototypes (mcast.c)
*/
-static inline int ipv6_mc_may_pull(struct sk_buff *skb,
- unsigned int len)
+static inline bool ipv6_mc_may_pull(struct sk_buff *skb,
+ unsigned int len)
{
if (skb_transport_offset(skb) + ipv6_transport_len(skb) < len)
- return 0;
+ return false;
return pskb_may_pull(skb, len);
}
@@ -437,7 +437,7 @@ static inline void addrconf_addr_solict_mult(const struct in6_addr *addr,
static inline bool ipv6_addr_is_ll_all_nodes(const struct in6_addr *addr)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
- __be64 *p = (__be64 *)addr;
+ __be64 *p = (__force __be64 *)addr;
return ((p[0] ^ cpu_to_be64(0xff02000000000000UL)) | (p[1] ^ cpu_to_be64(1))) == 0UL;
#else
return ((addr->s6_addr32[0] ^ htonl(0xff020000)) |
@@ -449,7 +449,7 @@ static inline bool ipv6_addr_is_ll_all_nodes(const struct in6_addr *addr)
static inline bool ipv6_addr_is_ll_all_routers(const struct in6_addr *addr)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
- __be64 *p = (__be64 *)addr;
+ __be64 *p = (__force __be64 *)addr;
return ((p[0] ^ cpu_to_be64(0xff02000000000000UL)) | (p[1] ^ cpu_to_be64(2))) == 0UL;
#else
return ((addr->s6_addr32[0] ^ htonl(0xff020000)) |
@@ -466,7 +466,7 @@ static inline bool ipv6_addr_is_isatap(const struct in6_addr *addr)
static inline bool ipv6_addr_is_solict_mult(const struct in6_addr *addr)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
- __be64 *p = (__be64 *)addr;
+ __be64 *p = (__force __be64 *)addr;
return ((p[0] ^ cpu_to_be64(0xff02000000000000UL)) |
((p[1] ^ cpu_to_be64(0x00000001ff000000UL)) &
cpu_to_be64(0xffffffffff000000UL))) == 0UL;
@@ -481,7 +481,7 @@ static inline bool ipv6_addr_is_solict_mult(const struct in6_addr *addr)
static inline bool ipv6_addr_is_all_snoopers(const struct in6_addr *addr)
{
#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) && BITS_PER_LONG == 64
- __be64 *p = (__be64 *)addr;
+ __be64 *p = (__force __be64 *)addr;
return ((p[0] ^ cpu_to_be64(0xff02000000000000UL)) |
(p[1] ^ cpu_to_be64(0x6a))) == 0UL;
diff --git a/include/net/af_rxrpc.h b/include/net/af_rxrpc.h
index 1abae3c340a5..04e97bab6f28 100644
--- a/include/net/af_rxrpc.h
+++ b/include/net/af_rxrpc.h
@@ -16,6 +16,12 @@ struct sock;
struct socket;
struct rxrpc_call;
+enum rxrpc_interruptibility {
+ RXRPC_INTERRUPTIBLE, /* Call is interruptible */
+ RXRPC_PREINTERRUPTIBLE, /* Call can be cancelled whilst waiting for a slot */
+ RXRPC_UNINTERRUPTIBLE, /* Call should not be interruptible at all */
+};
+
/*
* Debug ID counter for tracing.
*/
@@ -41,7 +47,7 @@ struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *,
gfp_t,
rxrpc_notify_rx_t,
bool,
- bool,
+ enum rxrpc_interruptibility,
unsigned int);
int rxrpc_kernel_send_data(struct socket *, struct rxrpc_call *,
struct msghdr *, size_t,
@@ -58,9 +64,7 @@ int rxrpc_kernel_charge_accept(struct socket *, rxrpc_notify_rx_t,
rxrpc_user_attach_call_t, unsigned long, gfp_t,
unsigned int);
void rxrpc_kernel_set_tx_length(struct socket *, struct rxrpc_call *, s64);
-bool rxrpc_kernel_check_life(const struct socket *, const struct rxrpc_call *,
- u32 *);
-void rxrpc_kernel_probe_life(struct socket *, struct rxrpc_call *);
+bool rxrpc_kernel_check_life(const struct socket *, const struct rxrpc_call *);
u32 rxrpc_kernel_get_epoch(struct socket *, struct rxrpc_call *);
bool rxrpc_kernel_get_reply_time(struct socket *, struct rxrpc_call *,
ktime_t *);
diff --git a/include/net/af_unix.h b/include/net/af_unix.h
index 3426d6dacc45..17e10fba2152 100644
--- a/include/net/af_unix.h
+++ b/include/net/af_unix.h
@@ -41,6 +41,10 @@ struct unix_skb_parms {
u32 consumed;
} __randomize_layout;
+struct scm_stat {
+ u32 nr_fds;
+};
+
#define UNIXCB(skb) (*(struct unix_skb_parms *)&((skb)->cb))
#define unix_state_lock(s) spin_lock(&unix_sk(s)->lock)
@@ -65,6 +69,7 @@ struct unix_sock {
#define UNIX_GC_MAYBE_CYCLE 1
struct socket_wq peer_wq;
wait_queue_entry_t peer_wake;
+ struct scm_stat scm_stat;
};
static inline struct unix_sock *unix_sk(const struct sock *sk)
diff --git a/include/net/af_vsock.h b/include/net/af_vsock.h
index 80ea0f93d3f7..b1c717286993 100644
--- a/include/net/af_vsock.h
+++ b/include/net/af_vsock.h
@@ -10,7 +10,7 @@
#include <linux/kernel.h>
#include <linux/workqueue.h>
-#include <linux/vm_sockets.h>
+#include <uapi/linux/vm_sockets.h>
#include "vsock_addr.h"
@@ -27,6 +27,7 @@ extern spinlock_t vsock_table_lock;
struct vsock_sock {
/* sk must be the first member. */
struct sock sk;
+ const struct vsock_transport *transport;
struct sockaddr_vm local_addr;
struct sockaddr_vm remote_addr;
/* Links for the global tables of bound and connected sockets. */
@@ -64,16 +65,18 @@ struct vsock_sock {
bool sent_request;
bool ignore_connecting_rst;
+ /* Protected by lock_sock(sk) */
+ u64 buffer_size;
+ u64 buffer_min_size;
+ u64 buffer_max_size;
+
/* Private to transport. */
void *trans;
};
s64 vsock_stream_has_data(struct vsock_sock *vsk);
s64 vsock_stream_has_space(struct vsock_sock *vsk);
-struct sock *__vsock_create(struct net *net,
- struct socket *sock,
- struct sock *parent,
- gfp_t priority, unsigned short type, int kern);
+struct sock *vsock_create_connected(struct sock *parent);
/**** TRANSPORT ****/
@@ -88,7 +91,19 @@ struct vsock_transport_send_notify_data {
u64 data2; /* Transport-defined. */
};
+/* Transport features flags */
+/* Transport provides host->guest communication */
+#define VSOCK_TRANSPORT_F_H2G 0x00000001
+/* Transport provides guest->host communication */
+#define VSOCK_TRANSPORT_F_G2H 0x00000002
+/* Transport provides DGRAM communication */
+#define VSOCK_TRANSPORT_F_DGRAM 0x00000004
+/* Transport provides local (loopback) communication */
+#define VSOCK_TRANSPORT_F_LOCAL 0x00000008
+
struct vsock_transport {
+ struct module *module;
+
/* Initialize/tear-down socket. */
int (*init)(struct vsock_sock *, struct vsock_sock *);
void (*destruct)(struct vsock_sock *);
@@ -139,33 +154,23 @@ struct vsock_transport {
struct vsock_transport_send_notify_data *);
int (*notify_send_post_enqueue)(struct vsock_sock *, ssize_t,
struct vsock_transport_send_notify_data *);
+ /* sk_lock held by the caller */
+ void (*notify_buffer_size)(struct vsock_sock *, u64 *);
/* Shutdown. */
int (*shutdown)(struct vsock_sock *, int);
- /* Buffer sizes. */
- void (*set_buffer_size)(struct vsock_sock *, u64);
- void (*set_min_buffer_size)(struct vsock_sock *, u64);
- void (*set_max_buffer_size)(struct vsock_sock *, u64);
- u64 (*get_buffer_size)(struct vsock_sock *);
- u64 (*get_min_buffer_size)(struct vsock_sock *);
- u64 (*get_max_buffer_size)(struct vsock_sock *);
-
/* Addressing. */
u32 (*get_local_cid)(void);
};
/**** CORE ****/
-int __vsock_core_init(const struct vsock_transport *t, struct module *owner);
-static inline int vsock_core_init(const struct vsock_transport *t)
-{
- return __vsock_core_init(t, THIS_MODULE);
-}
-void vsock_core_exit(void);
+int vsock_core_register(const struct vsock_transport *t, int features);
+void vsock_core_unregister(const struct vsock_transport *t);
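A sketch (hypothetical "demo_transport" ops) of the registration flow that replaces vsock_core_init()/vsock_core_exit(); the mandatory transport callbacks are elided:

	static struct vsock_transport demo_transport = {
		.module		= THIS_MODULE,
		/* ... mandatory callbacks ... */
	};

	static int __init demo_init(void)
	{
		return vsock_core_register(&demo_transport, VSOCK_TRANSPORT_F_H2G);
	}

	static void __exit demo_exit(void)
	{
		vsock_core_unregister(&demo_transport);
	}
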
/* The transport may downcast this to access transport-specific functions */
-const struct vsock_transport *vsock_core_get_transport(void);
+const struct vsock_transport *vsock_core_get_transport(struct vsock_sock *vsk);
/**** UTILS ****/
@@ -193,6 +198,8 @@ struct sock *vsock_find_connected_socket(struct sockaddr_vm *src,
struct sockaddr_vm *dst);
void vsock_remove_sock(struct vsock_sock *vsk);
void vsock_for_each_connected_socket(void (*fn)(struct sock *sk));
+int vsock_assign_transport(struct vsock_sock *vsk, struct vsock_sock *psk);
+bool vsock_find_cid(unsigned int cid);
/**** TAP ****/
diff --git a/include/net/arp.h b/include/net/arp.h
index c8f580a0e6b1..4950191f6b2b 100644
--- a/include/net/arp.h
+++ b/include/net/arp.h
@@ -57,8 +57,8 @@ static inline void __ipv4_confirm_neigh(struct net_device *dev, u32 key)
unsigned long now = jiffies;
/* avoid dirtying neighbour */
- if (n->confirmed != now)
- n->confirmed = now;
+ if (READ_ONCE(n->confirmed) != now)
+ WRITE_ONCE(n->confirmed, now);
}
rcu_read_unlock_bh();
}
diff --git a/include/net/bluetooth/bluetooth.h b/include/net/bluetooth/bluetooth.h
index fabee6db0abb..e42bb8e03c09 100644
--- a/include/net/bluetooth/bluetooth.h
+++ b/include/net/bluetooth/bluetooth.h
@@ -129,6 +129,8 @@ void bt_warn(const char *fmt, ...);
__printf(1, 2)
void bt_err(const char *fmt, ...);
__printf(1, 2)
+void bt_warn_ratelimited(const char *fmt, ...);
+__printf(1, 2)
void bt_err_ratelimited(const char *fmt, ...);
#define BT_INFO(fmt, ...) bt_info(fmt "\n", ##__VA_ARGS__)
@@ -136,8 +138,6 @@ void bt_err_ratelimited(const char *fmt, ...);
#define BT_ERR(fmt, ...) bt_err(fmt "\n", ##__VA_ARGS__)
#define BT_DBG(fmt, ...) pr_debug(fmt "\n", ##__VA_ARGS__)
-#define BT_ERR_RATELIMITED(fmt, ...) bt_err_ratelimited(fmt "\n", ##__VA_ARGS__)
-
#define bt_dev_info(hdev, fmt, ...) \
BT_INFO("%s: " fmt, (hdev)->name, ##__VA_ARGS__)
#define bt_dev_warn(hdev, fmt, ...) \
@@ -147,8 +147,10 @@ void bt_err_ratelimited(const char *fmt, ...);
#define bt_dev_dbg(hdev, fmt, ...) \
BT_DBG("%s: " fmt, (hdev)->name, ##__VA_ARGS__)
+#define bt_dev_warn_ratelimited(hdev, fmt, ...) \
+ bt_warn_ratelimited("%s: " fmt, (hdev)->name, ##__VA_ARGS__)
#define bt_dev_err_ratelimited(hdev, fmt, ...) \
- BT_ERR_RATELIMITED("%s: " fmt, (hdev)->name, ##__VA_ARGS__)
+ bt_err_ratelimited("%s: " fmt, (hdev)->name, ##__VA_ARGS__)
/* Connection and socket states */
enum {
diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h
index 5bc1e30dedde..6293bdd7d862 100644
--- a/include/net/bluetooth/hci.h
+++ b/include/net/bluetooth/hci.h
@@ -27,6 +27,7 @@
#define HCI_MAX_ACL_SIZE 1024
#define HCI_MAX_SCO_SIZE 255
+#define HCI_MAX_ISO_SIZE 251
#define HCI_MAX_EVENT_SIZE 260
#define HCI_MAX_FRAME_SIZE (HCI_MAX_ACL_SIZE + 4)
@@ -303,6 +304,7 @@ enum {
#define HCI_ACLDATA_PKT 0x02
#define HCI_SCODATA_PKT 0x03
#define HCI_EVENT_PKT 0x04
+#define HCI_ISODATA_PKT 0x05
#define HCI_DIAG_PKT 0xf0
#define HCI_VENDOR_PKT 0xff
@@ -352,6 +354,15 @@ enum {
#define ACL_ACTIVE_BCAST 0x04
#define ACL_PICO_BCAST 0x08
+/* ISO PB flags */
+#define ISO_START 0x00
+#define ISO_CONT 0x01
+#define ISO_SINGLE 0x02
+#define ISO_END 0x03
+
+/* ISO TS flags */
+#define ISO_TS 0x01
+
/* Baseband links */
#define SCO_LINK 0x00
#define ACL_LINK 0x01
@@ -359,6 +370,7 @@ enum {
/* Low Energy links do not have defined link type. Use invented one */
#define LE_LINK 0x80
#define AMP_LINK 0x81
+#define ISO_LINK 0x82
#define INVALID_LINK 0xff
/* LMP features */
@@ -440,6 +452,8 @@ enum {
#define HCI_LE_PHY_2M 0x01
#define HCI_LE_PHY_CODED 0x08
#define HCI_LE_CHAN_SEL_ALG2 0x40
+#define HCI_LE_CIS_MASTER 0x10
+#define HCI_LE_CIS_SLAVE 0x20
/* Connection modes */
#define HCI_CM_ACTIVE 0x0000
@@ -1718,6 +1732,86 @@ struct hci_cp_le_set_adv_set_rand_addr {
bdaddr_t bdaddr;
} __packed;
+#define HCI_OP_LE_READ_BUFFER_SIZE_V2 0x2060
+struct hci_rp_le_read_buffer_size_v2 {
+ __u8 status;
+ __le16 acl_mtu;
+ __u8 acl_max_pkt;
+ __le16 iso_mtu;
+ __u8 iso_max_pkt;
+} __packed;
+
+#define HCI_OP_LE_READ_ISO_TX_SYNC 0x2061
+struct hci_cp_le_read_iso_tx_sync {
+ __le16 handle;
+} __packed;
+
+struct hci_rp_le_read_iso_tx_sync {
+ __u8 status;
+ __le16 handle;
+ __le16 seq;
+ __le32 timestamp;
+ __u8 offset[3];
+} __packed;
+
+#define HCI_OP_LE_SET_CIG_PARAMS 0x2062
+struct hci_cis_params {
+ __u8 cis_id;
+ __le16 m_sdu;
+ __le16 s_sdu;
+ __u8 m_phy;
+ __u8 s_phy;
+ __u8 m_rtn;
+ __u8 s_rtn;
+} __packed;
+
+struct hci_cp_le_set_cig_params {
+ __u8 cig_id;
+ __u8 m_interval[3];
+ __u8 s_interval[3];
+ __u8 sca;
+ __u8 packing;
+ __u8 framing;
+ __le16 m_latency;
+ __le16 s_latency;
+ __u8 num_cis;
+ struct hci_cis_params cis[0];
+} __packed;
+
+struct hci_rp_le_set_cig_params {
+ __u8 status;
+ __u8 cig_id;
+ __u8 num_handles;
+ __le16 handle[0];
+} __packed;
+
+#define HCI_OP_LE_CREATE_CIS 0x2064
+struct hci_cis {
+ __le16 cis_handle;
+ __le16 acl_handle;
+} __packed;
+
+struct hci_cp_le_create_cis {
+ __u8 num_cis;
+ struct hci_cis cis[0];
+} __packed;
+
+#define HCI_OP_LE_REMOVE_CIG 0x2065
+struct hci_cp_le_remove_cig {
+ __u8 cig_id;
+} __packed;
+
+#define HCI_OP_LE_ACCEPT_CIS 0x2066
+struct hci_cp_le_accept_cis {
+ __le16 handle;
+} __packed;
+
+#define HCI_OP_LE_REJECT_CIS 0x2067
+struct hci_cp_le_reject_cis {
+ __le16 handle;
+ __u8 reason;
+} __packed;
+
/* ---- HCI Events ---- */
#define HCI_EV_INQUIRY_COMPLETE 0x01
@@ -2186,6 +2280,14 @@ struct hci_ev_le_direct_adv_info {
__s8 rssi;
} __packed;
+#define HCI_EV_LE_PHY_UPDATE_COMPLETE 0x0c
+struct hci_ev_le_phy_update_complete {
+ __u8 status;
+ __le16 handle;
+ __u8 tx_phy;
+ __u8 rx_phy;
+} __packed;
+
#define HCI_EV_LE_EXT_ADV_REPORT 0x0d
struct hci_ev_le_ext_adv_report {
__le16 evt_type;
@@ -2226,6 +2328,34 @@ struct hci_evt_le_ext_adv_set_term {
__u8 num_evts;
} __packed;
+#define HCI_EVT_LE_CIS_ESTABLISHED 0x19
+struct hci_evt_le_cis_established {
+ __u8 status;
+ __le16 handle;
+ __u8 cig_sync_delay[3];
+ __u8 cis_sync_delay[3];
+ __u8 m_latency[3];
+ __u8 s_latency[3];
+ __u8 m_phy;
+ __u8 s_phy;
+ __u8 nse;
+ __u8 m_bn;
+ __u8 s_bn;
+ __u8 m_ft;
+ __u8 s_ft;
+ __le16 m_mtu;
+ __le16 s_mtu;
+ __le16 interval;
+} __packed;
+
+#define HCI_EVT_LE_CIS_REQ 0x1a
+struct hci_evt_le_cis_req {
+ __le16 acl_handle;
+ __le16 cis_handle;
+ __u8 cig_id;
+ __u8 cis_id;
+} __packed;
+
#define HCI_EV_VENDOR 0xff
/* Internal events generated by Bluetooth stack */
@@ -2254,6 +2384,7 @@ struct hci_ev_si_security {
#define HCI_EVENT_HDR_SIZE 2
#define HCI_ACL_HDR_SIZE 4
#define HCI_SCO_HDR_SIZE 3
+#define HCI_ISO_HDR_SIZE 4
struct hci_command_hdr {
__le16 opcode; /* OCF & OGF */
@@ -2275,6 +2406,30 @@ struct hci_sco_hdr {
__u8 dlen;
} __packed;
+struct hci_iso_hdr {
+ __le16 handle;
+ __le16 dlen;
+ __u8 data[0];
+} __packed;
+
+/* ISO data packet status flags */
+#define HCI_ISO_STATUS_VALID 0x00
+#define HCI_ISO_STATUS_INVALID 0x01
+#define HCI_ISO_STATUS_NOP 0x02
+
+#define HCI_ISO_DATA_HDR_SIZE 4
+struct hci_iso_data_hdr {
+ __le16 sn;
+ __le16 slen;
+};
+
+#define HCI_ISO_TS_DATA_HDR_SIZE 8
+struct hci_iso_ts_data_hdr {
+ __le32 ts;
+ __le16 sn;
+ __le16 slen;
+};
+
static inline struct hci_event_hdr *hci_event_hdr(const struct sk_buff *skb)
{
return (struct hci_event_hdr *) skb->data;
@@ -2300,4 +2455,14 @@ static inline struct hci_sco_hdr *hci_sco_hdr(const struct sk_buff *skb)
#define hci_handle(h) (h & 0x0fff)
#define hci_flags(h) (h >> 12)
+/* ISO handle and flags pack/unpack */
+#define hci_iso_flags_pb(f) (f & 0x0003)
+#define hci_iso_flags_ts(f) ((f >> 2) & 0x0001)
+#define hci_iso_flags_pack(pb, ts) ((pb & 0x03) | ((ts & 0x01) << 2))
+
+/* ISO data length and flags pack/unpack */
+#define hci_iso_data_len_pack(h, f) ((__u16) ((h) | ((f) << 14)))
+#define hci_iso_data_len(h) ((h) & 0x3fff)
+#define hci_iso_data_flags(h) ((h) >> 14)
+
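A sketch of unpacking an incoming ISO packet with the macros above; skb length checks are omitted and the layout is assumed valid:

	struct hci_iso_hdr *hdr = (void *)skb->data;
	__u16 handle = le16_to_cpu(hdr->handle);
	__u16 flags = hci_flags(handle);

	if (hci_iso_flags_ts(flags)) {
		struct hci_iso_ts_data_hdr *dh = (void *)hdr->data;
		__u16 dlen = le16_to_cpu(dh->slen);

		pr_debug("sdu len %u flags %u",
			 hci_iso_data_len(dlen), hci_iso_data_flags(dlen));
	}
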
#endif /* __HCI_H */
diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h
index b689aceb636b..89ecf0a80aa1 100644
--- a/include/net/bluetooth/hci_core.h
+++ b/include/net/bluetooth/hci_core.h
@@ -118,6 +118,13 @@ struct bt_uuid {
u8 svc_hint;
};
+struct blocked_key {
+ struct list_head list;
+ struct rcu_head rcu;
+ u8 type;
+ u8 val[16];
+};
+
struct smp_csrk {
bdaddr_t bdaddr;
u8 bdaddr_type;
@@ -397,6 +404,7 @@ struct hci_dev {
struct list_head le_conn_params;
struct list_head pend_le_conns;
struct list_head pend_le_reports;
+ struct list_head blocked_keys;
struct hci_dev_stats stat;
@@ -493,6 +501,8 @@ struct hci_conn {
__u16 le_supv_timeout;
__u8 le_adv_data[HCI_MAX_AD_LENGTH];
__u8 le_adv_data_len;
+ __u8 le_tx_phy;
+ __u8 le_rx_phy;
__s8 rssi;
__s8 tx_power;
__s8 max_tx_power;
@@ -1121,6 +1131,8 @@ struct smp_irk *hci_find_irk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
struct smp_irk *hci_add_irk(struct hci_dev *hdev, bdaddr_t *bdaddr,
u8 addr_type, u8 val[16], bdaddr_t *rpa);
void hci_remove_irk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type);
+bool hci_is_blocked_key(struct hci_dev *hdev, u8 type, u8 val[16]);
+void hci_blocked_keys_clear(struct hci_dev *hdev);
void hci_smp_irks_clear(struct hci_dev *hdev);
bool hci_bdaddr_is_paired(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type);
diff --git a/include/net/bluetooth/hci_mon.h b/include/net/bluetooth/hci_mon.h
index 240786b04a46..2d5fcda1bcd0 100644
--- a/include/net/bluetooth/hci_mon.h
+++ b/include/net/bluetooth/hci_mon.h
@@ -49,6 +49,8 @@ struct hci_mon_hdr {
#define HCI_MON_CTRL_CLOSE 15
#define HCI_MON_CTRL_COMMAND 16
#define HCI_MON_CTRL_EVENT 17
+#define HCI_MON_ISO_TX_PKT 18
+#define HCI_MON_ISO_RX_PKT 19
struct hci_mon_new_index {
__u8 type;
diff --git a/include/net/bluetooth/mgmt.h b/include/net/bluetooth/mgmt.h
index 9cee7ddc6741..a90666af05bd 100644
--- a/include/net/bluetooth/mgmt.h
+++ b/include/net/bluetooth/mgmt.h
@@ -654,6 +654,23 @@ struct mgmt_cp_set_phy_confguration {
} __packed;
#define MGMT_SET_PHY_CONFIGURATION_SIZE 4
+#define MGMT_OP_SET_BLOCKED_KEYS 0x0046
+
+#define HCI_BLOCKED_KEY_TYPE_LINKKEY 0x00
+#define HCI_BLOCKED_KEY_TYPE_LTK 0x01
+#define HCI_BLOCKED_KEY_TYPE_IRK 0x02
+
+struct mgmt_blocked_key_info {
+ __u8 type;
+ __u8 val[16];
+} __packed;
+
+struct mgmt_cp_set_blocked_keys {
+ __le16 key_count;
+ struct mgmt_blocked_key_info keys[0];
+} __packed;
+#define MGMT_OP_SET_BLOCKED_KEYS_SIZE 2
+
#define MGMT_EV_CMD_COMPLETE 0x0001
struct mgmt_ev_cmd_complete {
__le16 opcode;
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index 4ab2c49423dc..f22bd6c838a3 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -565,6 +565,7 @@ struct vif_params {
* with the get_key() callback, must be in little endian,
* length given by @seq_len.
* @seq_len: length of @seq.
+ * @vlan_id: vlan_id for VLAN group key (if nonzero)
* @mode: key install mode (RX_TX, NO_TX or SET_TX)
*/
struct key_params {
@@ -572,6 +573,7 @@ struct key_params {
const u8 *seq;
int key_len;
int seq_len;
+ u16 vlan_id;
u32 cipher;
enum nl80211_key_mode mode;
};
@@ -1124,6 +1126,7 @@ struct sta_txpwr {
* (bitmask of BIT(%NL80211_STA_FLAG_...))
* @listen_interval: listen interval or -1 for no change
* @aid: AID or zero for no change
+ * @vlan_id: VLAN ID for station (if nonzero)
* @peer_aid: mesh peer AID or zero for no change
* @plink_action: plink action to take
* @plink_state: set the peer link state for a station
@@ -1159,6 +1162,7 @@ struct station_parameters {
u32 sta_modify_mask;
int listen_interval;
u16 aid;
+ u16 vlan_id;
u16 peer_aid;
u8 supported_rates_len;
u8 plink_action;
@@ -2602,6 +2606,13 @@ enum wiphy_params_flags {
#define IEEE80211_DEFAULT_AIRTIME_WEIGHT 256
+/* The per TXQ device queue limit in airtime */
+#define IEEE80211_DEFAULT_AQL_TXQ_LIMIT_L 5000
+#define IEEE80211_DEFAULT_AQL_TXQ_LIMIT_H 12000
+
+/* The per interface airtime threshold to switch to lower queue limit */
+#define IEEE80211_AQL_THRESHOLD 24000
+
/**
* struct cfg80211_pmksa - PMK Security Association
*
@@ -3537,6 +3548,9 @@ struct cfg80211_update_owe_info {
*
* @start_radar_detection: Start radar detection in the driver.
*
+ * @end_cac: End running CAC, probably because a related CAC
+ * was finished on another phy.
+ *
* @update_ft_ies: Provide updated Fast BSS Transition information to the
* driver. If the SME is in the driver/firmware, this information can be
* used in building Authentication and Reassociation Request frames.
@@ -3863,6 +3877,8 @@ struct cfg80211_ops {
struct net_device *dev,
struct cfg80211_chan_def *chandef,
u32 cac_time_ms);
+ void (*end_cac)(struct wiphy *wiphy,
+ struct net_device *dev);
int (*update_ft_ies)(struct wiphy *wiphy, struct net_device *dev,
struct cfg80211_update_ft_ies_params *ftie);
int (*crit_proto_start)(struct wiphy *wiphy,
@@ -6593,7 +6609,7 @@ struct cfg80211_roam_info {
* time it is accessed in __cfg80211_roamed() due to delay in scheduling
* rdev->event_work. In case of any failures, the reference is released
* either in cfg80211_roamed() or in __cfg80211_romed(), Otherwise, it will be
- * released while diconneting from the current bss.
+ * released while disconnecting from the current bss.
*/
void cfg80211_roamed(struct net_device *dev, struct cfg80211_roam_info *info,
gfp_t gfp);
diff --git a/include/net/compat.h b/include/net/compat.h
index f277653c7e17..e341260642fe 100644
--- a/include/net/compat.h
+++ b/include/net/compat.h
@@ -38,6 +38,9 @@ struct compat_cmsghdr {
#define compat_mmsghdr mmsghdr
#endif /* defined(CONFIG_COMPAT) */
+int __get_compat_msghdr(struct msghdr *kmsg, struct compat_msghdr __user *umsg,
+ struct sockaddr __user **save_addr, compat_uptr_t *ptr,
+ compat_size_t *len);
int get_compat_msghdr(struct msghdr *, struct compat_msghdr __user *,
struct sockaddr __user **, struct iovec **);
struct sock_fprog __user *get_compat_bpf_fprog(char __user *optval);
diff --git a/include/net/devlink.h b/include/net/devlink.h
index 2116c88663a1..ce5cea428fdc 100644
--- a/include/net/devlink.h
+++ b/include/net/devlink.h
@@ -39,7 +39,8 @@ struct devlink {
possible_net_t _net;
struct mutex lock;
u8 reload_failed:1,
- reload_enabled:1;
+ reload_enabled:1,
+ registered:1;
char priv[0] __aligned(NETDEV_ALIGN);
};
@@ -401,6 +402,7 @@ enum devlink_param_generic_id {
DEVLINK_PARAM_GENERIC_ID_MSIX_VEC_PER_PF_MIN,
DEVLINK_PARAM_GENERIC_ID_FW_LOAD_POLICY,
DEVLINK_PARAM_GENERIC_ID_RESET_DEV_ON_DRV_PROBE,
+ DEVLINK_PARAM_GENERIC_ID_ENABLE_ROCE,
/* add new param generic ids above here*/
__DEVLINK_PARAM_GENERIC_ID_MAX,
@@ -435,6 +437,9 @@ enum devlink_param_generic_id {
"reset_dev_on_drv_probe"
#define DEVLINK_PARAM_GENERIC_RESET_DEV_ON_DRV_PROBE_TYPE DEVLINK_PARAM_TYPE_U8
+#define DEVLINK_PARAM_GENERIC_ENABLE_ROCE_NAME "enable_roce"
+#define DEVLINK_PARAM_GENERIC_ENABLE_ROCE_TYPE DEVLINK_PARAM_TYPE_BOOL
+
#define DEVLINK_PARAM_GENERIC(_id, _cmodes, _get, _set, _validate) \
{ \
.id = DEVLINK_PARAM_GENERIC_ID_##_id, \
@@ -480,6 +485,10 @@ enum devlink_param_generic_id {
#define DEVLINK_INFO_VERSION_GENERIC_FW_UNDI "fw.undi"
/* NCSI support/handler version */
#define DEVLINK_INFO_VERSION_GENERIC_FW_NCSI "fw.ncsi"
+/* FW parameter set id */
+#define DEVLINK_INFO_VERSION_GENERIC_FW_PSID "fw.psid"
+/* RoCE FW version */
+#define DEVLINK_INFO_VERSION_GENERIC_FW_ROCE "fw.roce"
struct devlink_region;
struct devlink_info_req;
@@ -507,11 +516,13 @@ enum devlink_health_reporter_state {
struct devlink_health_reporter_ops {
char *name;
int (*recover)(struct devlink_health_reporter *reporter,
- void *priv_ctx);
+ void *priv_ctx, struct netlink_ext_ack *extack);
int (*dump)(struct devlink_health_reporter *reporter,
- struct devlink_fmsg *fmsg, void *priv_ctx);
+ struct devlink_fmsg *fmsg, void *priv_ctx,
+ struct netlink_ext_ack *extack);
int (*diagnose)(struct devlink_health_reporter *reporter,
- struct devlink_fmsg *fmsg);
+ struct devlink_fmsg *fmsg,
+ struct netlink_ext_ack *extack);
};
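A sketch of a reporter ops table updated for the new extack parameters; the demo_* names are hypothetical:

	static int demo_recover(struct devlink_health_reporter *reporter,
				void *priv_ctx, struct netlink_ext_ack *extack)
	{
		/* attempt recovery; use extack to report failure details */
		return 0;
	}

	static const struct devlink_health_reporter_ops demo_reporter_ops = {
		.name	 = "demo",
		.recover = demo_recover,
	};
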
/**
@@ -555,7 +566,7 @@ struct devlink_trap {
};
/* All traps must be documented in
- * Documentation/networking/devlink-trap.rst
+ * Documentation/networking/devlink/devlink-trap.rst
*/
enum devlink_trap_generic_id {
DEVLINK_TRAP_GENERIC_ID_SMAC_MC,
@@ -567,6 +578,24 @@ enum devlink_trap_generic_id {
DEVLINK_TRAP_GENERIC_ID_BLACKHOLE_ROUTE,
DEVLINK_TRAP_GENERIC_ID_TTL_ERROR,
DEVLINK_TRAP_GENERIC_ID_TAIL_DROP,
+ DEVLINK_TRAP_GENERIC_ID_NON_IP_PACKET,
+ DEVLINK_TRAP_GENERIC_ID_UC_DIP_MC_DMAC,
+ DEVLINK_TRAP_GENERIC_ID_DIP_LB,
+ DEVLINK_TRAP_GENERIC_ID_SIP_MC,
+ DEVLINK_TRAP_GENERIC_ID_SIP_LB,
+ DEVLINK_TRAP_GENERIC_ID_CORRUPTED_IP_HDR,
+ DEVLINK_TRAP_GENERIC_ID_IPV4_SIP_BC,
+ DEVLINK_TRAP_GENERIC_ID_IPV6_MC_DIP_RESERVED_SCOPE,
+ DEVLINK_TRAP_GENERIC_ID_IPV6_MC_DIP_INTERFACE_LOCAL_SCOPE,
+ DEVLINK_TRAP_GENERIC_ID_MTU_ERROR,
+ DEVLINK_TRAP_GENERIC_ID_UNRESOLVED_NEIGH,
+ DEVLINK_TRAP_GENERIC_ID_RPF,
+ DEVLINK_TRAP_GENERIC_ID_REJECT_ROUTE,
+ DEVLINK_TRAP_GENERIC_ID_IPV4_LPM_UNICAST_MISS,
+ DEVLINK_TRAP_GENERIC_ID_IPV6_LPM_UNICAST_MISS,
+ DEVLINK_TRAP_GENERIC_ID_NON_ROUTABLE,
+ DEVLINK_TRAP_GENERIC_ID_DECAP_ERROR,
+ DEVLINK_TRAP_GENERIC_ID_OVERLAY_SMAC_MC,
/* Add new generic trap IDs above */
__DEVLINK_TRAP_GENERIC_ID_MAX,
@@ -574,12 +603,13 @@ enum devlink_trap_generic_id {
};
/* All trap groups must be documented in
- * Documentation/networking/devlink-trap.rst
+ * Documentation/networking/devlink/devlink-trap.rst
*/
enum devlink_trap_group_generic_id {
DEVLINK_TRAP_GROUP_GENERIC_ID_L2_DROPS,
DEVLINK_TRAP_GROUP_GENERIC_ID_L3_DROPS,
DEVLINK_TRAP_GROUP_GENERIC_ID_BUFFER_DROPS,
+ DEVLINK_TRAP_GROUP_GENERIC_ID_TUNNEL_DROPS,
/* Add new generic trap group IDs above */
__DEVLINK_TRAP_GROUP_GENERIC_ID_MAX,
@@ -605,6 +635,42 @@ enum devlink_trap_group_generic_id {
"ttl_value_is_too_small"
#define DEVLINK_TRAP_GENERIC_NAME_TAIL_DROP \
"tail_drop"
+#define DEVLINK_TRAP_GENERIC_NAME_NON_IP_PACKET \
+ "non_ip"
+#define DEVLINK_TRAP_GENERIC_NAME_UC_DIP_MC_DMAC \
+ "uc_dip_over_mc_dmac"
+#define DEVLINK_TRAP_GENERIC_NAME_DIP_LB \
+ "dip_is_loopback_address"
+#define DEVLINK_TRAP_GENERIC_NAME_SIP_MC \
+ "sip_is_mc"
+#define DEVLINK_TRAP_GENERIC_NAME_SIP_LB \
+ "sip_is_loopback_address"
+#define DEVLINK_TRAP_GENERIC_NAME_CORRUPTED_IP_HDR \
+ "ip_header_corrupted"
+#define DEVLINK_TRAP_GENERIC_NAME_IPV4_SIP_BC \
+ "ipv4_sip_is_limited_bc"
+#define DEVLINK_TRAP_GENERIC_NAME_IPV6_MC_DIP_RESERVED_SCOPE \
+ "ipv6_mc_dip_reserved_scope"
+#define DEVLINK_TRAP_GENERIC_NAME_IPV6_MC_DIP_INTERFACE_LOCAL_SCOPE \
+ "ipv6_mc_dip_interface_local_scope"
+#define DEVLINK_TRAP_GENERIC_NAME_MTU_ERROR \
+ "mtu_value_is_too_small"
+#define DEVLINK_TRAP_GENERIC_NAME_UNRESOLVED_NEIGH \
+ "unresolved_neigh"
+#define DEVLINK_TRAP_GENERIC_NAME_RPF \
+ "mc_reverse_path_forwarding"
+#define DEVLINK_TRAP_GENERIC_NAME_REJECT_ROUTE \
+ "reject_route"
+#define DEVLINK_TRAP_GENERIC_NAME_IPV4_LPM_UNICAST_MISS \
+ "ipv4_lpm_miss"
+#define DEVLINK_TRAP_GENERIC_NAME_IPV6_LPM_UNICAST_MISS \
+ "ipv6_lpm_miss"
+#define DEVLINK_TRAP_GENERIC_NAME_NON_ROUTABLE \
+ "non_routable_packet"
+#define DEVLINK_TRAP_GENERIC_NAME_DECAP_ERROR \
+ "decap_error"
+#define DEVLINK_TRAP_GENERIC_NAME_OVERLAY_SMAC_MC \
+ "overlay_smac_is_mc"
#define DEVLINK_TRAP_GROUP_GENERIC_NAME_L2_DROPS \
"l2_drops"
@@ -612,6 +678,8 @@ enum devlink_trap_group_generic_id {
"l3_drops"
#define DEVLINK_TRAP_GROUP_GENERIC_NAME_BUFFER_DROPS \
"buffer_drops"
+#define DEVLINK_TRAP_GROUP_GENERIC_NAME_TUNNEL_DROPS \
+ "tunnel_drops"
#define DEVLINK_TRAP_GENERIC(_type, _init_action, _id, _group, _metadata_cap) \
{ \
@@ -644,7 +712,7 @@ enum devlink_trap_group_generic_id {
}
struct devlink_ops {
- int (*reload_down)(struct devlink *devlink,
+ int (*reload_down)(struct devlink *devlink, bool netns_change,
struct netlink_ext_ack *extack);
int (*reload_up)(struct devlink *devlink,
struct netlink_ext_ack *extack);
@@ -772,6 +840,8 @@ static inline struct devlink *netdev_to_devlink(struct net_device *dev)
struct ib_device;
+struct net *devlink_net(const struct devlink *devlink);
+void devlink_net_set(struct devlink *devlink, struct net *net);
struct devlink *devlink_alloc(const struct devlink_ops *ops, size_t priv_size);
int devlink_register(struct devlink *devlink, struct device *dev);
void devlink_unregister(struct devlink *devlink);
@@ -884,7 +954,7 @@ struct devlink_region *devlink_region_create(struct devlink *devlink,
u32 region_max_snapshots,
u64 region_size);
void devlink_region_destroy(struct devlink_region *region);
-u32 devlink_region_shapshot_id_get(struct devlink *devlink);
+u32 devlink_region_snapshot_id_get(struct devlink *devlink);
int devlink_region_snapshot_create(struct devlink_region *region,
u8 *data, u32 snapshot_id,
devlink_snapshot_data_dest_t *data_destructor);
@@ -917,8 +987,6 @@ int devlink_fmsg_u8_put(struct devlink_fmsg *fmsg, u8 value);
int devlink_fmsg_u32_put(struct devlink_fmsg *fmsg, u32 value);
int devlink_fmsg_u64_put(struct devlink_fmsg *fmsg, u64 value);
int devlink_fmsg_string_put(struct devlink_fmsg *fmsg, const char *value);
-int devlink_fmsg_binary_put(struct devlink_fmsg *fmsg, const void *value,
- u16 value_len);
int devlink_fmsg_bool_pair_put(struct devlink_fmsg *fmsg, const char *name,
bool value);
@@ -931,7 +999,7 @@ int devlink_fmsg_u64_pair_put(struct devlink_fmsg *fmsg, const char *name,
int devlink_fmsg_string_pair_put(struct devlink_fmsg *fmsg, const char *name,
const char *value);
int devlink_fmsg_binary_pair_put(struct devlink_fmsg *fmsg, const char *name,
- const void *value, u16 value_len);
+ const void *value, u32 value_len);
struct devlink_health_reporter *
devlink_health_reporter_create(struct devlink *devlink,
@@ -948,6 +1016,8 @@ int devlink_health_report(struct devlink_health_reporter *reporter,
void
devlink_health_reporter_state_update(struct devlink_health_reporter *reporter,
enum devlink_health_reporter_state state);
+void
+devlink_health_reporter_recovery_done(struct devlink_health_reporter *reporter);
bool devlink_is_reload_failed(const struct devlink *devlink);
diff --git a/include/net/dsa.h b/include/net/dsa.h
index 541fb514e31d..63495e3443ac 100644
--- a/include/net/dsa.h
+++ b/include/net/dsa.h
@@ -42,6 +42,8 @@ struct phylink_link_state;
#define DSA_TAG_PROTO_8021Q_VALUE 12
#define DSA_TAG_PROTO_SJA1105_VALUE 13
#define DSA_TAG_PROTO_KSZ8795_VALUE 14
+#define DSA_TAG_PROTO_OCELOT_VALUE 15
+#define DSA_TAG_PROTO_AR9331_VALUE 16
enum dsa_tag_protocol {
DSA_TAG_PROTO_NONE = DSA_TAG_PROTO_NONE_VALUE,
@@ -59,6 +61,8 @@ enum dsa_tag_protocol {
DSA_TAG_PROTO_8021Q = DSA_TAG_PROTO_8021Q_VALUE,
DSA_TAG_PROTO_SJA1105 = DSA_TAG_PROTO_SJA1105_VALUE,
DSA_TAG_PROTO_KSZ8795 = DSA_TAG_PROTO_KSZ8795_VALUE,
+ DSA_TAG_PROTO_OCELOT = DSA_TAG_PROTO_OCELOT_VALUE,
+ DSA_TAG_PROTO_AR9331 = DSA_TAG_PROTO_AR9331_VALUE,
};
struct packet_type;
@@ -86,7 +90,6 @@ struct dsa_device_ops {
struct dsa_skb_cb {
struct sk_buff *clone;
- bool deferred_xmit;
};
struct __dsa_skb_cb {
@@ -94,8 +97,6 @@ struct __dsa_skb_cb {
u8 priv[48 - sizeof(struct dsa_skb_cb)];
};
-#define __DSA_SKB_CB(skb) ((struct __dsa_skb_cb *)((skb)->cb))
-
#define DSA_SKB_CB(skb) ((struct dsa_skb_cb *)((skb)->cb))
#define DSA_SKB_CB_PRIV(skb) \
@@ -122,15 +123,11 @@ struct dsa_switch_tree {
*/
struct dsa_platform_data *pd;
- /*
- * The switch port to which the CPU is attached.
- */
- struct dsa_port *cpu_dp;
+ /* List of switch ports */
+ struct list_head ports;
- /*
- * Data for the individual switch chips.
- */
- struct dsa_switch *ds[DSA_MAX_SWITCHES];
+ /* List of DSA links composing the routing table */
+ struct list_head rtable;
};
/* TC matchall action types, only mirroring for now */
@@ -194,8 +191,7 @@ struct dsa_port {
struct phylink *pl;
struct phylink_config pl_config;
- struct work_struct xmit_work;
- struct sk_buff_head xmit_queue;
+ struct list_head list;
/*
* Give the switch driver somewhere to hang its per-port private data
@@ -212,9 +208,24 @@ struct dsa_port {
* Original copy of the master netdev net_device_ops
*/
const struct net_device_ops *orig_ndo_ops;
+
+ bool setup;
+};
+
+/* TODO: ideally DSA ports would have a single dp->link_dp member,
+ * and no dst->rtable nor this struct dsa_link would be needed,
+ * but this would require some more complex tree walking,
+ * so keep it stupid at the moment and list them all.
+ */
+struct dsa_link {
+ struct dsa_port *dp;
+ struct dsa_port *link_dp;
+ struct list_head list;
};
struct dsa_switch {
+ bool setup;
+
struct device *dev;
/*
@@ -243,13 +254,6 @@ struct dsa_switch {
const struct dsa_switch_ops *ops;
/*
- * An array of which element [a] indicates which port on this
- * switch should be used to send packets to that are destined
- * for switch a. Can be NULL if there is only one switch chip.
- */
- s8 rtable[DSA_MAX_SWITCHES];
-
- /*
* Slave mii_bus and devices for the individual ports.
*/
u32 phys_mii_mask;
@@ -275,14 +279,24 @@ struct dsa_switch {
*/
bool vlan_filtering;
- /* Dynamically allocated ports, keep last */
+ /* MAC PCS does not provide link state change interrupt, and requires
+ * polling. Flag passed on to PHYLINK.
+ */
+ bool pcs_poll;
+
size_t num_ports;
- struct dsa_port ports[];
};
-static inline const struct dsa_port *dsa_to_port(struct dsa_switch *ds, int p)
+static inline struct dsa_port *dsa_to_port(struct dsa_switch *ds, int p)
{
- return &ds->ports[p];
+ struct dsa_switch_tree *dst = ds->dst;
+ struct dsa_port *dp;
+
+ list_for_each_entry(dp, &dst->ports, list)
+ if (dp->ds == ds && dp->index == p)
+ return dp;
+
+ return NULL;
}
static inline bool dsa_is_unused_port(struct dsa_switch *ds, int p)
@@ -317,6 +331,19 @@ static inline u32 dsa_user_ports(struct dsa_switch *ds)
return mask;
}
+/* Return the local port used to reach an arbitrary switch device */
+static inline unsigned int dsa_routing_port(struct dsa_switch *ds, int device)
+{
+ struct dsa_switch_tree *dst = ds->dst;
+ struct dsa_link *dl;
+
+ list_for_each_entry(dl, &dst->rtable, list)
+ if (dl->dp->ds == ds && dl->link_dp->ds->index == device)
+ return dl->dp->index;
+
+ return ds->num_ports;
+}
+
/* Return the local port used to reach an arbitrary switch port */
static inline unsigned int dsa_towards_port(struct dsa_switch *ds, int device,
int port)
@@ -324,7 +351,7 @@ static inline unsigned int dsa_towards_port(struct dsa_switch *ds, int device,
if (device == ds->index)
return port;
else
- return ds->rtable[device];
+ return dsa_routing_port(ds, device);
}
/* Return the local port used to reach the dedicated CPU port */
@@ -353,7 +380,8 @@ typedef int dsa_fdb_dump_cb_t(const unsigned char *addr, u16 vid,
bool is_static, void *data);
struct dsa_switch_ops {
enum dsa_tag_protocol (*get_tag_protocol)(struct dsa_switch *ds,
- int port);
+ int port,
+ enum dsa_tag_protocol mprot);
int (*setup)(struct dsa_switch *ds);
void (*teardown)(struct dsa_switch *ds);
@@ -538,11 +566,45 @@ struct dsa_switch_ops {
bool (*port_rxtstamp)(struct dsa_switch *ds, int port,
struct sk_buff *skb, unsigned int type);
- /*
- * Deferred frame Tx
- */
- netdev_tx_t (*port_deferred_xmit)(struct dsa_switch *ds, int port,
- struct sk_buff *skb);
+ /* Devlink parameters */
+ int (*devlink_param_get)(struct dsa_switch *ds, u32 id,
+ struct devlink_param_gset_ctx *ctx);
+ int (*devlink_param_set)(struct dsa_switch *ds, u32 id,
+ struct devlink_param_gset_ctx *ctx);
+};
+
+#define DSA_DEVLINK_PARAM_DRIVER(_id, _name, _type, _cmodes) \
+ DEVLINK_PARAM_DRIVER(_id, _name, _type, _cmodes, \
+ dsa_devlink_param_get, dsa_devlink_param_set, NULL)
+
+int dsa_devlink_param_get(struct devlink *dl, u32 id,
+ struct devlink_param_gset_ctx *ctx);
+int dsa_devlink_param_set(struct devlink *dl, u32 id,
+ struct devlink_param_gset_ctx *ctx);
+int dsa_devlink_params_register(struct dsa_switch *ds,
+ const struct devlink_param *params,
+ size_t params_count);
+void dsa_devlink_params_unregister(struct dsa_switch *ds,
+ const struct devlink_param *params,
+ size_t params_count);
+int dsa_devlink_resource_register(struct dsa_switch *ds,
+ const char *resource_name,
+ u64 resource_size,
+ u64 resource_id,
+ u64 parent_resource_id,
+ const struct devlink_resource_size_params *size_params);
+
+void dsa_devlink_resources_unregister(struct dsa_switch *ds);
+
+void dsa_devlink_resource_occ_get_register(struct dsa_switch *ds,
+ u64 resource_id,
+ devlink_resource_occ_get_t *occ_get,
+ void *occ_get_priv);
+void dsa_devlink_resource_occ_get_unregister(struct dsa_switch *ds,
+ u64 resource_id);
+
+struct dsa_devlink_priv {
+ struct dsa_switch *ds;
};
struct dsa_switch_driver {
@@ -570,7 +632,6 @@ static inline bool dsa_can_decode(const struct sk_buff *skb,
return false;
}
-struct dsa_switch *dsa_switch_alloc(struct device *dev, size_t n);
void dsa_unregister_switch(struct dsa_switch *ds);
int dsa_register_switch(struct dsa_switch *ds);
#ifdef CONFIG_PM_SLEEP
diff --git a/include/net/dsfield.h b/include/net/dsfield.h
index 1a245ee10c95..a59a57ffc546 100644
--- a/include/net/dsfield.h
+++ b/include/net/dsfield.h
@@ -21,7 +21,7 @@ static inline __u8 ipv4_get_dsfield(const struct iphdr *iph)
static inline __u8 ipv6_get_dsfield(const struct ipv6hdr *ipv6h)
{
- return ntohs(*(const __be16 *)ipv6h) >> 4;
+ return ntohs(*(__force const __be16 *)ipv6h) >> 4;
}
diff --git a/include/net/dst.h b/include/net/dst.h
index fe62fe2eb781..3448cf865ede 100644
--- a/include/net/dst.h
+++ b/include/net/dst.h
@@ -82,7 +82,7 @@ struct dst_entry {
struct dst_metrics {
u32 metrics[RTAX_MAX];
refcount_t refcnt;
-};
+} __aligned(4); /* Low pointer bits contain DST_METRICS_FLAGS */
extern const struct dst_metrics dst_default_metrics;
u32 *dst_cow_metrics_generic(struct dst_entry *dst, unsigned long old);
@@ -516,7 +516,16 @@ static inline void skb_dst_update_pmtu(struct sk_buff *skb, u32 mtu)
struct dst_entry *dst = skb_dst(skb);
if (dst && dst->ops->update_pmtu)
- dst->ops->update_pmtu(dst, NULL, skb, mtu);
+ dst->ops->update_pmtu(dst, NULL, skb, mtu, true);
+}
+
+/* update dst pmtu but not do neighbor confirm */
+static inline void skb_dst_update_pmtu_no_confirm(struct sk_buff *skb, u32 mtu)
+{
+ struct dst_entry *dst = skb_dst(skb);
+
+ if (dst && dst->ops->update_pmtu)
+ dst->ops->update_pmtu(dst, NULL, skb, mtu, false);
}
static inline void skb_tunnel_check_pmtu(struct sk_buff *skb,
@@ -526,7 +535,7 @@ static inline void skb_tunnel_check_pmtu(struct sk_buff *skb,
u32 encap_mtu = dst_mtu(encap_dst);
if (skb->len > encap_mtu - headroom)
- skb_dst_update_pmtu(skb, encap_mtu - headroom);
+ skb_dst_update_pmtu_no_confirm(skb, encap_mtu - headroom);
}
#endif /* _NET_DST_H */
diff --git a/include/net/dst_ops.h b/include/net/dst_ops.h
index 5ec645f27ee3..443863c7b8da 100644
--- a/include/net/dst_ops.h
+++ b/include/net/dst_ops.h
@@ -27,7 +27,8 @@ struct dst_ops {
struct dst_entry * (*negative_advice)(struct dst_entry *);
void (*link_failure)(struct sk_buff *);
void (*update_pmtu)(struct dst_entry *dst, struct sock *sk,
- struct sk_buff *skb, u32 mtu);
+ struct sk_buff *skb, u32 mtu,
+ bool confirm_neigh);
void (*redirect)(struct dst_entry *dst, struct sock *sk,
struct sk_buff *skb);
int (*local_out)(struct net *net, struct sock *sk, struct sk_buff *skb);
diff --git a/include/net/espintcp.h b/include/net/espintcp.h
new file mode 100644
index 000000000000..dd7026a00066
--- /dev/null
+++ b/include/net/espintcp.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _NET_ESPINTCP_H
+#define _NET_ESPINTCP_H
+
+#include <net/strparser.h>
+#include <linux/skmsg.h>
+
+void __init espintcp_init(void);
+
+int espintcp_push_skb(struct sock *sk, struct sk_buff *skb);
+int espintcp_queue_out(struct sock *sk, struct sk_buff *skb);
+bool tcp_is_ulp_esp(struct sock *sk);
+
+struct espintcp_msg {
+ struct sk_buff *skb;
+ struct sk_msg skmsg;
+ int offset;
+ int len;
+};
+
+struct espintcp_ctx {
+ struct strparser strp;
+ struct sk_buff_head ike_queue;
+ struct sk_buff_head out_queue;
+ struct espintcp_msg partial;
+ void (*saved_data_ready)(struct sock *sk);
+ void (*saved_write_space)(struct sock *sk);
+ struct work_struct work;
+ bool tx_running;
+};
+
+static inline struct espintcp_ctx *espintcp_getctx(const struct sock *sk)
+{
+ struct inet_connection_sock *icsk = inet_csk(sk);
+
+ /* RCU is only needed for diag */
+ return (__force void *)icsk->icsk_ulp_data;
+}
+#endif
diff --git a/include/net/fib_notifier.h b/include/net/fib_notifier.h
index c49d7bfb5c30..6d59221ff05a 100644
--- a/include/net/fib_notifier.h
+++ b/include/net/fib_notifier.h
@@ -8,7 +8,6 @@
struct module;
struct fib_notifier_info {
- struct net *net;
int family;
struct netlink_ext_ack *extack;
};
@@ -30,19 +29,21 @@ struct fib_notifier_ops {
int family;
struct list_head list;
unsigned int (*fib_seq_read)(struct net *net);
- int (*fib_dump)(struct net *net, struct notifier_block *nb);
+ int (*fib_dump)(struct net *net, struct notifier_block *nb,
+ struct netlink_ext_ack *extack);
struct module *owner;
struct rcu_head rcu;
};
-int call_fib_notifier(struct notifier_block *nb, struct net *net,
+int call_fib_notifier(struct notifier_block *nb,
enum fib_event_type event_type,
struct fib_notifier_info *info);
int call_fib_notifiers(struct net *net, enum fib_event_type event_type,
struct fib_notifier_info *info);
-int register_fib_notifier(struct notifier_block *nb,
- void (*cb)(struct notifier_block *nb));
-int unregister_fib_notifier(struct notifier_block *nb);
+int register_fib_notifier(struct net *net, struct notifier_block *nb,
+ void (*cb)(struct notifier_block *nb),
+ struct netlink_ext_ack *extack);
+int unregister_fib_notifier(struct net *net, struct notifier_block *nb);
struct fib_notifier_ops *
fib_notifier_ops_register(const struct fib_notifier_ops *tmpl, struct net *net);
void fib_notifier_ops_unregister(struct fib_notifier_ops *ops);
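A hedged sketch of the per-netns registration the new signature enables; the event handler and flush callback are placeholders, not existing kernel symbols:

static int example_fib_event(struct notifier_block *nb, unsigned long event,
			     void *ptr)
{
	/* react to FIB_EVENT_* notifications carried in ptr */
	return NOTIFY_DONE;
}

static void example_fib_dump_flush(struct notifier_block *nb)
{
	/* drop state accumulated from a partial dump before a retry */
}

static struct notifier_block example_fib_nb = {
	.notifier_call = example_fib_event,
};

static int example_fib_init(struct net *net, struct netlink_ext_ack *extack)
{
	return register_fib_notifier(net, &example_fib_nb,
				     example_fib_dump_flush, extack);
}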
diff --git a/include/net/fib_rules.h b/include/net/fib_rules.h
index 20dcadd8eed9..a259050f84af 100644
--- a/include/net/fib_rules.h
+++ b/include/net/fib_rules.h
@@ -108,6 +108,7 @@ struct fib_rule_notifier_info {
[FRA_OIFNAME] = { .type = NLA_STRING, .len = IFNAMSIZ - 1 }, \
[FRA_PRIORITY] = { .type = NLA_U32 }, \
[FRA_FWMARK] = { .type = NLA_U32 }, \
+ [FRA_TUN_ID] = { .type = NLA_U64 }, \
[FRA_FWMASK] = { .type = NLA_U32 }, \
[FRA_TABLE] = { .type = NLA_U32 }, \
[FRA_SUPPRESS_PREFIXLEN] = { .type = NLA_U32 }, \
@@ -194,7 +195,8 @@ int fib_rules_lookup(struct fib_rules_ops *, struct flowi *, int flags,
int fib_default_rule_add(struct fib_rules_ops *, u32 pref, u32 table,
u32 flags);
bool fib_rule_matchall(const struct fib_rule *rule);
-int fib_rules_dump(struct net *net, struct notifier_block *nb, int family);
+int fib_rules_dump(struct net *net, struct notifier_block *nb, int family,
+ struct netlink_ext_ack *extack);
unsigned int fib_rules_seq_read(struct net *net, int family);
int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr *nlh,
diff --git a/include/net/flow_dissector.h b/include/net/flow_dissector.h
index 5cd12276ae21..628383915827 100644
--- a/include/net/flow_dissector.h
+++ b/include/net/flow_dissector.h
@@ -5,8 +5,11 @@
#include <linux/types.h>
#include <linux/in6.h>
#include <linux/siphash.h>
+#include <linux/string.h>
#include <uapi/linux/if_ether.h>
+struct sk_buff;
+
/**
* struct flow_dissector_key_control:
* @thoff: Transport header offset
@@ -31,7 +34,6 @@ enum flow_dissect_ret {
/**
* struct flow_dissector_key_basic:
- * @thoff: Transport header offset
* @n_proto: Network header protocol (eg. IPv4/IPv6)
* @ip_proto: Transport header protocol (eg. TCP/UDP)
*/
@@ -46,9 +48,14 @@ struct flow_dissector_key_tags {
};
struct flow_dissector_key_vlan {
- u16 vlan_id:12,
- vlan_dei:1,
- vlan_priority:3;
+ union {
+ struct {
+ u16 vlan_id:12,
+ vlan_dei:1,
+ vlan_priority:3;
+ };
+ __be16 vlan_tci;
+ };
__be16 vlan_tpid;
};
@@ -157,19 +164,16 @@ struct flow_dissector_key_ports {
/**
* flow_dissector_key_icmp:
- * @ports: type and code of ICMP header
- * icmp: ICMP type (high) and code (low)
* type: ICMP type
* code: ICMP code
+ * id: session identifier
*/
struct flow_dissector_key_icmp {
- union {
- __be16 icmp;
- struct {
- u8 type;
- u8 code;
- };
+ struct {
+ u8 type;
+ u8 code;
};
+ u16 id;
};
/**
@@ -204,9 +208,11 @@ struct flow_dissector_key_ip {
/**
* struct flow_dissector_key_meta:
* @ingress_ifindex: ingress ifindex
+ * @ingress_iftype: ingress interface type
*/
struct flow_dissector_key_meta {
int ingress_ifindex;
+ u16 ingress_iftype;
};
/**
@@ -229,6 +235,7 @@ enum flow_dissector_key_id {
FLOW_DISSECTOR_KEY_IPV4_ADDRS, /* struct flow_dissector_key_ipv4_addrs */
FLOW_DISSECTOR_KEY_IPV6_ADDRS, /* struct flow_dissector_key_ipv6_addrs */
FLOW_DISSECTOR_KEY_PORTS, /* struct flow_dissector_key_ports */
+ FLOW_DISSECTOR_KEY_PORTS_RANGE, /* struct flow_dissector_key_ports */
FLOW_DISSECTOR_KEY_ICMP, /* struct flow_dissector_key_icmp */
FLOW_DISSECTOR_KEY_ETH_ADDRS, /* struct flow_dissector_key_eth_addrs */
FLOW_DISSECTOR_KEY_TIPC, /* struct flow_dissector_key_tipc */
@@ -283,6 +290,8 @@ struct flow_keys {
struct flow_dissector_key_vlan cvlan;
struct flow_dissector_key_keyid keyid;
struct flow_dissector_key_ports ports;
+ struct flow_dissector_key_icmp icmp;
+ /* 'addrs' must be the last member */
struct flow_dissector_key_addrs addrs;
};
@@ -316,6 +325,9 @@ static inline bool flow_keys_have_l4(const struct flow_keys *keys)
}
u32 flow_hash_from_keys(struct flow_keys *keys);
+void skb_flow_get_icmp_tci(const struct sk_buff *skb,
+ struct flow_dissector_key_icmp *key_icmp,
+ void *data, int thoff, int hlen);
static inline bool dissector_uses_key(const struct flow_dissector *flow_dissector,
enum flow_dissector_key_id key_id)
@@ -337,4 +349,12 @@ struct bpf_flow_dissector {
void *data_end;
};
+static inline void
+flow_dissector_init_keys(struct flow_dissector_key_control *key_control,
+ struct flow_dissector_key_basic *key_basic)
+{
+ memset(key_control, 0, sizeof(*key_control));
+ memset(key_basic, 0, sizeof(*key_basic));
+}
+
#endif
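A small sketch of how a caller is expected to use the new flow_dissector_init_keys() helper instead of open-coding two memset() calls (the surrounding dissection call is elided):

static void example_dissect_prep(void)
{
	struct flow_dissector_key_control ctl;
	struct flow_dissector_key_basic basic;

	flow_dissector_init_keys(&ctl, &basic);
	/* ... hand &ctl / &basic to the dissector as target containers ... */
}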
diff --git a/include/net/flow_offload.h b/include/net/flow_offload.h
index 86c567f531f3..c6f7bd22db60 100644
--- a/include/net/flow_offload.h
+++ b/include/net/flow_offload.h
@@ -380,19 +380,18 @@ static inline void flow_block_init(struct flow_block *flow_block)
typedef int flow_indr_block_bind_cb_t(struct net_device *dev, void *cb_priv,
enum tc_setup_type type, void *type_data);
-typedef void flow_indr_block_ing_cmd_t(struct net_device *dev,
- flow_indr_block_bind_cb_t *cb,
- void *cb_priv,
- enum flow_block_command command);
+typedef void flow_indr_block_cmd_t(struct net_device *dev,
+ flow_indr_block_bind_cb_t *cb, void *cb_priv,
+ enum flow_block_command command);
-struct flow_indr_block_ing_entry {
- flow_indr_block_ing_cmd_t *cb;
+struct flow_indr_block_entry {
+ flow_indr_block_cmd_t *cb;
struct list_head list;
};
-void flow_indr_add_block_ing_cb(struct flow_indr_block_ing_entry *entry);
+void flow_indr_add_block_cb(struct flow_indr_block_entry *entry);
-void flow_indr_del_block_ing_cb(struct flow_indr_block_ing_entry *entry);
+void flow_indr_del_block_cb(struct flow_indr_block_entry *entry);
int __flow_indr_block_cb_register(struct net_device *dev, void *cb_priv,
flow_indr_block_bind_cb_t *cb,
diff --git a/include/net/garp.h b/include/net/garp.h
index c41833bd4590..4d9a0c6a2e5f 100644
--- a/include/net/garp.h
+++ b/include/net/garp.h
@@ -37,7 +37,7 @@ struct garp_skb_cb {
static inline struct garp_skb_cb *garp_cb(struct sk_buff *skb)
{
BUILD_BUG_ON(sizeof(struct garp_skb_cb) >
- FIELD_SIZEOF(struct sk_buff, cb));
+ sizeof_field(struct sk_buff, cb));
return (struct garp_skb_cb *)skb->cb;
}
diff --git a/include/net/gen_stats.h b/include/net/gen_stats.h
index ca23860adbb9..1424e02cef90 100644
--- a/include/net/gen_stats.h
+++ b/include/net/gen_stats.h
@@ -7,6 +7,12 @@
#include <linux/rtnetlink.h>
#include <linux/pkt_sched.h>
+/* Note: this used to be in include/uapi/linux/gen_stats.h */
+struct gnet_stats_basic_packed {
+ __u64 bytes;
+ __u64 packets;
+};
+
struct gnet_stats_basic_cpu {
struct gnet_stats_basic_packed bstats;
struct u64_stats_sync syncp;
diff --git a/include/net/genetlink.h b/include/net/genetlink.h
index 9292f1c588b7..74950663bb00 100644
--- a/include/net/genetlink.h
+++ b/include/net/genetlink.h
@@ -75,8 +75,6 @@ struct genl_family {
struct module *module;
};
-struct nlattr **genl_family_attrbuf(const struct genl_family *family);
-
/**
* struct genl_info - receiving information
* @snd_seq: sending sequence number
@@ -128,6 +126,24 @@ enum genl_validate_flags {
};
/**
+ * struct genl_dumpit_info - info that is available during dumpit op call
+ * @family: generic netlink family - for internal genl code usage
+ * @ops: generic netlink ops - for internal genl code usage
+ * @attrs: netlink attributes
+ */
+struct genl_dumpit_info {
+ const struct genl_family *family;
+ const struct genl_ops *ops;
+ struct nlattr **attrs;
+};
+
+static inline const struct genl_dumpit_info *
+genl_dumpit_info(struct netlink_callback *cb)
+{
+ return cb->data;
+}
+
+/**
* struct genl_ops - generic netlink operations
* @cmd: command identifier
* @internal_flags: flags used by the family
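A hedged sketch of a dumpit handler reading parsed attributes through the new genl_dumpit_info() accessor; EXAMPLE_ATTR_IFINDEX is a placeholder attribute, not part of any real family:

static int example_dumpit(struct sk_buff *skb, struct netlink_callback *cb)
{
	const struct genl_dumpit_info *info = genl_dumpit_info(cb);
	struct nlattr **attrs = info->attrs;

	if (attrs && attrs[EXAMPLE_ATTR_IFINDEX])
		cb->args[0] = nla_get_u32(attrs[EXAMPLE_ATTR_IFINDEX]);

	/* ... fill skb with dump data ... */
	return skb->len;
}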
diff --git a/include/net/icmp.h b/include/net/icmp.h
index 5d4bfdba9adf..9ac2d2672a93 100644
--- a/include/net/icmp.h
+++ b/include/net/icmp.h
@@ -43,6 +43,12 @@ static inline void icmp_send(struct sk_buff *skb_in, int type, int code, __be32
__icmp_send(skb_in, type, code, info, &IPCB(skb_in)->opt);
}
+#if IS_ENABLED(CONFIG_NF_NAT)
+void icmp_ndo_send(struct sk_buff *skb_in, int type, int code, __be32 info);
+#else
+#define icmp_ndo_send icmp_send
+#endif
+
int icmp_rcv(struct sk_buff *skb);
int icmp_err(struct sk_buff *skb, u32 info);
int icmp_init(void);
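As a hedged illustration (the function name is a placeholder), an ndo_start_xmit path would report an oversized IPv4 packet with icmp_ndo_send(), which is intended to handle NATed packets when CONFIG_NF_NAT is enabled and otherwise maps straight to icmp_send():

static void example_report_pmtu(struct sk_buff *skb, unsigned int mtu)
{
	if (skb->protocol == htons(ETH_P_IP))
		icmp_ndo_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
			      htonl(mtu));
}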
diff --git a/include/net/inet_hashtables.h b/include/net/inet_hashtables.h
index af2b4c065a04..d0019d3395cf 100644
--- a/include/net/inet_hashtables.h
+++ b/include/net/inet_hashtables.h
@@ -103,13 +103,19 @@ struct inet_bind_hashbucket {
struct hlist_head chain;
};
-/*
- * Sockets can be hashed in established or listening table
+/* Sockets can be hashed in established or listening table.
+ * We must use a different 'nulls' end-of-chain value for all hash buckets:
+ * a socket might transition from ESTABLISHED to LISTEN state without an
+ * RCU grace period. A lookup in the ehash table needs to handle this case.
*/
+#define LISTENING_NULLS_BASE (1U << 29)
struct inet_listen_hashbucket {
spinlock_t lock;
unsigned int count;
- struct hlist_head head;
+ union {
+ struct hlist_head head;
+ struct hlist_nulls_head nulls_head;
+ };
};
/* This is for listening sockets, thus all sockets which possess wildcards. */
diff --git a/include/net/ip.h b/include/net/ip.h
index a2c61c36dc4a..5b317c9f4470 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -339,10 +339,10 @@ static inline u64 snmp_fold_field64(void __percpu *mib, int offt, size_t syncp_o
void inet_get_local_port_range(struct net *net, int *low, int *high);
#ifdef CONFIG_SYSCTL
-static inline int inet_is_local_reserved_port(struct net *net, int port)
+static inline bool inet_is_local_reserved_port(struct net *net, unsigned short port)
{
if (!net->ipv4.sysctl_local_reserved_ports)
- return 0;
+ return false;
return test_bit(port, net->ipv4.sysctl_local_reserved_ports);
}
@@ -351,20 +351,20 @@ static inline bool sysctl_dev_name_is_allowed(const char *name)
return strcmp(name, "default") != 0 && strcmp(name, "all") != 0;
}
-static inline int inet_prot_sock(struct net *net)
+static inline bool inet_port_requires_bind_service(struct net *net, unsigned short port)
{
- return net->ipv4.sysctl_ip_prot_sock;
+ return port < net->ipv4.sysctl_ip_prot_sock;
}
#else
-static inline int inet_is_local_reserved_port(struct net *net, int port)
+static inline bool inet_is_local_reserved_port(struct net *net, unsigned short port)
{
- return 0;
+ return false;
}
-static inline int inet_prot_sock(struct net *net)
+static inline bool inet_port_requires_bind_service(struct net *net, unsigned short port)
{
- return PROT_SOCK;
+ return port < PROT_SOCK;
}
#endif
@@ -760,4 +760,9 @@ int ip_misc_proc_init(void);
int rtm_getroute_parse_ip_proto(struct nlattr *attr, u8 *ip_proto, u8 family,
struct netlink_ext_ack *extack);
+static inline bool inetdev_valid_mtu(unsigned int mtu)
+{
+ return likely(mtu >= IPV4_MIN_MTU);
+}
+
#endif /* _IP_H */
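A minimal sketch of the bind-time check the renamed helper expresses; everything around it is trimmed:

static int example_check_bind(struct net *net, unsigned short snum)
{
	if (inet_port_requires_bind_service(net, snum) &&
	    !ns_capable(net->user_ns, CAP_NET_BIND_SERVICE))
		return -EACCES;
	return 0;
}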
diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h
index 4b5656c71abc..fd60a8ac02ee 100644
--- a/include/net/ip6_fib.h
+++ b/include/net/ip6_fib.h
@@ -90,7 +90,32 @@ struct fib6_gc_args {
#ifndef CONFIG_IPV6_SUBTREES
#define FIB6_SUBTREE(fn) NULL
+
+static inline bool fib6_routes_require_src(const struct net *net)
+{
+ return false;
+}
+
+static inline void fib6_routes_require_src_inc(struct net *net) {}
+static inline void fib6_routes_require_src_dec(struct net *net) {}
+
#else
+
+static inline bool fib6_routes_require_src(const struct net *net)
+{
+ return net->ipv6.fib6_routes_require_src > 0;
+}
+
+static inline void fib6_routes_require_src_inc(struct net *net)
+{
+ net->ipv6.fib6_routes_require_src++;
+}
+
+static inline void fib6_routes_require_src_dec(struct net *net)
+{
+ net->ipv6.fib6_routes_require_src--;
+}
+
#define FIB6_SUBTREE(fn) (rcu_dereference_protected((fn)->subtree, 1))
#endif
@@ -167,7 +192,9 @@ struct fib6_info {
dst_nopolicy:1,
dst_host:1,
fib6_destroying:1,
- unused:3;
+ offload:1,
+ trap:1,
+ unused:1;
struct rcu_head rcu;
struct nexthop *nh;
@@ -212,6 +239,11 @@ static inline struct inet6_dev *ip6_dst_idev(struct dst_entry *dst)
return ((struct rt6_info *)dst)->rt6i_idev;
}
+static inline bool fib6_requires_src(const struct fib6_info *rt)
+{
+ return rt->fib6_src.plen > 0;
+}
+
static inline void fib6_clean_expires(struct fib6_info *f6i)
{
f6i->fib6_flags &= ~RTF_EXPIRES;
@@ -299,6 +331,13 @@ static inline void fib6_info_release(struct fib6_info *f6i)
call_rcu(&f6i->rcu, fib6_info_destroy_rcu);
}
+static inline void fib6_info_hw_flags_set(struct fib6_info *f6i, bool offload,
+ bool trap)
+{
+ f6i->offload = offload;
+ f6i->trap = trap;
+}
+
enum fib6_walk_state {
#ifdef CONFIG_IPV6_SUBTREES
FWS_S,
@@ -457,6 +496,7 @@ int call_fib6_multipath_entry_notifiers(struct net *net,
struct fib6_info *rt,
unsigned int nsiblings,
struct netlink_ext_ack *extack);
+int call_fib6_entry_notifiers_replace(struct net *net, struct fib6_info *rt);
void fib6_rt_update(struct net *net, struct fib6_info *rt,
struct nl_info *info);
void inet6_rt_notify(int event, struct fib6_info *rt, struct nl_info *info,
@@ -478,7 +518,7 @@ struct ipv6_route_iter {
extern const struct seq_operations ipv6_route_seq_ops;
-int call_fib6_notifier(struct notifier_block *nb, struct net *net,
+int call_fib6_notifier(struct notifier_block *nb,
enum fib_event_type event_type,
struct fib_notifier_info *info);
int call_fib6_notifiers(struct net *net, enum fib_event_type event_type,
@@ -488,7 +528,8 @@ int __net_init fib6_notifier_init(struct net *net);
void __net_exit fib6_notifier_exit(struct net *net);
unsigned int fib6_tables_seq_read(struct net *net);
-int fib6_tables_dump(struct net *net, struct notifier_block *nb);
+int fib6_tables_dump(struct net *net, struct notifier_block *nb,
+ struct netlink_ext_ack *extack);
void fib6_update_sernum(struct net *net, struct fib6_info *rt);
void fib6_update_sernum_upto_root(struct net *net, struct fib6_info *rt);
@@ -501,10 +542,16 @@ static inline bool fib6_metric_locked(struct fib6_info *f6i, int metric)
}
#ifdef CONFIG_IPV6_MULTIPLE_TABLES
+static inline bool fib6_has_custom_rules(const struct net *net)
+{
+ return net->ipv6.fib6_has_custom_rules;
+}
+
int fib6_rules_init(void);
void fib6_rules_cleanup(void);
bool fib6_rule_default(const struct fib_rule *rule);
-int fib6_rules_dump(struct net *net, struct notifier_block *nb);
+int fib6_rules_dump(struct net *net, struct notifier_block *nb,
+ struct netlink_ext_ack *extack);
unsigned int fib6_rules_seq_read(struct net *net);
static inline bool fib6_rules_early_flow_dissect(struct net *net,
@@ -525,6 +572,10 @@ static inline bool fib6_rules_early_flow_dissect(struct net *net,
return true;
}
#else
+static inline bool fib6_has_custom_rules(const struct net *net)
+{
+ return false;
+}
static inline int fib6_rules_init(void)
{
return 0;
@@ -537,7 +588,8 @@ static inline bool fib6_rule_default(const struct fib_rule *rule)
{
return true;
}
-static inline int fib6_rules_dump(struct net *net, struct notifier_block *nb)
+static inline int fib6_rules_dump(struct net *net, struct notifier_block *nb,
+ struct netlink_ext_ack *extack)
{
return 0;
}
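A hedged sketch of the intended consumer of fib6_info_hw_flags_set(): a switchdev-style driver reporting whether a route ended up offloaded or trapped after programming hardware (the wrapper function is illustrative):

static void example_fib6_done(struct fib6_info *f6i, bool in_hw, bool trapped)
{
	/* reflected as RTM_F_OFFLOAD / RTM_F_TRAP in route dumps */
	fib6_info_hw_flags_set(f6i, in_hw, trapped);
}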
diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
index ab1ca9e238d2..6a1ae49809de 100644
--- a/include/net/ip_fib.h
+++ b/include/net/ip_fib.h
@@ -204,6 +204,18 @@ __be32 fib_result_prefsrc(struct net *net, struct fib_result *res);
#define FIB_RES_DEV(res) (FIB_RES_NHC(res)->nhc_dev)
#define FIB_RES_OIF(res) (FIB_RES_NHC(res)->nhc_oif)
+struct fib_rt_info {
+ struct fib_info *fi;
+ u32 tb_id;
+ __be32 dst;
+ int dst_len;
+ u8 tos;
+ u8 type;
+ u8 offload:1,
+ trap:1,
+ unused:6;
+};
+
struct fib_entry_notifier_info {
struct fib_notifier_info info; /* must be first */
u32 dst;
@@ -219,7 +231,7 @@ struct fib_nh_notifier_info {
struct fib_nh *fib_nh;
};
-int call_fib4_notifier(struct notifier_block *nb, struct net *net,
+int call_fib4_notifier(struct notifier_block *nb,
enum fib_event_type event_type,
struct fib_notifier_info *info);
int call_fib4_notifiers(struct net *net, enum fib_event_type event_type,
@@ -229,7 +241,8 @@ int __net_init fib4_notifier_init(struct net *net);
void __net_exit fib4_notifier_exit(struct net *net);
void fib_info_notify_update(struct net *net, struct nl_info *info);
-void fib_notify(struct net *net, struct notifier_block *nb);
+int fib_notify(struct net *net, struct notifier_block *nb,
+ struct netlink_ext_ack *extack);
struct fib_table {
struct hlist_node tb_hlist;
@@ -310,12 +323,18 @@ static inline int fib_lookup(struct net *net, const struct flowi4 *flp,
return err;
}
+static inline bool fib4_has_custom_rules(const struct net *net)
+{
+ return false;
+}
+
static inline bool fib4_rule_default(const struct fib_rule *rule)
{
return true;
}
-static inline int fib4_rules_dump(struct net *net, struct notifier_block *nb)
+static inline int fib4_rules_dump(struct net *net, struct notifier_block *nb,
+ struct netlink_ext_ack *extack)
{
return 0;
}
@@ -376,8 +395,14 @@ out:
return err;
}
+static inline bool fib4_has_custom_rules(const struct net *net)
+{
+ return net->ipv4.fib_has_custom_rules;
+}
+
bool fib4_rule_default(const struct fib_rule *rule);
-int fib4_rules_dump(struct net *net, struct notifier_block *nb);
+int fib4_rules_dump(struct net *net, struct notifier_block *nb,
+ struct netlink_ext_ack *extack);
unsigned int fib4_rules_seq_read(struct net *net);
static inline bool fib4_rules_early_flow_dissect(struct net *net,
@@ -451,6 +476,7 @@ int fib_nh_common_init(struct fib_nh_common *nhc, struct nlattr *fc_encap,
void fib_nh_common_release(struct fib_nh_common *nhc);
/* Exported by fib_trie.c */
+void fib_alias_hw_flags_set(struct net *net, const struct fib_rt_info *fri);
void fib_trie_init(void);
struct fib_table *fib_trie_table(u32 id, struct fib_table *alias);
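The IPv4 counterpart, sketched under the assumption that the caller already has the route parameters at hand; the field values and function name are illustrative:

static void example_fib4_done(struct net *net, struct fib_info *fi, u32 tb_id,
			      __be32 dst, int dst_len, u8 tos, u8 type,
			      bool in_hw, bool trapped)
{
	struct fib_rt_info fri = {
		.fi	 = fi,
		.tb_id	 = tb_id,
		.dst	 = dst,
		.dst_len = dst_len,
		.tos	 = tos,
		.type	 = type,
		.offload = in_hw,
		.trap	 = trapped,
	};

	fib_alias_hw_flags_set(net, &fri);
}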
diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
index af645604f328..236503a50759 100644
--- a/include/net/ip_tunnels.h
+++ b/include/net/ip_tunnels.h
@@ -33,8 +33,8 @@
/* Used to memset ipv4 address padding. */
#define IP_TUNNEL_KEY_IPV4_PAD offsetofend(struct ip_tunnel_key, u.ipv4.dst)
#define IP_TUNNEL_KEY_IPV4_PAD_LEN \
- (FIELD_SIZEOF(struct ip_tunnel_key, u) - \
- FIELD_SIZEOF(struct ip_tunnel_key, u.ipv4))
+ (sizeof_field(struct ip_tunnel_key, u) - \
+ sizeof_field(struct ip_tunnel_key, u.ipv4))
struct ip_tunnel_key {
__be64 tun_id;
@@ -63,7 +63,7 @@ struct ip_tunnel_key {
/* Maximum tunnel options length. */
#define IP_TUNNEL_OPTS_MAX \
- GENMASK((FIELD_SIZEOF(struct ip_tunnel_info, \
+ GENMASK((sizeof_field(struct ip_tunnel_info, \
options_len) * BITS_PER_BYTE) - 1, 0)
struct ip_tunnel_info {
diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
index 078887c8c586..83be2d93b407 100644
--- a/include/net/ip_vs.h
+++ b/include/net/ip_vs.h
@@ -1325,7 +1325,7 @@ void ip_vs_protocol_net_cleanup(struct netns_ipvs *ipvs);
void ip_vs_control_net_cleanup(struct netns_ipvs *ipvs);
void ip_vs_estimator_net_cleanup(struct netns_ipvs *ipvs);
void ip_vs_sync_net_cleanup(struct netns_ipvs *ipvs);
-void ip_vs_service_net_cleanup(struct netns_ipvs *ipvs);
+void ip_vs_service_nets_cleanup(struct list_head *net_list);
/* IPVS application functions
* (from ip_vs_app.c)
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index 009605c56f20..cec1a54401f2 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -696,6 +696,11 @@ static inline bool ipv6_addr_v4mapped(const struct in6_addr *a)
cpu_to_be32(0x0000ffff))) == 0UL;
}
+static inline bool ipv6_addr_v4mapped_loopback(const struct in6_addr *a)
+{
+ return ipv6_addr_v4mapped(a) && ipv4_is_loopback(a->s6_addr32[3]);
+}
+
static inline u32 ipv6_portaddr_hash(const struct net *net,
const struct in6_addr *addr6,
unsigned int port)
@@ -1017,7 +1022,7 @@ static inline struct sk_buff *ip6_finish_skb(struct sock *sk)
int ip6_dst_lookup(struct net *net, struct sock *sk, struct dst_entry **dst,
struct flowi6 *fl6);
-struct dst_entry *ip6_dst_lookup_flow(const struct sock *sk, struct flowi6 *fl6,
+struct dst_entry *ip6_dst_lookup_flow(struct net *net, const struct sock *sk, struct flowi6 *fl6,
const struct in6_addr *final_dst);
struct dst_entry *ip6_sk_dst_lookup_flow(struct sock *sk, struct flowi6 *fl6,
const struct in6_addr *final_dst,
@@ -1108,6 +1113,9 @@ int inet6_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
int inet6_hash_connect(struct inet_timewait_death_row *death_row,
struct sock *sk);
+int inet6_sendmsg(struct socket *sock, struct msghdr *msg, size_t size);
+int inet6_recvmsg(struct socket *sock, struct msghdr *msg, size_t size,
+ int flags);
/*
* reassembly.c
diff --git a/include/net/ipv6_stubs.h b/include/net/ipv6_stubs.h
index 5c93e942c50b..3e7d2c0e79ca 100644
--- a/include/net/ipv6_stubs.h
+++ b/include/net/ipv6_stubs.h
@@ -24,8 +24,10 @@ struct ipv6_stub {
const struct in6_addr *addr);
int (*ipv6_sock_mc_drop)(struct sock *sk, int ifindex,
const struct in6_addr *addr);
- int (*ipv6_dst_lookup)(struct net *net, struct sock *sk,
- struct dst_entry **dst, struct flowi6 *fl6);
+ struct dst_entry *(*ipv6_dst_lookup_flow)(struct net *net,
+ const struct sock *sk,
+ struct flowi6 *fl6,
+ const struct in6_addr *final_dst);
int (*ipv6_route_input)(struct sk_buff *skb);
struct fib6_table *(*fib6_get_table)(struct net *net, u32 id);
diff --git a/include/net/ipx.h b/include/net/ipx.h
index baf090390998..9d1342807b59 100644
--- a/include/net/ipx.h
+++ b/include/net/ipx.h
@@ -47,11 +47,6 @@ struct ipxhdr {
/* From af_ipx.c */
extern int sysctl_ipx_pprop_broadcasting;
-static __inline__ struct ipxhdr *ipx_hdr(struct sk_buff *skb)
-{
- return (struct ipxhdr *)skb_transport_header(skb);
-}
-
struct ipx_interface {
/* IPX address */
__be32 if_netnum;
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index 523c6a09e1c8..77e6b5a83b06 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -312,7 +312,7 @@ struct ieee80211_vif_chanctx_switch {
* @BSS_CHANGED_KEEP_ALIVE: keep alive options (idle period or protected
* keep alive) changed.
* @BSS_CHANGED_MCAST_RATE: Multicast Rate setting changed for this interface
- * @BSS_CHANGED_FTM_RESPONDER: fime timing reasurement request responder
+ * @BSS_CHANGED_FTM_RESPONDER: fine timing measurement request responder
* functionality changed for this BSS (AP mode).
* @BSS_CHANGED_TWT: TWT status changed
* @BSS_CHANGED_HE_OBSS_PD: OBSS Packet Detection status changed.
@@ -967,6 +967,7 @@ ieee80211_rate_get_vht_nss(const struct ieee80211_tx_rate *rate)
* @band: the band to transmit on (use for checking for races)
* @hw_queue: HW queue to put the frame on, skb_get_queue_mapping() gives the AC
* @ack_frame_id: internal frame ID for TX status, used internally
+ * @tx_time_est: TX time estimate in units of 4us, used internally
* @control: union part for control data
* @control.rates: TX rates array to try
* @control.rts_cts_rate_idx: rate for RTS or CTS
@@ -1003,11 +1004,11 @@ ieee80211_rate_get_vht_nss(const struct ieee80211_tx_rate *rate)
struct ieee80211_tx_info {
/* common information */
u32 flags;
- u8 band;
-
- u8 hw_queue;
-
- u16 ack_frame_id;
+ u32 band:3,
+ ack_frame_id:13,
+ hw_queue:4,
+ tx_time_est:10;
+ /* 2 free bits */
union {
struct {
@@ -1058,8 +1059,24 @@ struct ieee80211_tx_info {
};
};
+static inline u16
+ieee80211_info_set_tx_time_est(struct ieee80211_tx_info *info, u16 tx_time_est)
+{
+ /* We only have 10 bits in tx_time_est, so store airtime
+ * in increments of 4us and clamp the maximum to 2**12-1
+ */
+ info->tx_time_est = min_t(u16, tx_time_est, 4095) >> 2;
+ return info->tx_time_est << 2;
+}
+
+static inline u16
+ieee80211_info_get_tx_time_est(struct ieee80211_tx_info *info)
+{
+ return info->tx_time_est << 2;
+}
+
/**
- * struct ieee80211_tx_status - extended tx staus info for rate control
+ * struct ieee80211_tx_status - extended tx status info for rate control
*
* @sta: Station that the packet was transmitted for
* @info: Basic tx status information
@@ -1702,7 +1719,7 @@ struct wireless_dev *ieee80211_vif_to_wdev(struct ieee80211_vif *vif);
* %IEEE80211_KEY_FLAG_SW_MGMT_TX flag to encrypt such frames in SW.
* @IEEE80211_KEY_FLAG_GENERATE_IV_MGMT: This flag should be set by the
* driver for a CCMP/GCMP key to indicate that is requires IV generation
- * only for managment frames (MFP).
+ * only for management frames (MFP).
* @IEEE80211_KEY_FLAG_RESERVE_TAILROOM: This flag should be set by the
* driver for a key to indicate that sufficient tailroom must always
* be reserved for ICV or MIC, even when HW encryption is enabled.
@@ -1998,7 +2015,7 @@ struct ieee80211_sta {
*
* * If the skb is transmitted as part of a BA agreement, the
* A-MSDU maximal size is min(max_amsdu_len, 4065) bytes.
- * * If the skb is not part of a BA aggreement, the A-MSDU maximal
+ * * If the skb is not part of a BA agreement, the A-MSDU maximal
* size is min(max_amsdu_len, 7935) bytes.
*
* Both additional HT limits must be enforced by the low level
@@ -2626,7 +2643,7 @@ ieee80211_get_alt_retry_rate(const struct ieee80211_hw *hw,
* @hw: the hardware
* @skb: the skb
*
- * Free a transmit skb. Use this funtion when some failure
+ * Free a transmit skb. Use this function when some failure
* to transmit happened and thus status cannot be reported.
*/
void ieee80211_free_txskb(struct ieee80211_hw *hw, struct sk_buff *skb);
@@ -3095,7 +3112,9 @@ enum ieee80211_filter_flags {
*
* @IEEE80211_AMPDU_RX_START: start RX aggregation
* @IEEE80211_AMPDU_RX_STOP: stop RX aggregation
- * @IEEE80211_AMPDU_TX_START: start TX aggregation
+ * @IEEE80211_AMPDU_TX_START: start TX aggregation, the driver must either
+ * call ieee80211_start_tx_ba_cb_irqsafe() or return the special
+ * status %IEEE80211_AMPDU_TX_START_IMMEDIATE.
* @IEEE80211_AMPDU_TX_OPERATIONAL: TX aggregation has become operational
* @IEEE80211_AMPDU_TX_STOP_CONT: stop TX aggregation but continue transmitting
* queued packets, now unaggregated. After all packets are transmitted the
@@ -3119,6 +3138,8 @@ enum ieee80211_ampdu_mlme_action {
IEEE80211_AMPDU_TX_OPERATIONAL,
};
+#define IEEE80211_AMPDU_TX_START_IMMEDIATE 1
+
/**
* struct ieee80211_ampdu_params - AMPDU action parameters
*
@@ -3183,13 +3204,13 @@ enum ieee80211_rate_control_changed {
*
* With the support for multi channel contexts and multi channel operations,
* remain on channel operations might be limited/deferred/aborted by other
- * flows/operations which have higher priority (and vise versa).
+ * flows/operations which have higher priority (and vice versa).
* Specifying the ROC type can be used by devices to prioritize the ROC
* operations compared to other operations/flows.
*
* @IEEE80211_ROC_TYPE_NORMAL: There are no special requirements for this ROC.
* @IEEE80211_ROC_TYPE_MGMT_TX: The remain on channel request is required
- * for sending managment frames offchannel.
+ * for sending management frames offchannel.
*/
enum ieee80211_roc_type {
IEEE80211_ROC_TYPE_NORMAL = 0,
@@ -3896,7 +3917,10 @@ struct ieee80211_ops {
*
* Even ``189`` would be wrong since 1 could be lost again.
*
- * Returns a negative error code on failure.
+ * Returns a negative error code on failure. The driver may return
+ * %IEEE80211_AMPDU_TX_START_IMMEDIATE for %IEEE80211_AMPDU_TX_START
+ * if the session can start immediately.
+ *
* The callback can sleep.
*/
int (*ampdu_action)(struct ieee80211_hw *hw,
@@ -5557,6 +5581,18 @@ void ieee80211_sta_register_airtime(struct ieee80211_sta *pubsta, u8 tid,
u32 tx_airtime, u32 rx_airtime);
/**
+ * ieee80211_txq_airtime_check - check if a txq can send frame to device
+ *
+ * @hw: pointer obtained from ieee80211_alloc_hw()
+ * @txq: pointer obtained from station or virtual interface
+ *
+ * Return true if the AQL's airtime limit has not been reached and the txq can
+ * continue to send more packets to the device. Otherwise return false.
+ */
+bool
+ieee80211_txq_airtime_check(struct ieee80211_hw *hw, struct ieee80211_txq *txq);
+
+/**
* ieee80211_iter_keys - iterate keys programmed into the device
* @hw: pointer obtained from ieee80211_alloc_hw()
* @vif: virtual interface to iterate, may be %NULL for all
@@ -5609,7 +5645,7 @@ void ieee80211_iter_keys_rcu(struct ieee80211_hw *hw,
/**
* ieee80211_iter_chan_contexts_atomic - iterate channel contexts
- * @hw: pointre obtained from ieee80211_alloc_hw().
+ * @hw: pointer obtained from ieee80211_alloc_hw().
* @iter: iterator function
* @iter_data: data passed to iterator function
*
@@ -6357,7 +6393,7 @@ ieee80211_return_txq(struct ieee80211_hw *hw, struct ieee80211_txq *txq,
* again.
*
* The API ieee80211_txq_may_transmit() also ensures that TXQ list will be
- * aligned aginst driver's own round-robin scheduler list. i.e it rotates
+ * aligned against driver's own round-robin scheduler list. i.e it rotates
* the TXQ list till it makes the requested node becomes the first entry
* in TXQ list. Thus both the TXQ list and driver's list are in sync. If this
* function returns %true, the driver is expected to schedule packets
@@ -6415,4 +6451,33 @@ void ieee80211_nan_func_match(struct ieee80211_vif *vif,
struct cfg80211_nan_match_params *match,
gfp_t gfp);
+/**
+ * ieee80211_calc_rx_airtime - calculate estimated transmission airtime for RX.
+ *
+ * This function calculates the estimated airtime usage of a frame based on the
+ * rate information in the RX status struct and the frame length.
+ *
+ * @hw: pointer as obtained from ieee80211_alloc_hw()
+ * @status: &struct ieee80211_rx_status containing the transmission rate
+ * information.
+ * @len: frame length in bytes
+ */
+u32 ieee80211_calc_rx_airtime(struct ieee80211_hw *hw,
+ struct ieee80211_rx_status *status,
+ int len);
+
+/**
+ * ieee80211_calc_tx_airtime - calculate estimated transmission airtime for TX.
+ *
+ * This function calculates the estimated airtime usage of a frame based on the
+ * rate information in the TX info struct and the frame length.
+ *
+ * @hw: pointer as obtained from ieee80211_alloc_hw()
+ * @info: &struct ieee80211_tx_info of the frame.
+ * @len: frame length in bytes
+ */
+u32 ieee80211_calc_tx_airtime(struct ieee80211_hw *hw,
+ struct ieee80211_tx_info *info,
+ int len);
+
#endif /* MAC80211_H */
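A hedged sketch of how a driver might use the new airtime helpers: compute an estimate when queueing a frame, stash it in the 10-bit tx_time_est field, and read it back on completion for AQL accounting. Only the mac80211 helpers are real symbols; the wrappers are placeholders:

static void example_tx_enqueue(struct ieee80211_hw *hw, struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	u32 est = ieee80211_calc_tx_airtime(hw, info, skb->len);

	/* stored in 4 us units; the helper clamps to its 10-bit field */
	ieee80211_info_set_tx_time_est(info, est);
}

static u16 example_tx_complete(struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

	/* feed this back into per-station airtime / AQL accounting */
	return ieee80211_info_get_tx_time_est(info);
}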
diff --git a/include/net/macsec.h b/include/net/macsec.h
new file mode 100644
index 000000000000..92e43db8b566
--- /dev/null
+++ b/include/net/macsec.h
@@ -0,0 +1,224 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * MACsec netdev header, used for h/w accelerated implementations.
+ *
+ * Copyright (c) 2015 Sabrina Dubroca <sd@queasysnail.net>
+ */
+#ifndef _NET_MACSEC_H_
+#define _NET_MACSEC_H_
+
+#include <linux/u64_stats_sync.h>
+#include <uapi/linux/if_link.h>
+#include <uapi/linux/if_macsec.h>
+
+typedef u64 __bitwise sci_t;
+
+#define MACSEC_NUM_AN 4 /* 2 bits for the association number */
+
+/**
+ * struct macsec_key - SA key
+ * @id: user-provided key identifier
+ * @tfm: crypto struct, key storage
+ */
+struct macsec_key {
+ u8 id[MACSEC_KEYID_LEN];
+ struct crypto_aead *tfm;
+};
+
+struct macsec_rx_sc_stats {
+ __u64 InOctetsValidated;
+ __u64 InOctetsDecrypted;
+ __u64 InPktsUnchecked;
+ __u64 InPktsDelayed;
+ __u64 InPktsOK;
+ __u64 InPktsInvalid;
+ __u64 InPktsLate;
+ __u64 InPktsNotValid;
+ __u64 InPktsNotUsingSA;
+ __u64 InPktsUnusedSA;
+};
+
+struct macsec_rx_sa_stats {
+ __u32 InPktsOK;
+ __u32 InPktsInvalid;
+ __u32 InPktsNotValid;
+ __u32 InPktsNotUsingSA;
+ __u32 InPktsUnusedSA;
+};
+
+struct macsec_tx_sa_stats {
+ __u32 OutPktsProtected;
+ __u32 OutPktsEncrypted;
+};
+
+struct macsec_tx_sc_stats {
+ __u64 OutPktsProtected;
+ __u64 OutPktsEncrypted;
+ __u64 OutOctetsProtected;
+ __u64 OutOctetsEncrypted;
+};
+
+/**
+ * struct macsec_rx_sa - receive secure association
+ * @active: this SA is in use
+ * @next_pn: packet number expected for the next packet
+ * @lock: protects next_pn manipulations
+ * @key: key structure
+ * @stats: per-SA stats
+ */
+struct macsec_rx_sa {
+ struct macsec_key key;
+ spinlock_t lock;
+ u32 next_pn;
+ refcount_t refcnt;
+ bool active;
+ struct macsec_rx_sa_stats __percpu *stats;
+ struct macsec_rx_sc *sc;
+ struct rcu_head rcu;
+};
+
+struct pcpu_rx_sc_stats {
+ struct macsec_rx_sc_stats stats;
+ struct u64_stats_sync syncp;
+};
+
+struct pcpu_tx_sc_stats {
+ struct macsec_tx_sc_stats stats;
+ struct u64_stats_sync syncp;
+};
+
+/**
+ * struct macsec_rx_sc - receive secure channel
+ * @sci: secure channel identifier for this SC
+ * @active: channel is active
+ * @sa: array of secure associations
+ * @stats: per-SC stats
+ */
+struct macsec_rx_sc {
+ struct macsec_rx_sc __rcu *next;
+ sci_t sci;
+ bool active;
+ struct macsec_rx_sa __rcu *sa[MACSEC_NUM_AN];
+ struct pcpu_rx_sc_stats __percpu *stats;
+ refcount_t refcnt;
+ struct rcu_head rcu_head;
+};
+
+/**
+ * struct macsec_tx_sa - transmit secure association
+ * @active: this SA is in use
+ * @next_pn: packet number to use for the next packet
+ * @lock: protects next_pn manipulations
+ * @key: key structure
+ * @stats: per-SA stats
+ */
+struct macsec_tx_sa {
+ struct macsec_key key;
+ spinlock_t lock;
+ u32 next_pn;
+ refcount_t refcnt;
+ bool active;
+ struct macsec_tx_sa_stats __percpu *stats;
+ struct rcu_head rcu;
+};
+
+/**
+ * struct macsec_tx_sc - transmit secure channel
+ * @active: channel is active
+ * @encoding_sa: association number of the SA currently in use
+ * @encrypt: encrypt packets on transmit, or authenticate only
+ * @send_sci: always include the SCI in the SecTAG
+ * @end_station:
+ * @scb: single copy broadcast flag
+ * @sa: array of secure associations
+ * @stats: stats for this TXSC
+ */
+struct macsec_tx_sc {
+ bool active;
+ u8 encoding_sa;
+ bool encrypt;
+ bool send_sci;
+ bool end_station;
+ bool scb;
+ struct macsec_tx_sa __rcu *sa[MACSEC_NUM_AN];
+ struct pcpu_tx_sc_stats __percpu *stats;
+};
+
+/**
+ * struct macsec_secy - MACsec Security Entity
+ * @netdev: netdevice for this SecY
+ * @n_rx_sc: number of receive secure channels configured on this SecY
+ * @sci: secure channel identifier used for tx
+ * @key_len: length of keys used by the cipher suite
+ * @icv_len: length of ICV used by the cipher suite
+ * @validate_frames: validation mode
+ * @operational: MAC_Operational flag
+ * @protect_frames: enable protection for this SecY
+ * @replay_protect: enable packet number checks on receive
+ * @replay_window: size of the replay window
+ * @tx_sc: transmit secure channel
+ * @rx_sc: linked list of receive secure channels
+ */
+struct macsec_secy {
+ struct net_device *netdev;
+ unsigned int n_rx_sc;
+ sci_t sci;
+ u16 key_len;
+ u16 icv_len;
+ enum macsec_validation_type validate_frames;
+ bool operational;
+ bool protect_frames;
+ bool replay_protect;
+ u32 replay_window;
+ struct macsec_tx_sc tx_sc;
+ struct macsec_rx_sc __rcu *rx_sc;
+};
+
+/**
+ * struct macsec_context - MACsec context for hardware offloading
+ */
+struct macsec_context {
+ struct phy_device *phydev;
+ enum macsec_offload offload;
+
+ struct macsec_secy *secy;
+ struct macsec_rx_sc *rx_sc;
+ struct {
+ unsigned char assoc_num;
+ u8 key[MACSEC_KEYID_LEN];
+ union {
+ struct macsec_rx_sa *rx_sa;
+ struct macsec_tx_sa *tx_sa;
+ };
+ } sa;
+
+ u8 prepare:1;
+};
+
+/**
+ * struct macsec_ops - MACsec offloading operations
+ */
+struct macsec_ops {
+ /* Device wide */
+ int (*mdo_dev_open)(struct macsec_context *ctx);
+ int (*mdo_dev_stop)(struct macsec_context *ctx);
+ /* SecY */
+ int (*mdo_add_secy)(struct macsec_context *ctx);
+ int (*mdo_upd_secy)(struct macsec_context *ctx);
+ int (*mdo_del_secy)(struct macsec_context *ctx);
+ /* Security channels */
+ int (*mdo_add_rxsc)(struct macsec_context *ctx);
+ int (*mdo_upd_rxsc)(struct macsec_context *ctx);
+ int (*mdo_del_rxsc)(struct macsec_context *ctx);
+ /* Security associations */
+ int (*mdo_add_rxsa)(struct macsec_context *ctx);
+ int (*mdo_upd_rxsa)(struct macsec_context *ctx);
+ int (*mdo_del_rxsa)(struct macsec_context *ctx);
+ int (*mdo_add_txsa)(struct macsec_context *ctx);
+ int (*mdo_upd_txsa)(struct macsec_context *ctx);
+ int (*mdo_del_txsa)(struct macsec_context *ctx);
+};
+
+void macsec_pn_wrapped(struct macsec_secy *secy, struct macsec_tx_sa *tx_sa);
+
+#endif /* _NET_MACSEC_H_ */
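Illustrative only: how an offloading PHY (or MAC) driver might populate the new ops table; the callback implementations are not shown and all example_* names are placeholders:

static const struct macsec_ops example_macsec_ops = {
	.mdo_dev_open	= example_macsec_dev_open,
	.mdo_dev_stop	= example_macsec_dev_stop,
	.mdo_add_secy	= example_macsec_add_secy,
	.mdo_add_rxsc	= example_macsec_add_rxsc,
	.mdo_add_rxsa	= example_macsec_add_rxsa,
	.mdo_add_txsa	= example_macsec_add_txsa,
	/* the matching mdo_upd_* / mdo_del_* callbacks follow the same shape */
};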
diff --git a/include/net/mptcp.h b/include/net/mptcp.h
new file mode 100644
index 000000000000..c971d25431ea
--- /dev/null
+++ b/include/net/mptcp.h
@@ -0,0 +1,185 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Multipath TCP
+ *
+ * Copyright (c) 2017 - 2019, Intel Corporation.
+ */
+
+#ifndef __NET_MPTCP_H
+#define __NET_MPTCP_H
+
+#include <linux/skbuff.h>
+#include <linux/tcp.h>
+#include <linux/types.h>
+
+/* MPTCP sk_buff extension data */
+struct mptcp_ext {
+ u64 data_ack;
+ u64 data_seq;
+ u32 subflow_seq;
+ u16 data_len;
+ u8 use_map:1,
+ dsn64:1,
+ data_fin:1,
+ use_ack:1,
+ ack64:1,
+ mpc_map:1,
+ __unused:2;
+ /* one byte hole */
+};
+
+struct mptcp_out_options {
+#if IS_ENABLED(CONFIG_MPTCP)
+ u16 suboptions;
+ u64 sndr_key;
+ u64 rcvr_key;
+ struct mptcp_ext ext_copy;
+#endif
+};
+
+#ifdef CONFIG_MPTCP
+
+void mptcp_init(void);
+
+static inline bool sk_is_mptcp(const struct sock *sk)
+{
+ return tcp_sk(sk)->is_mptcp;
+}
+
+static inline bool rsk_is_mptcp(const struct request_sock *req)
+{
+ return tcp_rsk(req)->is_mptcp;
+}
+
+void mptcp_parse_option(const struct sk_buff *skb, const unsigned char *ptr,
+ int opsize, struct tcp_options_received *opt_rx);
+bool mptcp_syn_options(struct sock *sk, const struct sk_buff *skb,
+ unsigned int *size, struct mptcp_out_options *opts);
+void mptcp_rcv_synsent(struct sock *sk);
+bool mptcp_synack_options(const struct request_sock *req, unsigned int *size,
+ struct mptcp_out_options *opts);
+bool mptcp_established_options(struct sock *sk, struct sk_buff *skb,
+ unsigned int *size, unsigned int remaining,
+ struct mptcp_out_options *opts);
+void mptcp_incoming_options(struct sock *sk, struct sk_buff *skb,
+ struct tcp_options_received *opt_rx);
+
+void mptcp_write_options(__be32 *ptr, struct mptcp_out_options *opts);
+
+/* move the skb extension ownership, with the assumption that 'to' is
+ * newly allocated
+ */
+static inline void mptcp_skb_ext_move(struct sk_buff *to,
+ struct sk_buff *from)
+{
+ if (!skb_ext_exist(from, SKB_EXT_MPTCP))
+ return;
+
+ if (WARN_ON_ONCE(to->active_extensions))
+ skb_ext_put(to);
+
+ to->active_extensions = from->active_extensions;
+ to->extensions = from->extensions;
+ from->active_extensions = 0;
+}
+
+static inline bool mptcp_ext_matches(const struct mptcp_ext *to_ext,
+ const struct mptcp_ext *from_ext)
+{
+ /* MPTCP always clears the ext when adding it to the skb, so
+ * holes do not bother us here
+ */
+ return !from_ext ||
+ (to_ext && from_ext &&
+ !memcmp(from_ext, to_ext, sizeof(struct mptcp_ext)));
+}
+
+/* check if skbs can be collapsed.
+ * MPTCP collapse is allowed if neither @to nor @from carries an mptcp data
+ * mapping, or if the extension of @to is the same as @from.
+ * Collapsing is not possible if @to lacks an extension, but @from carries one.
+ */
+static inline bool mptcp_skb_can_collapse(const struct sk_buff *to,
+ const struct sk_buff *from)
+{
+ return mptcp_ext_matches(skb_ext_find(to, SKB_EXT_MPTCP),
+ skb_ext_find(from, SKB_EXT_MPTCP));
+}
+
+#else
+
+static inline void mptcp_init(void)
+{
+}
+
+static inline bool sk_is_mptcp(const struct sock *sk)
+{
+ return false;
+}
+
+static inline bool rsk_is_mptcp(const struct request_sock *req)
+{
+ return false;
+}
+
+static inline void mptcp_parse_option(const struct sk_buff *skb,
+ const unsigned char *ptr, int opsize,
+ struct tcp_options_received *opt_rx)
+{
+}
+
+static inline bool mptcp_syn_options(struct sock *sk, const struct sk_buff *skb,
+ unsigned int *size,
+ struct mptcp_out_options *opts)
+{
+ return false;
+}
+
+static inline void mptcp_rcv_synsent(struct sock *sk)
+{
+}
+
+static inline bool mptcp_synack_options(const struct request_sock *req,
+ unsigned int *size,
+ struct mptcp_out_options *opts)
+{
+ return false;
+}
+
+static inline bool mptcp_established_options(struct sock *sk,
+ struct sk_buff *skb,
+ unsigned int *size,
+ unsigned int remaining,
+ struct mptcp_out_options *opts)
+{
+ return false;
+}
+
+static inline void mptcp_incoming_options(struct sock *sk,
+ struct sk_buff *skb,
+ struct tcp_options_received *opt_rx)
+{
+}
+
+static inline void mptcp_skb_ext_move(struct sk_buff *to,
+ const struct sk_buff *from)
+{
+}
+
+static inline bool mptcp_skb_can_collapse(const struct sk_buff *to,
+ const struct sk_buff *from)
+{
+ return true;
+}
+
+#endif /* CONFIG_MPTCP */
+
+#if IS_ENABLED(CONFIG_MPTCP_IPV6)
+int mptcpv6_init(void);
+void mptcpv6_handle_mapped(struct sock *sk, bool mapped);
+#elif IS_ENABLED(CONFIG_IPV6)
+static inline int mptcpv6_init(void) { return 0; }
+static inline void mptcpv6_handle_mapped(struct sock *sk, bool mapped) { }
+#endif
+
+#endif /* __NET_MPTCP_H */
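A short sketch of the intended call pattern on the TCP coalescing path, with the surrounding logic reduced to a comment:

static bool example_try_coalesce(struct sk_buff *to, struct sk_buff *from)
{
	if (!mptcp_skb_can_collapse(to, from))
		return false;

	/* ... safe to merge the payload of 'from' into 'to' ... */
	return true;
}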
diff --git a/include/net/mrp.h b/include/net/mrp.h
index ef58b4a07190..1c308c034e1a 100644
--- a/include/net/mrp.h
+++ b/include/net/mrp.h
@@ -39,7 +39,7 @@ struct mrp_skb_cb {
static inline struct mrp_skb_cb *mrp_cb(struct sk_buff *skb)
{
BUILD_BUG_ON(sizeof(struct mrp_skb_cb) >
- FIELD_SIZEOF(struct sk_buff, cb));
+ sizeof_field(struct sk_buff, cb));
return (struct mrp_skb_cb *)skb->cb;
}
diff --git a/include/net/ndisc.h b/include/net/ndisc.h
index b2f715ca0567..b5ebeb3b0de0 100644
--- a/include/net/ndisc.h
+++ b/include/net/ndisc.h
@@ -414,8 +414,8 @@ static inline void __ipv6_confirm_neigh(struct net_device *dev,
unsigned long now = jiffies;
/* avoid dirtying neighbour */
- if (n->confirmed != now)
- n->confirmed = now;
+ if (READ_ONCE(n->confirmed) != now)
+ WRITE_ONCE(n->confirmed, now);
}
rcu_read_unlock_bh();
}
@@ -431,8 +431,8 @@ static inline void __ipv6_confirm_neigh_stub(struct net_device *dev,
unsigned long now = jiffies;
/* avoid dirtying neighbour */
- if (n->confirmed != now)
- n->confirmed = now;
+ if (READ_ONCE(n->confirmed) != now)
+ WRITE_ONCE(n->confirmed, now);
}
rcu_read_unlock_bh();
}
diff --git a/include/net/neighbour.h b/include/net/neighbour.h
index b8452cc0e059..8ec77bfdc1a4 100644
--- a/include/net/neighbour.h
+++ b/include/net/neighbour.h
@@ -72,7 +72,6 @@ struct neigh_parms {
struct net_device *dev;
struct list_head list;
int (*neigh_setup)(struct neighbour *);
- void (*neigh_cleanup)(struct neighbour *);
struct neigh_table *tbl;
void *sysctl_table;
@@ -468,7 +467,7 @@ static inline int neigh_hh_output(const struct hh_cache *hh, struct sk_buff *skb
do {
seq = read_seqbegin(&hh->hh_lock);
- hh_len = hh->hh_len;
+ hh_len = READ_ONCE(hh->hh_len);
if (likely(hh_len <= HH_DATA_MOD)) {
hh_alen = HH_DATA_MOD;
diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
index c7e15a213ef2..854d39ef1ca3 100644
--- a/include/net/net_namespace.h
+++ b/include/net/net_namespace.h
@@ -36,6 +36,7 @@
#include <linux/ns_common.h>
#include <linux/idr.h>
#include <linux/skbuff.h>
+#include <linux/notifier.h>
struct user_namespace;
struct proc_dir_entry;
@@ -104,6 +105,8 @@ struct net {
struct hlist_head *dev_name_head;
struct hlist_head *dev_index_head;
+ struct raw_notifier_head netdev_chain;
+
/* Note that @hash_mix can be read millions times per second,
* it is critical that it is on a read_mostly cache line.
*/
@@ -326,7 +329,8 @@ static inline struct net *read_pnet(const possible_net_t *pnet)
/* Protected by net_rwsem */
#define for_each_net(VAR) \
list_for_each_entry(VAR, &net_namespace_list, list)
-
+#define for_each_net_continue_reverse(VAR) \
+ list_for_each_entry_continue_reverse(VAR, &net_namespace_list, list)
#define for_each_net_rcu(VAR) \
list_for_each_entry_rcu(VAR, &net_namespace_list, list)
@@ -343,9 +347,9 @@ static inline struct net *read_pnet(const possible_net_t *pnet)
#endif
int peernet2id_alloc(struct net *net, struct net *peer, gfp_t gfp);
-int peernet2id(struct net *net, struct net *peer);
-bool peernet_has_id(struct net *net, struct net *peer);
-struct net *get_net_ns_by_id(struct net *net, int id);
+int peernet2id(const struct net *net, struct net *peer);
+bool peernet_has_id(const struct net *net, struct net *peer);
+struct net *get_net_ns_by_id(const struct net *net, int id);
struct pernet_operations {
struct list_head list;
@@ -423,7 +427,7 @@ static inline void unregister_net_sysctl_table(struct ctl_table_header *header)
}
#endif
-static inline int rt_genid_ipv4(struct net *net)
+static inline int rt_genid_ipv4(const struct net *net)
{
return atomic_read(&net->ipv4.rt_genid);
}
@@ -455,7 +459,7 @@ static inline void rt_genid_bump_all(struct net *net)
rt_genid_bump_ipv6(net);
}
-static inline int fnhe_genid(struct net *net)
+static inline int fnhe_genid(const struct net *net)
{
return atomic_read(&net->fnhe_genid);
}
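A hedged sketch of the unwind pattern the new reverse iterator is meant for (the setup/cleanup helpers are placeholders); both loops assume net_rwsem is held:

static int example_setup_all_nets(void)
{
	struct net *net;
	int err = 0;

	down_read(&net_rwsem);
	for_each_net(net) {
		err = example_setup_one(net);
		if (err)
			goto undo;
	}
	up_read(&net_rwsem);
	return 0;

undo:
	for_each_net_continue_reverse(net)
		example_cleanup_one(net);
	up_read(&net_rwsem);
	return err;
}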
diff --git a/include/net/netfilter/nf_conntrack_extend.h b/include/net/netfilter/nf_conntrack_extend.h
index 112a6f40dfaf..5ae5295aa46d 100644
--- a/include/net/netfilter/nf_conntrack_extend.h
+++ b/include/net/netfilter/nf_conntrack_extend.h
@@ -43,7 +43,6 @@ enum nf_ct_ext_id {
/* Extensions: optional stuff which isn't permanently in struct. */
struct nf_ct_ext {
- struct rcu_head rcu;
u8 offset[NF_CT_EXT_NUM];
u8 len;
char data[0];
@@ -72,15 +71,6 @@ static inline void *__nf_ct_ext_find(const struct nf_conn *ct, u8 id)
/* Destroy all relationships */
void nf_ct_ext_destroy(struct nf_conn *ct);
-/* Free operation. If you want to free a object referred from private area,
- * please implement __nf_ct_ext_free() and call it.
- */
-static inline void nf_ct_ext_free(struct nf_conn *ct)
-{
- if (ct->ext)
- kfree_rcu(ct->ext, rcu);
-}
-
/* Add this type, returns pointer to data or NULL. */
void *nf_ct_ext_add(struct nf_conn *ct, enum nf_ct_ext_id id, gfp_t gfp);
diff --git a/include/net/netfilter/nf_conntrack_helper.h b/include/net/netfilter/nf_conntrack_helper.h
index 44b5a00a9c64..37f0fbefb060 100644
--- a/include/net/netfilter/nf_conntrack_helper.h
+++ b/include/net/netfilter/nf_conntrack_helper.h
@@ -81,7 +81,7 @@ struct nf_conn_help {
};
#define NF_CT_HELPER_BUILD_BUG_ON(structsize) \
- BUILD_BUG_ON((structsize) > FIELD_SIZEOF(struct nf_conn_help, data))
+ BUILD_BUG_ON((structsize) > sizeof_field(struct nf_conn_help, data))
struct nf_conntrack_helper *__nf_conntrack_helper_find(const char *name,
u16 l3num, u8 protonum);
diff --git a/include/net/netfilter/nf_flow_table.h b/include/net/netfilter/nf_flow_table.h
index b37a7d608134..e0f709d9d547 100644
--- a/include/net/netfilter/nf_flow_table.h
+++ b/include/net/netfilter/nf_flow_table.h
@@ -8,26 +8,50 @@
#include <linux/rcupdate.h>
#include <linux/netfilter.h>
#include <linux/netfilter/nf_conntrack_tuple_common.h>
+#include <net/flow_offload.h>
#include <net/dst.h>
struct nf_flowtable;
+struct nf_flow_rule;
+struct flow_offload;
+enum flow_offload_tuple_dir;
struct nf_flowtable_type {
struct list_head list;
int family;
int (*init)(struct nf_flowtable *ft);
+ int (*setup)(struct nf_flowtable *ft,
+ struct net_device *dev,
+ enum flow_block_command cmd);
+ int (*action)(struct net *net,
+ const struct flow_offload *flow,
+ enum flow_offload_tuple_dir dir,
+ struct nf_flow_rule *flow_rule);
void (*free)(struct nf_flowtable *ft);
nf_hookfn *hook;
struct module *owner;
};
+enum nf_flowtable_flags {
+ NF_FLOWTABLE_HW_OFFLOAD = 0x1,
+};
+
struct nf_flowtable {
struct list_head list;
struct rhashtable rhashtable;
+ int priority;
const struct nf_flowtable_type *type;
struct delayed_work gc_work;
+ unsigned int flags;
+ struct flow_block flow_block;
+ possible_net_t net;
};
+static inline bool nf_flowtable_hw_offload(struct nf_flowtable *flowtable)
+{
+ return flowtable->flags & NF_FLOWTABLE_HW_OFFLOAD;
+}
+
enum flow_offload_tuple_dir {
FLOW_OFFLOAD_DIR_ORIGINAL = IP_CT_DIR_ORIGINAL,
FLOW_OFFLOAD_DIR_REPLY = IP_CT_DIR_REPLY,
@@ -64,21 +88,37 @@ struct flow_offload_tuple_rhash {
struct flow_offload_tuple tuple;
};
-#define FLOW_OFFLOAD_SNAT 0x1
-#define FLOW_OFFLOAD_DNAT 0x2
-#define FLOW_OFFLOAD_DYING 0x4
-#define FLOW_OFFLOAD_TEARDOWN 0x8
+enum nf_flow_flags {
+ NF_FLOW_SNAT,
+ NF_FLOW_DNAT,
+ NF_FLOW_TEARDOWN,
+ NF_FLOW_HW,
+ NF_FLOW_HW_DYING,
+ NF_FLOW_HW_DEAD,
+ NF_FLOW_HW_REFRESH,
+};
+
+enum flow_offload_type {
+ NF_FLOW_OFFLOAD_UNSPEC = 0,
+ NF_FLOW_OFFLOAD_ROUTE,
+};
struct flow_offload {
struct flow_offload_tuple_rhash tuplehash[FLOW_OFFLOAD_DIR_MAX];
- u32 flags;
- union {
- /* Your private driver data here. */
- u32 timeout;
- };
+ struct nf_conn *ct;
+ unsigned long flags;
+ u16 type;
+ u32 timeout;
+ struct rcu_head rcu_head;
};
#define NF_FLOW_TIMEOUT (30 * HZ)
+#define nf_flowtable_time_stamp (u32)jiffies
+
+static inline __s32 nf_flow_timeout_delta(unsigned int timeout)
+{
+ return (__s32)(timeout - nf_flowtable_time_stamp);
+}
struct nf_flow_route {
struct {
@@ -86,10 +126,12 @@ struct nf_flow_route {
} tuple[FLOW_OFFLOAD_DIR_MAX];
};
-struct flow_offload *flow_offload_alloc(struct nf_conn *ct,
- struct nf_flow_route *route);
+struct flow_offload *flow_offload_alloc(struct nf_conn *ct);
void flow_offload_free(struct flow_offload *flow);
+int flow_offload_route_init(struct flow_offload *flow,
+ const struct nf_flow_route *route);
+
int flow_offload_add(struct nf_flowtable *flow_table, struct flow_offload *flow);
struct flow_offload_tuple_rhash *flow_offload_lookup(struct nf_flowtable *flow_table,
struct flow_offload_tuple *tuple);
@@ -99,10 +141,6 @@ int nf_flow_table_init(struct nf_flowtable *flow_table);
void nf_flow_table_free(struct nf_flowtable *flow_table);
void flow_offload_teardown(struct flow_offload *flow);
-static inline void flow_offload_dead(struct flow_offload *flow)
-{
- flow->flags |= FLOW_OFFLOAD_DYING;
-}
int nf_flow_snat_port(const struct flow_offload *flow,
struct sk_buff *skb, unsigned int thoff,
@@ -123,4 +161,25 @@ unsigned int nf_flow_offload_ipv6_hook(void *priv, struct sk_buff *skb,
#define MODULE_ALIAS_NF_FLOWTABLE(family) \
MODULE_ALIAS("nf-flowtable-" __stringify(family))
+void nf_flow_offload_add(struct nf_flowtable *flowtable,
+ struct flow_offload *flow);
+void nf_flow_offload_del(struct nf_flowtable *flowtable,
+ struct flow_offload *flow);
+void nf_flow_offload_stats(struct nf_flowtable *flowtable,
+ struct flow_offload *flow);
+
+void nf_flow_table_offload_flush(struct nf_flowtable *flowtable);
+int nf_flow_table_offload_setup(struct nf_flowtable *flowtable,
+ struct net_device *dev,
+ enum flow_block_command cmd);
+int nf_flow_rule_route_ipv4(struct net *net, const struct flow_offload *flow,
+ enum flow_offload_tuple_dir dir,
+ struct nf_flow_rule *flow_rule);
+int nf_flow_rule_route_ipv6(struct net *net, const struct flow_offload *flow,
+ enum flow_offload_tuple_dir dir,
+ struct nf_flow_rule *flow_rule);
+
+int nf_flow_table_offload_init(void);
+void nf_flow_table_offload_exit(void);
+
#endif /* _NF_FLOW_TABLE_H */
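A hedged sketch of the split allocation/route-init sequence introduced above; error handling is minimal and the wrapper is illustrative:

static int example_flow_add(struct nf_flowtable *ft, struct nf_conn *ct,
			    const struct nf_flow_route *route)
{
	struct flow_offload *flow;

	flow = flow_offload_alloc(ct);
	if (!flow)
		return -ENOMEM;

	if (flow_offload_route_init(flow, route) < 0) {
		flow_offload_free(flow);
		return -EINVAL;
	}

	return flow_offload_add(ft, flow);
}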
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index 2d0275f13bbf..4170c033d461 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -114,7 +114,7 @@ static inline void nft_reg_store8(u32 *dreg, u8 val)
*(u8 *)dreg = val;
}
-static inline u8 nft_reg_load8(u32 *sreg)
+static inline u8 nft_reg_load8(const u32 *sreg)
{
return *(u8 *)sreg;
}
@@ -125,7 +125,7 @@ static inline void nft_reg_store16(u32 *dreg, u16 val)
*(u16 *)dreg = val;
}
-static inline u16 nft_reg_load16(u32 *sreg)
+static inline u16 nft_reg_load16(const u32 *sreg)
{
return *(u16 *)sreg;
}
@@ -135,7 +135,7 @@ static inline void nft_reg_store64(u32 *dreg, u64 val)
put_unaligned(val, (u64 *)dreg);
}
-static inline u64 nft_reg_load64(u32 *sreg)
+static inline u64 nft_reg_load64(const u32 *sreg)
{
return get_unaligned((u64 *)sreg);
}
@@ -231,6 +231,7 @@ struct nft_userdata {
* struct nft_set_elem - generic representation of set elements
*
* @key: element key
+ * @key_end: closing element key
* @priv: element private data and extensions
*/
struct nft_set_elem {
@@ -238,6 +239,10 @@ struct nft_set_elem {
u32 buf[NFT_DATA_VALUE_MAXLEN / sizeof(u32)];
struct nft_data val;
} key;
+ union {
+ u32 buf[NFT_DATA_VALUE_MAXLEN / sizeof(u32)];
+ struct nft_data val;
+ } key_end;
void *priv;
};
@@ -259,11 +264,15 @@ struct nft_set_iter {
* @klen: key length
* @dlen: data length
* @size: number of set elements
+ * @field_len: length of each field in concatenation, bytes
+ * @field_count: number of concatenated fields in element
*/
struct nft_set_desc {
unsigned int klen;
unsigned int dlen;
unsigned int size;
+ u8 field_len[NFT_REG32_COUNT];
+ u8 field_count;
};
/**
@@ -404,6 +413,8 @@ void nft_unregister_set(struct nft_set_type *type);
* @dtype: data type (verdict or numeric type defined by userspace)
* @objtype: object type (see NFT_OBJECT_* definitions)
* @size: maximum set size
+ * @field_len: length of each field in concatenation, bytes
+ * @field_count: number of concatenated fields in element
* @use: number of rules references to this set
* @nelems: number of elements
* @ndeact: number of deactivated elements queued for removal
@@ -430,6 +441,8 @@ struct nft_set {
u32 dtype;
u32 objtype;
u32 size;
+ u8 field_len[NFT_REG32_COUNT];
+ u8 field_count;
u32 use;
atomic_t nelems;
u32 ndeact;
@@ -502,6 +515,7 @@ void nf_tables_destroy_set(const struct nft_ctx *ctx, struct nft_set *set);
* enum nft_set_extensions - set extension type IDs
*
* @NFT_SET_EXT_KEY: element key
+ * @NFT_SET_EXT_KEY_END: upper bound element key, for ranges
* @NFT_SET_EXT_DATA: mapping data
* @NFT_SET_EXT_FLAGS: element flags
* @NFT_SET_EXT_TIMEOUT: element timeout
@@ -513,6 +527,7 @@ void nf_tables_destroy_set(const struct nft_ctx *ctx, struct nft_set *set);
*/
enum nft_set_extensions {
NFT_SET_EXT_KEY,
+ NFT_SET_EXT_KEY_END,
NFT_SET_EXT_DATA,
NFT_SET_EXT_FLAGS,
NFT_SET_EXT_TIMEOUT,
@@ -606,6 +621,11 @@ static inline struct nft_data *nft_set_ext_key(const struct nft_set_ext *ext)
return nft_set_ext(ext, NFT_SET_EXT_KEY);
}
+static inline struct nft_data *nft_set_ext_key_end(const struct nft_set_ext *ext)
+{
+ return nft_set_ext(ext, NFT_SET_EXT_KEY_END);
+}
+
static inline struct nft_data *nft_set_ext_data(const struct nft_set_ext *ext)
{
return nft_set_ext(ext, NFT_SET_EXT_DATA);
@@ -655,7 +675,7 @@ static inline struct nft_object **nft_set_ext_obj(const struct nft_set_ext *ext)
void *nft_set_elem_init(const struct nft_set *set,
const struct nft_set_ext_tmpl *tmpl,
- const u32 *key, const u32 *data,
+ const u32 *key, const u32 *key_end, const u32 *data,
u64 timeout, u64 expiration, gfp_t gfp);
void nft_set_elem_destroy(const struct nft_set *set, void *elem,
bool destroy_expr);
@@ -964,25 +984,31 @@ struct nft_stats {
struct u64_stats_sync syncp;
};
+struct nft_hook {
+ struct list_head list;
+ struct nf_hook_ops ops;
+ struct rcu_head rcu;
+};
+
/**
* struct nft_base_chain - nf_tables base chain
*
* @ops: netfilter hook ops
+ * @hook_list: list of netfilter hooks (for NFPROTO_NETDEV family)
* @type: chain type
* @policy: default policy
* @stats: per-cpu chain stats
* @chain: the chain
- * @dev_name: device name that this base chain is attached to (if any)
* @flow_block: flow block (for hardware offload)
*/
struct nft_base_chain {
struct nf_hook_ops ops;
+ struct list_head hook_list;
const struct nft_chain_type *type;
u8 policy;
u8 flags;
struct nft_stats __percpu *stats;
struct nft_chain chain;
- char dev_name[IFNAMSIZ];
struct flow_block flow_block;
};
@@ -1147,7 +1173,7 @@ struct nft_object_ops {
int nft_register_obj(struct nft_object_type *obj_type);
void nft_unregister_obj(struct nft_object_type *obj_type);
-#define NFT_FLOWTABLE_DEVICE_MAX 8
+#define NFT_NETDEVICE_MAX 256
/**
* struct nft_flowtable - nf_tables flow table
@@ -1156,7 +1182,6 @@ void nft_unregister_obj(struct nft_object_type *obj_type);
* @table: the table the flow table is contained in
* @name: name of this flow table
* @hooknum: hook number
- * @priority: hook priority
* @ops_len: number of hooks in array
* @genmask: generation mask
* @use: number of references to this flow table
@@ -1170,13 +1195,12 @@ struct nft_flowtable {
struct nft_table *table;
char *name;
int hooknum;
- int priority;
int ops_len;
u32 genmask:2,
use:30;
u64 handle;
/* runtime data below here */
- struct nf_hook_ops *ops ____cacheline_aligned;
+ struct list_head hook_list ____cacheline_aligned;
struct nf_flowtable data;
};
diff --git a/include/net/netfilter/nf_tables_core.h b/include/net/netfilter/nf_tables_core.h
index 7281895fa6d9..29e7e1021267 100644
--- a/include/net/netfilter/nf_tables_core.h
+++ b/include/net/netfilter/nf_tables_core.h
@@ -41,7 +41,7 @@ struct nft_immediate_expr {
*/
static inline u32 nft_cmp_fast_mask(unsigned int len)
{
- return cpu_to_le32(~0U >> (FIELD_SIZEOF(struct nft_cmp_fast_expr,
+ return cpu_to_le32(~0U >> (sizeof_field(struct nft_cmp_fast_expr,
data) * BITS_PER_BYTE - len));
}
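The FIELD_SIZEOF() -> sizeof_field() switch is mechanical; assuming the usual null-pointer-member idiom for the helper, the minimal userspace demo below shows what it evaluates to (the demo struct is a stand-in, not the real nft_cmp_fast_expr).

#include <stdio.h>

/* Minimal stand-in for the kernel's sizeof_field() helper. */
#define sizeof_field(TYPE, MEMBER) sizeof((((TYPE *)0)->MEMBER))

struct cmp_fast_demo {
        unsigned int data;
        unsigned char len;
};

int main(void)
{
        /* the member is never dereferenced; only its type's size is taken */
        printf("sizeof_field(data) = %zu\n",
               sizeof_field(struct cmp_fast_demo, data));
        return 0;
}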
@@ -74,6 +74,7 @@ extern struct nft_set_type nft_set_hash_type;
extern struct nft_set_type nft_set_hash_fast_type;
extern struct nft_set_type nft_set_rbtree_type;
extern struct nft_set_type nft_set_bitmap_type;
+extern struct nft_set_type nft_set_pipapo_type;
struct nft_expr;
struct nft_regs;
diff --git a/include/net/netfilter/nf_tables_offload.h b/include/net/netfilter/nf_tables_offload.h
index 03cf5856d76f..ea7d1d78b92d 100644
--- a/include/net/netfilter/nf_tables_offload.h
+++ b/include/net/netfilter/nf_tables_offload.h
@@ -45,6 +45,7 @@ struct nft_flow_key {
struct flow_dissector_key_ip ip;
struct flow_dissector_key_vlan vlan;
struct flow_dissector_key_eth_addrs eth_addrs;
+ struct flow_dissector_key_meta meta;
} __aligned(BITS_PER_LONG / 8); /* Ensure that we can do comparisons as longs. */
struct nft_flow_match {
diff --git a/include/net/netlink.h b/include/net/netlink.h
index b140c8f1be22..56c365dc6dc7 100644
--- a/include/net/netlink.h
+++ b/include/net/netlink.h
@@ -1735,7 +1735,7 @@ static inline void nla_nest_cancel(struct sk_buff *skb, struct nlattr *start)
}
/**
- * nla_validate_nested - Validate a stream of nested attributes
+ * __nla_validate_nested - Validate a stream of nested attributes
* @start: container attribute
* @maxtype: maximum attribute type to be expected
* @policy: validation policy
@@ -1758,9 +1758,9 @@ static inline int __nla_validate_nested(const struct nlattr *start, int maxtype,
}
static inline int
-nl80211_validate_nested(const struct nlattr *start, int maxtype,
- const struct nla_policy *policy,
- struct netlink_ext_ack *extack)
+nla_validate_nested(const struct nlattr *start, int maxtype,
+ const struct nla_policy *policy,
+ struct netlink_ext_ack *extack)
{
return __nla_validate_nested(start, maxtype, policy,
NL_VALIDATE_STRICT, extack);
diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
index c0c0791b1912..08b98414d94e 100644
--- a/include/net/netns/ipv4.h
+++ b/include/net/netns/ipv4.h
@@ -154,6 +154,7 @@ struct netns_ipv4 {
int sysctl_tcp_adv_win_scale;
int sysctl_tcp_frto;
int sysctl_tcp_nometrics_save;
+ int sysctl_tcp_no_ssthresh_metrics_save;
int sysctl_tcp_moderate_rcvbuf;
int sysctl_tcp_tso_win_divisor;
int sysctl_tcp_workaround_signed_windows;
diff --git a/include/net/netns/ipv6.h b/include/net/netns/ipv6.h
index 022a0fd1a5a4..5ec054473d81 100644
--- a/include/net/netns/ipv6.h
+++ b/include/net/netns/ipv6.h
@@ -83,6 +83,9 @@ struct netns_ipv6 {
#ifdef CONFIG_IPV6_MULTIPLE_TABLES
unsigned int fib6_rules_require_fldissect;
bool fib6_has_custom_rules;
+#ifdef CONFIG_IPV6_SUBTREES
+ unsigned int fib6_routes_require_src;
+#endif
struct rt6_info *ip6_prohibit_entry;
struct rt6_info *ip6_blk_hole_entry;
struct fib6_table *fib6_local_tbl;
diff --git a/include/net/netns/mib.h b/include/net/netns/mib.h
index 830bdf345b17..b5fdb108d602 100644
--- a/include/net/netns/mib.h
+++ b/include/net/netns/mib.h
@@ -24,6 +24,9 @@ struct netns_mib {
#ifdef CONFIG_XFRM_STATISTICS
DEFINE_SNMP_STAT(struct linux_xfrm_mib, xfrm_statistics);
#endif
+#if IS_ENABLED(CONFIG_TLS)
+ DEFINE_SNMP_STAT(struct linux_tls_mib, tls_statistics);
+#endif
};
#endif
diff --git a/include/net/netns/nftables.h b/include/net/netns/nftables.h
index 286fd960896f..a1a8d45adb42 100644
--- a/include/net/netns/nftables.h
+++ b/include/net/netns/nftables.h
@@ -7,6 +7,7 @@
struct netns_nftables {
struct list_head tables;
struct list_head commit_list;
+ struct list_head module_list;
struct mutex commit_mutex;
unsigned int base_seq;
u8 gencursor;
diff --git a/include/net/netns/sctp.h b/include/net/netns/sctp.h
index bdc0f27b8514..d8d02e4188d1 100644
--- a/include/net/netns/sctp.h
+++ b/include/net/netns/sctp.h
@@ -89,6 +89,12 @@ struct netns_sctp {
*/
int pf_retrans;
+ /* Primary.Switchover.Max.Retrans sysctl value
+ * taken from:
+ * https://tools.ietf.org/html/rfc7829
+ */
+ int ps_retrans;
+
/*
* Disable Potentially-Failed feature, the feature is enabled by default
* pf_enable - 0 : disable pf
@@ -97,6 +103,14 @@ struct netns_sctp {
int pf_enable;
/*
+ * Disable Potentially-Failed state exposure, ignored by default
+ * pf_expose - 0 : compatible with old applications (by default)
+ * - 1 : disable pf state exposure
+ * - 2 : enable pf state exposure
+ */
+ int pf_expose;
+
+ /*
 * Policy for performing sctp/socket accounting
* 0 - do socket level accounting, all assocs share sk_sndbuf
* 1 - do sctp accounting, each asoc may use sk_sndbuf bytes
diff --git a/include/net/netprio_cgroup.h b/include/net/netprio_cgroup.h
index cfc9441ef074..dec7522b6ce1 100644
--- a/include/net/netprio_cgroup.h
+++ b/include/net/netprio_cgroup.h
@@ -26,7 +26,7 @@ static inline u32 task_netprioidx(struct task_struct *p)
rcu_read_lock();
css = task_css(p, net_prio_cgrp_id);
- idx = css->cgroup->id;
+ idx = css->id;
rcu_read_unlock();
return idx;
}
diff --git a/include/net/page_pool.h b/include/net/page_pool.h
index 2cbcdbdec254..cfbed00ba7ee 100644
--- a/include/net/page_pool.h
+++ b/include/net/page_pool.h
@@ -34,8 +34,18 @@
#include <linux/ptr_ring.h>
#include <linux/dma-direction.h>
-#define PP_FLAG_DMA_MAP 1 /* Should page_pool do the DMA map/unmap */
-#define PP_FLAG_ALL PP_FLAG_DMA_MAP
+#define PP_FLAG_DMA_MAP BIT(0) /* Should page_pool do the DMA
+ * map/unmap
+ */
+#define PP_FLAG_DMA_SYNC_DEV BIT(1) /* If set all pages that the driver gets
+ * from page_pool will be
+ * DMA-synced-for-device according to
+ * the length provided by the device
+ * driver.
+ * Please note DMA-sync-for-CPU is still
+ * device driver responsibility
+ */
+#define PP_FLAG_ALL (PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV)
/*
* Fast allocation side cache array/stack
@@ -65,12 +75,19 @@ struct page_pool_params {
int nid; /* Numa node id to allocate from pages from */
struct device *dev; /* device, for DMA pre-mapping purposes */
enum dma_data_direction dma_dir; /* DMA mapping direction */
+ unsigned int max_len; /* max DMA sync memory size */
+ unsigned int offset; /* DMA addr offset */
};
struct page_pool {
struct page_pool_params p;
- u32 pages_state_hold_cnt;
+ struct delayed_work release_dw;
+ void (*disconnect)(void *);
+ unsigned long defer_start;
+ unsigned long defer_warn;
+
+ u32 pages_state_hold_cnt;
/*
* Data structure for allocation side
@@ -107,6 +124,8 @@ struct page_pool {
* refcnt serves purpose is to simplify drivers error handling.
*/
refcount_t user_cnt;
+
+ u64 destroy_cnt;
};
struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp);
@@ -129,29 +148,23 @@ inline enum dma_data_direction page_pool_get_dma_dir(struct page_pool *pool)
struct page_pool *page_pool_create(const struct page_pool_params *params);
-void __page_pool_free(struct page_pool *pool);
-static inline void page_pool_free(struct page_pool *pool)
-{
- /* When page_pool isn't compiled-in, net/core/xdp.c doesn't
- * allow registering MEM_TYPE_PAGE_POOL, but shield linker.
- */
#ifdef CONFIG_PAGE_POOL
- __page_pool_free(pool);
-#endif
-}
-
-/* Drivers use this instead of page_pool_free */
+void page_pool_destroy(struct page_pool *pool);
+void page_pool_use_xdp_mem(struct page_pool *pool, void (*disconnect)(void *));
+#else
static inline void page_pool_destroy(struct page_pool *pool)
{
- if (!pool)
- return;
+}
- page_pool_free(pool);
+static inline void page_pool_use_xdp_mem(struct page_pool *pool,
+ void (*disconnect)(void *))
+{
}
+#endif
/* Never call this directly, use helpers below */
-void __page_pool_put_page(struct page_pool *pool,
- struct page *page, bool allow_direct);
+void __page_pool_put_page(struct page_pool *pool, struct page *page,
+ unsigned int dma_sync_size, bool allow_direct);
static inline void page_pool_put_page(struct page_pool *pool,
struct page *page, bool allow_direct)
@@ -160,32 +173,14 @@ static inline void page_pool_put_page(struct page_pool *pool,
* allow registering MEM_TYPE_PAGE_POOL, but shield linker.
*/
#ifdef CONFIG_PAGE_POOL
- __page_pool_put_page(pool, page, allow_direct);
+ __page_pool_put_page(pool, page, -1, allow_direct);
#endif
}
/* Very limited use-cases allow recycle direct */
static inline void page_pool_recycle_direct(struct page_pool *pool,
struct page *page)
{
- __page_pool_put_page(pool, page, true);
-}
-
-/* API user MUST have disconnected alloc-side (not allowed to call
- * page_pool_alloc_pages()) before calling this. The free-side can
- * still run concurrently, to handle in-flight packet-pages.
- *
- * A request to shutdown can fail (with false) if there are still
- * in-flight packet-pages.
- */
-bool __page_pool_request_shutdown(struct page_pool *pool);
-static inline bool page_pool_request_shutdown(struct page_pool *pool)
-{
- bool safe_to_remove = false;
-
-#ifdef CONFIG_PAGE_POOL
- safe_to_remove = __page_pool_request_shutdown(pool);
-#endif
- return safe_to_remove;
+ __page_pool_put_page(pool, page, -1, true);
}
/* Disconnects a page (from a page_pool). API users can have a need
@@ -216,14 +211,16 @@ static inline bool is_page_pool_compiled_in(void)
#endif
}
-static inline void page_pool_get(struct page_pool *pool)
-{
- refcount_inc(&pool->user_cnt);
-}
-
static inline bool page_pool_put(struct page_pool *pool)
{
return refcount_dec_and_test(&pool->user_cnt);
}
+/* Caller must provide appropriate safe context, e.g. NAPI. */
+void page_pool_update_nid(struct page_pool *pool, int new_nid);
+static inline void page_pool_nid_changed(struct page_pool *pool, int new_nid)
+{
+ if (unlikely(pool->p.nid != new_nid))
+ page_pool_update_nid(pool, new_nid);
+}
#endif /* _NET_PAGE_POOL_H */
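As a rough userspace model (not the in-kernel validation path) of how the new flag bits compose: PP_FLAG_ALL is the union of the two supported bits, and syncing pages for the device only makes sense when the pool also owns the DMA mapping, so a creator can reject inconsistent or unknown combinations up front.

#include <stdio.h>

#define BIT(n)                 (1U << (n))
#define PP_FLAG_DMA_MAP        BIT(0)
#define PP_FLAG_DMA_SYNC_DEV   BIT(1)
#define PP_FLAG_ALL            (PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV)

static int check_flags(unsigned int flags)
{
        if (flags & ~PP_FLAG_ALL)               /* unknown bits */
                return -1;
        if ((flags & PP_FLAG_DMA_SYNC_DEV) && !(flags & PP_FLAG_DMA_MAP))
                return -1;                      /* sync requires the mapping */
        return 0;
}

int main(void)
{
        printf("%d\n", check_flags(PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV));
        printf("%d\n", check_flags(BIT(2)));
        return 0;
}

The max_len and offset members added to page_pool_params then bound how much of each returned page the pool syncs for the device.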
diff --git a/include/net/pie.h b/include/net/pie.h
new file mode 100644
index 000000000000..fd5a37cb7993
--- /dev/null
+++ b/include/net/pie.h
@@ -0,0 +1,138 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef __NET_SCHED_PIE_H
+#define __NET_SCHED_PIE_H
+
+#include <linux/ktime.h>
+#include <linux/skbuff.h>
+#include <linux/types.h>
+#include <net/inet_ecn.h>
+#include <net/pkt_sched.h>
+
+#define MAX_PROB U64_MAX
+#define DTIME_INVALID U64_MAX
+#define QUEUE_THRESHOLD 16384
+#define DQCOUNT_INVALID -1
+#define PIE_SCALE 8
+
+/**
+ * struct pie_params - contains pie parameters
+ * @target: target delay in pschedtime
+ * @tupdate: interval at which drop probability is calculated
+ * @limit: total number of packets that can be in the queue
+ * @alpha: parameter to control drop probability
+ * @beta: parameter to control drop probability
+ * @ecn: is ECN marking of packets enabled
+ * @bytemode: is drop probability scaled based on pkt size
+ * @dq_rate_estimator: is Little's law used for qdelay calculation
+ */
+struct pie_params {
+ psched_time_t target;
+ u32 tupdate;
+ u32 limit;
+ u32 alpha;
+ u32 beta;
+ u8 ecn;
+ u8 bytemode;
+ u8 dq_rate_estimator;
+};
+
+/**
+ * struct pie_vars - contains pie variables
+ * @qdelay: current queue delay
+ * @qdelay_old: queue delay in previous qdelay calculation
+ * @burst_time: burst time allowance
+ * @dq_tstamp: timestamp at which dq rate was last calculated
+ * @prob: drop probability
+ * @accu_prob: accumulated drop probability
+ * @dq_count: number of bytes dequeued in a measurement cycle
+ * @avg_dq_rate: calculated average dq rate
+ * @qlen_old: queue length during previous qdelay calculation
+ * @accu_prob_overflows: number of times accu_prob overflows
+ */
+struct pie_vars {
+ psched_time_t qdelay;
+ psched_time_t qdelay_old;
+ psched_time_t burst_time;
+ psched_time_t dq_tstamp;
+ u64 prob;
+ u64 accu_prob;
+ u64 dq_count;
+ u32 avg_dq_rate;
+ u32 qlen_old;
+ u8 accu_prob_overflows;
+};
+
+/**
+ * struct pie_stats - contains pie stats
+ * @packets_in: total number of packets enqueued
+ * @dropped: packets dropped due to pie action
+ * @overlimit: packets dropped due to lack of space in queue
+ * @ecn_mark: packets marked with ECN
+ * @maxq: maximum queue size
+ */
+struct pie_stats {
+ u32 packets_in;
+ u32 dropped;
+ u32 overlimit;
+ u32 ecn_mark;
+ u32 maxq;
+};
+
+/**
+ * struct pie_skb_cb - contains private skb vars
+ * @enqueue_time: timestamp when the packet is enqueued
+ * @mem_usage: size of the skb during enqueue
+ */
+struct pie_skb_cb {
+ psched_time_t enqueue_time;
+ u32 mem_usage;
+};
+
+static inline void pie_params_init(struct pie_params *params)
+{
+ params->target = PSCHED_NS2TICKS(15 * NSEC_PER_MSEC); /* 15 ms */
+ params->tupdate = usecs_to_jiffies(15 * USEC_PER_MSEC); /* 15 ms */
+ params->limit = 1000;
+ params->alpha = 2;
+ params->beta = 20;
+ params->ecn = false;
+ params->bytemode = false;
+ params->dq_rate_estimator = false;
+}
+
+static inline void pie_vars_init(struct pie_vars *vars)
+{
+ vars->burst_time = PSCHED_NS2TICKS(150 * NSEC_PER_MSEC); /* 150 ms */
+ vars->dq_tstamp = DTIME_INVALID;
+ vars->accu_prob = 0;
+ vars->dq_count = DQCOUNT_INVALID;
+ vars->avg_dq_rate = 0;
+ vars->accu_prob_overflows = 0;
+}
+
+static inline struct pie_skb_cb *get_pie_cb(const struct sk_buff *skb)
+{
+ qdisc_cb_private_validate(skb, sizeof(struct pie_skb_cb));
+ return (struct pie_skb_cb *)qdisc_skb_cb(skb)->data;
+}
+
+static inline psched_time_t pie_get_enqueue_time(const struct sk_buff *skb)
+{
+ return get_pie_cb(skb)->enqueue_time;
+}
+
+static inline void pie_set_enqueue_time(struct sk_buff *skb)
+{
+ get_pie_cb(skb)->enqueue_time = psched_get_time();
+}
+
+bool pie_drop_early(struct Qdisc *sch, struct pie_params *params,
+ struct pie_vars *vars, u32 qlen, u32 packet_size);
+
+void pie_process_dequeue(struct sk_buff *skb, struct pie_params *params,
+ struct pie_vars *vars, u32 qlen);
+
+void pie_calculate_probability(struct pie_params *params, struct pie_vars *vars,
+ u32 qlen);
+
+#endif
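pie_calculate_probability() is only declared here; as intuition for what it computes, the sketch below applies the PI-controller update from RFC 8033, prob += alpha * (qdelay - target) + beta * (qdelay - qdelay_old), in plain floating point. The gains (0.125, 1.25) and the delay samples are illustrative; the in-kernel version works in scaled fixed point and also auto-tunes alpha/beta, which this deliberately ignores.

#include <stdio.h>

/* Floating-point PI update, illustrative only. */
static double pie_update(double prob, double qdelay, double qdelay_old,
                         double target, double alpha, double beta)
{
        prob += alpha * (qdelay - target) + beta * (qdelay - qdelay_old);
        if (prob < 0.0)
                prob = 0.0;
        if (prob > 1.0)
                prob = 1.0;
        return prob;
}

int main(void)
{
        /* delays in seconds; the 15 ms target matches pie_params_init() above */
        double samples[] = { 0.005, 0.020, 0.040, 0.030, 0.010 };
        double prob = 0.0, qdelay_old = 0.0;
        unsigned int i;

        for (i = 0; i < 5; i++) {
                prob = pie_update(prob, samples[i], qdelay_old,
                                  0.015, 0.125, 1.25);
                qdelay_old = samples[i];
                printf("qdelay=%.3f s  drop prob=%.4f\n", samples[i], prob);
        }
        return 0;
}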
diff --git a/include/net/pkt_cls.h b/include/net/pkt_cls.h
index e553fc80eb23..a972244ab193 100644
--- a/include/net/pkt_cls.h
+++ b/include/net/pkt_cls.h
@@ -141,31 +141,38 @@ __cls_set_class(unsigned long *clp, unsigned long cl)
return xchg(clp, cl);
}
-static inline unsigned long
-cls_set_class(struct Qdisc *q, unsigned long *clp, unsigned long cl)
+static inline void
+__tcf_bind_filter(struct Qdisc *q, struct tcf_result *r, unsigned long base)
{
- unsigned long old_cl;
+ unsigned long cl;
- sch_tree_lock(q);
- old_cl = __cls_set_class(clp, cl);
- sch_tree_unlock(q);
- return old_cl;
+ cl = q->ops->cl_ops->bind_tcf(q, base, r->classid);
+ cl = __cls_set_class(&r->class, cl);
+ if (cl)
+ q->ops->cl_ops->unbind_tcf(q, cl);
}
static inline void
tcf_bind_filter(struct tcf_proto *tp, struct tcf_result *r, unsigned long base)
{
struct Qdisc *q = tp->chain->block->q;
- unsigned long cl;
/* Check q as it is not set for shared blocks. In that case,
* setting class is not supported.
*/
if (!q)
return;
- cl = q->ops->cl_ops->bind_tcf(q, base, r->classid);
- cl = cls_set_class(q, &r->class, cl);
- if (cl)
+ sch_tree_lock(q);
+ __tcf_bind_filter(q, r, base);
+ sch_tree_unlock(q);
+}
+
+static inline void
+__tcf_unbind_filter(struct Qdisc *q, struct tcf_result *r)
+{
+ unsigned long cl;
+
+ if ((cl = __cls_set_class(&r->class, 0)) != 0)
q->ops->cl_ops->unbind_tcf(q, cl);
}
@@ -173,12 +180,10 @@ static inline void
tcf_unbind_filter(struct tcf_proto *tp, struct tcf_result *r)
{
struct Qdisc *q = tp->chain->block->q;
- unsigned long cl;
if (!q)
return;
- if ((cl = __cls_set_class(&r->class, 0)) != 0)
- q->ops->cl_ops->unbind_tcf(q, cl);
+ __tcf_unbind_filter(q, r);
}
struct tcf_exts {
@@ -791,9 +796,8 @@ enum tc_prio_command {
struct tc_prio_qopt_offload_params {
int bands;
u8 priomap[TC_PRIO_MAX + 1];
- /* In case that a prio qdisc is offloaded and now is changed to a
- * non-offloadedable config, it needs to update the backlog & qlen
- * values to negate the HW backlog & qlen values (and only them).
+ /* At the point of un-offloading the Qdisc, the reported backlog and
+ * qlen need to be reduced by the portion that is in HW.
*/
struct gnet_stats_queue *qstats;
};
@@ -824,4 +828,57 @@ struct tc_root_qopt_offload {
bool ingress;
};
+enum tc_ets_command {
+ TC_ETS_REPLACE,
+ TC_ETS_DESTROY,
+ TC_ETS_STATS,
+ TC_ETS_GRAFT,
+};
+
+struct tc_ets_qopt_offload_replace_params {
+ unsigned int bands;
+ u8 priomap[TC_PRIO_MAX + 1];
+ unsigned int quanta[TCQ_ETS_MAX_BANDS]; /* 0 for strict bands. */
+ unsigned int weights[TCQ_ETS_MAX_BANDS];
+ struct gnet_stats_queue *qstats;
+};
+
+struct tc_ets_qopt_offload_graft_params {
+ u8 band;
+ u32 child_handle;
+};
+
+struct tc_ets_qopt_offload {
+ enum tc_ets_command command;
+ u32 handle;
+ u32 parent;
+ union {
+ struct tc_ets_qopt_offload_replace_params replace_params;
+ struct tc_qopt_offload_stats stats;
+ struct tc_ets_qopt_offload_graft_params graft_params;
+ };
+};
+
+enum tc_tbf_command {
+ TC_TBF_REPLACE,
+ TC_TBF_DESTROY,
+ TC_TBF_STATS,
+};
+
+struct tc_tbf_qopt_offload_replace_params {
+ struct psched_ratecfg rate;
+ u32 max_size;
+ struct gnet_stats_queue *qstats;
+};
+
+struct tc_tbf_qopt_offload {
+ enum tc_tbf_command command;
+ u32 handle;
+ u32 parent;
+ union {
+ struct tc_tbf_qopt_offload_replace_params replace_params;
+ struct tc_qopt_offload_stats stats;
+ };
+};
+
#endif
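Drivers consuming the new ETS or TBF offload objects typically switch on the command field and read the matching union member, refusing anything they do not implement. The sketch below models that dispatch with stripped-down stand-in types; it is illustrative only, not a real ndo_setup_tc() handler.

#include <stdio.h>

enum demo_ets_command { DEMO_ETS_REPLACE, DEMO_ETS_DESTROY, DEMO_ETS_STATS };

struct demo_ets_offload {
        enum demo_ets_command command;
        union {
                struct { unsigned int bands; } replace_params;
                struct { unsigned long backlog; } stats;
        };
};

static int demo_setup_ets(struct demo_ets_offload *p)
{
        switch (p->command) {
        case DEMO_ETS_REPLACE:
                printf("replace: %u band(s)\n", p->replace_params.bands);
                return 0;
        case DEMO_ETS_DESTROY:
                printf("destroy\n");
                return 0;
        default:
                return -95;     /* unhandled commands: -EOPNOTSUPP */
        }
}

int main(void)
{
        struct demo_ets_offload p = {
                .command = DEMO_ETS_REPLACE,
                .replace_params = { .bands = 8 },
        };

        return demo_setup_ets(&p) ? 1 : 0;
}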
diff --git a/include/net/route.h b/include/net/route.h
index 6c516840380d..a9c60fc68e36 100644
--- a/include/net/route.h
+++ b/include/net/route.h
@@ -185,6 +185,10 @@ int ip_route_input_rcu(struct sk_buff *skb, __be32 dst, __be32 src,
u8 tos, struct net_device *devin,
struct fib_result *res);
+int ip_route_use_hint(struct sk_buff *skb, __be32 dst, __be32 src,
+ u8 tos, struct net_device *devin,
+ const struct sk_buff *hint);
+
static inline int ip_route_input(struct sk_buff *skb, __be32 dst, __be32 src,
u8 tos, struct net_device *devin)
{
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index d80acda231ae..c30f914867e6 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -149,8 +149,8 @@ static inline bool qdisc_is_percpu_stats(const struct Qdisc *q)
static inline bool qdisc_is_empty(const struct Qdisc *qdisc)
{
if (qdisc_is_percpu_stats(qdisc))
- return qdisc->empty;
- return !qdisc->q.qlen;
+ return READ_ONCE(qdisc->empty);
+ return !READ_ONCE(qdisc->q.qlen);
}
static inline bool qdisc_run_begin(struct Qdisc *qdisc)
@@ -158,7 +158,7 @@ static inline bool qdisc_run_begin(struct Qdisc *qdisc)
if (qdisc->flags & TCQ_F_NOLOCK) {
if (!spin_trylock(&qdisc->seqlock))
return false;
- qdisc->empty = false;
+ WRITE_ONCE(qdisc->empty, false);
} else if (qdisc_is_running(qdisc)) {
return false;
}
@@ -308,6 +308,7 @@ struct tcf_proto_ops {
int (*delete)(struct tcf_proto *tp, void *arg,
bool *last, bool rtnl_held,
struct netlink_ext_ack *);
+ bool (*delete_empty)(struct tcf_proto *tp);
void (*walk)(struct tcf_proto *tp,
struct tcf_walker *arg, bool rtnl_held);
int (*reoffload)(struct tcf_proto *tp, bool add,
@@ -317,7 +318,8 @@ struct tcf_proto_ops {
void *type_data);
void (*hw_del)(struct tcf_proto *tp,
void *type_data);
- void (*bind_class)(void *, u32, unsigned long);
+ void (*bind_class)(void *, u32, unsigned long,
+ void *, unsigned long);
void * (*tmplt_create)(struct net *net,
struct tcf_chain *chain,
struct nlattr **tca,
@@ -336,6 +338,10 @@ struct tcf_proto_ops {
int flags;
};
+/* Classifiers setting TCF_PROTO_OPS_DOIT_UNLOCKED in tcf_proto_ops->flags
+ * are expected to implement tcf_proto_ops->delete_empty(), otherwise race
+ * conditions can occur when filters are inserted/deleted simultaneously.
+ */
enum tcf_proto_ops_flags {
TCF_PROTO_OPS_DOIT_UNLOCKED = 1,
};
@@ -669,22 +675,6 @@ void __qdisc_calculate_pkt_len(struct sk_buff *skb,
const struct qdisc_size_table *stab);
int skb_do_redirect(struct sk_buff *);
-static inline void skb_reset_tc(struct sk_buff *skb)
-{
-#ifdef CONFIG_NET_CLS_ACT
- skb->tc_redirected = 0;
-#endif
-}
-
-static inline bool skb_is_tc_redirected(const struct sk_buff *skb)
-{
-#ifdef CONFIG_NET_CLS_ACT
- return skb->tc_redirected;
-#else
- return false;
-#endif
-}
-
static inline bool skb_at_tc_ingress(const struct sk_buff *skb)
{
#ifdef CONFIG_NET_CLS_ACT
@@ -1290,17 +1280,9 @@ void mini_qdisc_pair_swap(struct mini_Qdisc_pair *miniqp,
void mini_qdisc_pair_init(struct mini_Qdisc_pair *miniqp, struct Qdisc *qdisc,
struct mini_Qdisc __rcu **p_miniq);
-static inline void skb_tc_reinsert(struct sk_buff *skb, struct tcf_result *res)
+static inline int skb_tc_reinsert(struct sk_buff *skb, struct tcf_result *res)
{
- struct gnet_stats_queue *stats = res->qstats;
- int ret;
-
- if (res->ingress)
- ret = netif_receive_skb(skb);
- else
- ret = dev_queue_xmit(skb);
- if (ret && stats)
- qstats_overlimit_inc(res->qstats);
+ return res->ingress ? netif_receive_skb(skb) : dev_queue_xmit(skb);
}
#endif
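The qdisc_is_empty()/qdisc_run_begin() hunks annotate fields that are written under qdisc (or seqlock) protection but read locklessly. Assuming the usual volatile-cast definitions of READ_ONCE()/WRITE_ONCE(), the pattern looks like this in isolation; it is a sketch only and does not reproduce the kernel's full memory-model guarantees.

#include <stdbool.h>
#include <stdio.h>

/* Minimal stand-ins for the kernel annotations (volatile-cast idiom). */
#define WRITE_ONCE(x, val) (*(volatile __typeof__(x) *)&(x) = (val))
#define READ_ONCE(x)       (*(volatile __typeof__(x) *)&(x))

struct demo_qdisc {
        bool empty;
        unsigned int qlen;
};

static bool demo_qdisc_is_empty(const struct demo_qdisc *q)
{
        /* lockless reader: one untorn load the compiler cannot re-fetch */
        return READ_ONCE(q->empty);
}

int main(void)
{
        struct demo_qdisc q = { .empty = true, .qlen = 0 };

        WRITE_ONCE(q.empty, false);     /* writer side, normally under the lock */
        printf("%d\n", demo_qdisc_is_empty(&q));
        return 0;
}

The same annotation appears further down in sock.h (sk_ack_backlog, backlog head/tail, sk_pacing_shift, sk_dst_pending_confirm) for the same reason: the readers run without the lock the writers hold.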
diff --git a/include/net/sctp/constants.h b/include/net/sctp/constants.h
index 823afc42a3aa..15b4d9aec7ff 100644
--- a/include/net/sctp/constants.h
+++ b/include/net/sctp/constants.h
@@ -286,6 +286,18 @@ enum { SCTP_MAX_GABS = 16 };
* functions simpler to write.
*/
+/* These are the values for pf exposure, UNUSED is to keep compatible with old
+ * applications by default.
+ */
+enum {
+ SCTP_PF_EXPOSE_UNSET,
+ SCTP_PF_EXPOSE_DISABLE,
+ SCTP_PF_EXPOSE_ENABLE,
+};
+#define SCTP_PF_EXPOSE_MAX SCTP_PF_EXPOSE_ENABLE
+
+#define SCTP_PS_RETRANS_MAX 0xffff
+
/* These return values describe the success or failure of a number of
* routines which form the lower interface to SCTP_outqueue.
*/
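The new SCTP_PF_EXPOSE_* values form a small tri-state that a sysctl or socket-option handler can validate against SCTP_PF_EXPOSE_MAX. The sketch below is a hedged model of such validation, not the kernel's actual handler.

#include <stdio.h>

enum {
        SCTP_PF_EXPOSE_UNSET,    /* 0: keep old applications working */
        SCTP_PF_EXPOSE_DISABLE,  /* 1: never expose the PF state */
        SCTP_PF_EXPOSE_ENABLE,   /* 2: expose the PF state */
};
#define SCTP_PF_EXPOSE_MAX SCTP_PF_EXPOSE_ENABLE

static int set_pf_expose(int *dst, int val)
{
        if (val < SCTP_PF_EXPOSE_UNSET || val > SCTP_PF_EXPOSE_MAX)
                return -22;      /* -EINVAL */
        *dst = val;
        return 0;
}

int main(void)
{
        int pf_expose = SCTP_PF_EXPOSE_UNSET;

        printf("%d\n", set_pf_expose(&pf_expose, 2));  /* ok   */
        printf("%d\n", set_pf_expose(&pf_expose, 5));  /* -22  */
        return 0;
}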
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index 503fbc3cd819..314a2fa21d6b 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -184,7 +184,8 @@ struct sctp_sock {
__u32 flowlabel;
__u8 dscp;
- int pf_retrans;
+ __u16 pf_retrans;
+ __u16 ps_retrans;
/* The initial Path MTU to use for new associations. */
__u32 pathmtu;
@@ -215,6 +216,7 @@ struct sctp_sock {
__u32 adaptation_ind;
__u32 pd_point;
__u16 nodelay:1,
+ pf_expose:2,
reuse:1,
disable_fragments:1,
v4mapped:1,
@@ -896,7 +898,9 @@ struct sctp_transport {
* and will be initialized from the assocs value. This can be changed
* using the SCTP_PEER_ADDR_THLDS socket option
*/
- int pf_retrans;
+ __u16 pf_retrans;
+ /* Used for primary path switchover. */
+ __u16 ps_retrans;
/* PMTU : The current known path MTU. */
__u32 pathmtu;
@@ -1239,6 +1243,9 @@ struct sctp_ep_common {
/* What socket does this endpoint belong to? */
struct sock *sk;
+ /* Cache netns and it won't change once set */
+ struct net *net;
+
/* This is where we receive inbound chunks. */
struct sctp_inq inqueue;
@@ -1772,7 +1779,9 @@ struct sctp_association {
* and will be initialized from the assocs value. This can be
* changed using the SCTP_PEER_ADDR_THLDS socket option
*/
- int pf_retrans;
+ __u16 pf_retrans;
+ /* Used for primary path switchover. */
+ __u16 ps_retrans;
/* Maximum number of times the endpoint will retransmit INIT */
__u16 max_init_attempts;
@@ -2053,6 +2062,7 @@ struct sctp_association {
__u8 need_ecne:1, /* Need to send an ECNE Chunk? */
temp:1, /* Is it a temporary association? */
+ pf_expose:2, /* Expose pf state? */
force_delay:1;
__u8 strreset_enable;
diff --git a/include/net/sctp/ulpevent.h b/include/net/sctp/ulpevent.h
index e1a92c4610f3..0b032b92da0b 100644
--- a/include/net/sctp/ulpevent.h
+++ b/include/net/sctp/ulpevent.h
@@ -80,13 +80,8 @@ struct sctp_ulpevent *sctp_ulpevent_make_assoc_change(
struct sctp_chunk *chunk,
gfp_t gfp);
-struct sctp_ulpevent *sctp_ulpevent_make_peer_addr_change(
- const struct sctp_association *asoc,
- const struct sockaddr_storage *aaddr,
- int flags,
- int state,
- int error,
- gfp_t gfp);
+void sctp_ulpevent_nofity_peer_addr_change(struct sctp_transport *transport,
+ int state, int error);
struct sctp_ulpevent *sctp_ulpevent_make_remote_error(
const struct sctp_association *asoc,
@@ -100,6 +95,13 @@ struct sctp_ulpevent *sctp_ulpevent_make_send_failed(
__u32 error,
gfp_t gfp);
+struct sctp_ulpevent *sctp_ulpevent_make_send_failed_event(
+ const struct sctp_association *asoc,
+ struct sctp_chunk *chunk,
+ __u16 flags,
+ __u32 error,
+ gfp_t gfp);
+
struct sctp_ulpevent *sctp_ulpevent_make_shutdown_event(
const struct sctp_association *asoc,
__u16 flags,
diff --git a/include/net/smc.h b/include/net/smc.h
index bd9c0fb3b577..646feb4bc75f 100644
--- a/include/net/smc.h
+++ b/include/net/smc.h
@@ -37,6 +37,8 @@ struct smcd_dmb {
#define ISM_EVENT_GID 1
#define ISM_EVENT_SWR 2
+#define ISM_ERROR 0xFFFF
+
struct smcd_event {
u32 type;
u32 code;
@@ -75,6 +77,11 @@ struct smcd_dev {
struct workqueue_struct *event_wq;
u8 pnetid[SMC_MAX_PNETID_LEN];
bool pnetid_by_user;
+ struct list_head lgr_list;
+ spinlock_t lgr_lock;
+ atomic_t lgr_cnt;
+ wait_queue_head_t lgrs_deleted;
+ u8 going_away : 1;
};
struct smcd_dev *smcd_alloc_dev(struct device *parent, const char *name,
diff --git a/include/net/snmp.h b/include/net/snmp.h
index cb8ced4380a6..468a67836e2f 100644
--- a/include/net/snmp.h
+++ b/include/net/snmp.h
@@ -111,6 +111,12 @@ struct linux_xfrm_mib {
unsigned long mibs[LINUX_MIB_XFRMMAX];
};
+/* Linux TLS */
+#define LINUX_MIB_TLSMAX __LINUX_MIB_TLSMAX
+struct linux_tls_mib {
+ unsigned long mibs[LINUX_MIB_TLSMAX];
+};
+
#define DEFINE_SNMP_STAT(type, name) \
__typeof__(type) __percpu *name
#define DEFINE_SNMP_STAT_ATOMIC(type, name) \
diff --git a/include/net/sock.h b/include/net/sock.h
index 718e62fbe869..328564525526 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -66,7 +66,6 @@
#include <net/checksum.h>
#include <net/tcp_states.h>
#include <linux/net_tstamp.h>
-#include <net/smc.h>
#include <net/l3mdev.h>
/*
@@ -118,19 +117,26 @@ typedef __u64 __bitwise __addrpair;
* struct sock_common - minimal network layer representation of sockets
* @skc_daddr: Foreign IPv4 addr
* @skc_rcv_saddr: Bound local IPv4 addr
+ * @skc_addrpair: 8-byte-aligned __u64 union of @skc_daddr & @skc_rcv_saddr
* @skc_hash: hash value used with various protocol lookup tables
* @skc_u16hashes: two u16 hash values used by UDP lookup tables
* @skc_dport: placeholder for inet_dport/tw_dport
* @skc_num: placeholder for inet_num/tw_num
+ * @skc_portpair: __u32 union of @skc_dport & @skc_num
* @skc_family: network address family
* @skc_state: Connection state
* @skc_reuse: %SO_REUSEADDR setting
* @skc_reuseport: %SO_REUSEPORT setting
+ * @skc_ipv6only: socket is IPV6 only
+ * @skc_net_refcnt: socket is using net ref counting
* @skc_bound_dev_if: bound device index if != 0
* @skc_bind_node: bind hash linkage for various protocol lookup tables
* @skc_portaddr_node: second hash linkage for UDP/UDP-Lite protocol
* @skc_prot: protocol handlers inside a network family
* @skc_net: reference to the network namespace of this socket
+ * @skc_v6_daddr: IPV6 destination address
+ * @skc_v6_rcv_saddr: IPV6 source address
+ * @skc_cookie: socket's cookie value
* @skc_node: main hash linkage for various protocol lookup tables
* @skc_nulls_node: main hash linkage for TCP/UDP/UDP-Lite protocol
* @skc_tx_queue_mapping: tx queue number for this connection
@@ -138,7 +144,15 @@ typedef __u64 __bitwise __addrpair;
* @skc_flags: place holder for sk_flags
* %SO_LINGER (l_onoff), %SO_BROADCAST, %SO_KEEPALIVE,
* %SO_OOBINLINE settings, %SO_TIMESTAMPING settings
+ * @skc_listener: connection request listener socket (aka rsk_listener)
+ * [union with @skc_flags]
+ * @skc_tw_dr: (aka tw_dr) ptr to &struct inet_timewait_death_row
+ * [union with @skc_flags]
* @skc_incoming_cpu: record/match cpu processing incoming packets
+ * @skc_rcv_wnd: (aka rsk_rcv_wnd) TCP receive window size (possibly scaled)
+ * [union with @skc_incoming_cpu]
+ * @skc_tw_rcv_nxt: (aka tw_rcv_nxt) TCP window next expected seq number
+ * [union with @skc_incoming_cpu]
* @skc_refcnt: reference count
*
* This is the minimal network layer representation of sockets, the header
@@ -246,6 +260,7 @@ struct bpf_sk_storage;
* @sk_dst_cache: destination cache
* @sk_dst_pending_confirm: need to confirm neighbour
* @sk_policy: flow policy
+ * @sk_rx_skb_cache: cache copy of recently accessed RX skb
* @sk_receive_queue: incoming packets
* @sk_wmem_alloc: transmit queue bytes committed
* @sk_tsq_flags: TCP Small Queues flags
@@ -266,6 +281,8 @@ struct bpf_sk_storage;
* @sk_no_check_rx: allow zero checksum in RX packets
* @sk_route_caps: route capabilities (e.g. %NETIF_F_TSO)
* @sk_route_nocaps: forbidden route capabilities (e.g NETIF_F_GSO_MASK)
+ * @sk_route_forced_caps: static, forced route capabilities
+ * (set in tcp_init_sock())
* @sk_gso_type: GSO type (e.g. %SKB_GSO_TCPV4)
* @sk_gso_max_size: Maximum GSO segment size to build
* @sk_gso_max_segs: Maximum number of GSO segments
@@ -304,6 +321,8 @@ struct bpf_sk_storage;
* @sk_frag: cached page frag
* @sk_peek_off: current peek_offset value
* @sk_send_head: front of stuff to transmit
+ * @tcp_rtx_queue: TCP re-transmit queue [union with @sk_send_head]
+ * @sk_tx_skb_cache: cache copy of recently accessed TX skb
* @sk_security: used by security modules
* @sk_mark: generic packet mark
* @sk_cgrp_data: cgroup data for this cgroup
@@ -314,11 +333,14 @@ struct bpf_sk_storage;
* @sk_write_space: callback to indicate there is bf sending space available
* @sk_error_report: callback to indicate errors (e.g. %MSG_ERRQUEUE)
* @sk_backlog_rcv: callback to process the backlog
+ * @sk_validate_xmit_skb: ptr to an optional validate function
* @sk_destruct: called at sock freeing time, i.e. when all refcnt == 0
* @sk_reuseport_cb: reuseport group container
+ * @sk_bpf_storage: ptr to cache and control for bpf_sk_storage
* @sk_rcu: used during RCU grace period
* @sk_clockid: clockid used by time-based scheduling (SO_TXTIME)
* @sk_txtime_deadline_mode: set deadline mode for SO_TXTIME
+ * @sk_txtime_report_errors: set report errors mode for SO_TXTIME
* @sk_txtime_unused: unused txtime flags
*/
struct sock {
@@ -394,7 +416,9 @@ struct sock {
struct sk_filter __rcu *sk_filter;
union {
struct socket_wq __rcu *sk_wq;
+ /* private: */
struct socket_wq *sk_wq_raw;
+ /* public: */
};
#ifdef CONFIG_XFRM
struct xfrm_policy __rcu *sk_policy[2];
@@ -437,31 +461,15 @@ struct sock {
* Because of non atomicity rules, all
* changes are protected by socket lock.
*/
- unsigned int __sk_flags_offset[0];
-#ifdef __BIG_ENDIAN_BITFIELD
-#define SK_FL_PROTO_SHIFT 16
-#define SK_FL_PROTO_MASK 0x00ff0000
-
-#define SK_FL_TYPE_SHIFT 0
-#define SK_FL_TYPE_MASK 0x0000ffff
-#else
-#define SK_FL_PROTO_SHIFT 8
-#define SK_FL_PROTO_MASK 0x0000ff00
-
-#define SK_FL_TYPE_SHIFT 16
-#define SK_FL_TYPE_MASK 0xffff0000
-#endif
-
- unsigned int sk_padding : 1,
+ u8 sk_padding : 1,
sk_kern_sock : 1,
sk_no_check_tx : 1,
sk_no_check_rx : 1,
- sk_userlocks : 4,
- sk_protocol : 8,
- sk_type : 16;
-#define SK_PROTOCOL_MAX U8_MAX
- u16 sk_gso_max_segs;
+ sk_userlocks : 4;
u8 sk_pacing_shift;
+ u16 sk_type;
+ u16 sk_protocol;
+ u16 sk_gso_max_segs;
unsigned long sk_lingertime;
struct proto *sk_prot_creator;
rwlock_t sk_callback_lock;
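The struct sock hunk above stops packing sk_type and sk_protocol into a shared 32-bit bitfield and gives each its own 16-bit field, so they can be read at fixed offsets without masking. The stand-in structs below (names are hypothetical, not the real struct sock layout) print both layouts' sizes to show the repack need not grow the structure.

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

struct demo_old {                       /* bitfield-packed layout */
        unsigned int padding : 1,
                     userlocks : 4,
                     protocol : 8,
                     type : 16;
        uint16_t gso_max_segs;
};

struct demo_new {                       /* discrete-field layout */
        uint8_t  padding : 1,
                 userlocks : 4;
        uint8_t  pacing_shift;
        uint16_t type;
        uint16_t protocol;
        uint16_t gso_max_segs;
};

int main(void)
{
        printf("old: %zu bytes\n", sizeof(struct demo_old));
        printf("new: %zu bytes, protocol at offset %zu\n",
               sizeof(struct demo_new),
               offsetof(struct demo_new, protocol));
        return 0;
}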
@@ -723,6 +731,11 @@ static inline void __sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_h
hlist_nulls_add_head_rcu(&sk->sk_nulls_node, list);
}
+static inline void __sk_nulls_add_node_tail_rcu(struct sock *sk, struct hlist_nulls_head *list)
+{
+ hlist_nulls_add_tail_rcu(&sk->sk_nulls_node, list);
+}
+
static inline void sk_nulls_add_node_rcu(struct sock *sk, struct hlist_nulls_head *list)
{
sock_hold(sk);
@@ -860,17 +873,17 @@ static inline gfp_t sk_gfp_mask(const struct sock *sk, gfp_t gfp_mask)
static inline void sk_acceptq_removed(struct sock *sk)
{
- sk->sk_ack_backlog--;
+ WRITE_ONCE(sk->sk_ack_backlog, sk->sk_ack_backlog - 1);
}
static inline void sk_acceptq_added(struct sock *sk)
{
- sk->sk_ack_backlog++;
+ WRITE_ONCE(sk->sk_ack_backlog, sk->sk_ack_backlog + 1);
}
static inline bool sk_acceptq_is_full(const struct sock *sk)
{
- return sk->sk_ack_backlog > sk->sk_max_ack_backlog;
+ return READ_ONCE(sk->sk_ack_backlog) > READ_ONCE(sk->sk_max_ack_backlog);
}
/*
@@ -900,11 +913,11 @@ static inline void __sk_add_backlog(struct sock *sk, struct sk_buff *skb)
skb_dst_force(skb);
if (!sk->sk_backlog.tail)
- sk->sk_backlog.head = skb;
+ WRITE_ONCE(sk->sk_backlog.head, skb);
else
sk->sk_backlog.tail->next = skb;
- sk->sk_backlog.tail = skb;
+ WRITE_ONCE(sk->sk_backlog.tail, skb);
skb->next = NULL;
}
@@ -1476,6 +1489,7 @@ static inline void sk_wmem_free_skb(struct sock *sk, struct sk_buff *skb)
sk_mem_uncharge(sk, skb->truesize);
if (static_branch_unlikely(&tcp_tx_skb_cache_key) &&
!sk->sk_tx_skb_cache && !skb_cloned(skb)) {
+ skb_ext_reset(skb);
skb_zcopy_clear(skb, true);
sk->sk_tx_skb_cache = skb;
return;
@@ -1489,7 +1503,7 @@ static inline void sock_release_ownership(struct sock *sk)
sk->sk_lock.owned = 0;
/* The sk_lock has mutex_unlock() semantics: */
- mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
+ mutex_release(&sk->sk_lock.dep_map, _RET_IP_);
}
}
@@ -1940,8 +1954,8 @@ struct dst_entry *sk_dst_check(struct sock *sk, u32 cookie);
static inline void sk_dst_confirm(struct sock *sk)
{
- if (!sk->sk_dst_pending_confirm)
- sk->sk_dst_pending_confirm = 1;
+ if (!READ_ONCE(sk->sk_dst_pending_confirm))
+ WRITE_ONCE(sk->sk_dst_pending_confirm, 1);
}
static inline void sock_confirm_neigh(struct sk_buff *skb, struct neighbour *n)
@@ -1951,10 +1965,10 @@ static inline void sock_confirm_neigh(struct sk_buff *skb, struct neighbour *n)
unsigned long now = jiffies;
/* avoid dirtying neighbour */
- if (n->confirmed != now)
- n->confirmed = now;
- if (sk && sk->sk_dst_pending_confirm)
- sk->sk_dst_pending_confirm = 0;
+ if (READ_ONCE(n->confirmed) != now)
+ WRITE_ONCE(n->confirmed, now);
+ if (sk && READ_ONCE(sk->sk_dst_pending_confirm))
+ WRITE_ONCE(sk->sk_dst_pending_confirm, 0);
}
}
@@ -2028,7 +2042,7 @@ static inline int skb_copy_to_page_nocache(struct sock *sk, struct iov_iter *fro
* sk_wmem_alloc_get - returns write allocations
* @sk: socket
*
- * Returns sk_wmem_alloc minus initial offset of one
+ * Return: sk_wmem_alloc minus initial offset of one
*/
static inline int sk_wmem_alloc_get(const struct sock *sk)
{
@@ -2039,7 +2053,7 @@ static inline int sk_wmem_alloc_get(const struct sock *sk)
* sk_rmem_alloc_get - returns read allocations
* @sk: socket
*
- * Returns sk_rmem_alloc
+ * Return: sk_rmem_alloc
*/
static inline int sk_rmem_alloc_get(const struct sock *sk)
{
@@ -2050,7 +2064,7 @@ static inline int sk_rmem_alloc_get(const struct sock *sk)
* sk_has_allocations - check if allocations are outstanding
* @sk: socket
*
- * Returns true if socket has write or read allocations
+ * Return: true if socket has write or read allocations
*/
static inline bool sk_has_allocations(const struct sock *sk)
{
@@ -2061,7 +2075,7 @@ static inline bool sk_has_allocations(const struct sock *sk)
* skwq_has_sleeper - check if there are any waiting processes
* @wq: struct socket_wq
*
- * Returns true if socket_wq has waiting processes
+ * Return: true if socket_wq has waiting processes
*
* The purpose of the skwq_has_sleeper and sock_poll_wait is to wrap the memory
* barrier call. They were added due to the race found within the tcp code.
@@ -2249,6 +2263,9 @@ struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp,
* gfpflags_allow_blocking() isn't enough here as direct reclaim may nest
* inside other socket operations and end up recursing into sk_page_frag()
* while it's already in use.
+ *
+ * Return: a per task page_frag if context allows that,
+ * otherwise a per socket one.
*/
static inline struct page_frag *sk_page_frag(struct sock *sk)
{
@@ -2306,7 +2323,7 @@ struct sock_skb_cb {
* using skb->cb[] would keep using it directly and utilize its
* alignement guarantee.
*/
-#define SOCK_SKB_CB_OFFSET ((FIELD_SIZEOF(struct sk_buff, cb) - \
+#define SOCK_SKB_CB_OFFSET ((sizeof_field(struct sk_buff, cb) - \
sizeof(struct sock_skb_cb)))
#define SOCK_SKB_CB(__skb) ((struct sock_skb_cb *)((__skb)->cb + \
@@ -2443,6 +2460,7 @@ static inline void skb_setup_tx_timestamp(struct sk_buff *skb, __u16 tsflags)
&skb_shinfo(skb)->tskey);
}
+DECLARE_STATIC_KEY_FALSE(tcp_rx_skb_cache_key);
/**
* sk_eat_skb - Release a skb if it is no longer needed
* @sk: socket to eat this skb from
@@ -2451,7 +2469,6 @@ static inline void skb_setup_tx_timestamp(struct sk_buff *skb, __u16 tsflags)
* This routine must be called with interrupts disabled or with the socket
* locked so that the sk_buff queue operation is ok.
*/
-DECLARE_STATIC_KEY_FALSE(tcp_rx_skb_cache_key);
static inline void sk_eat_skb(struct sock *sk, struct sk_buff *skb)
{
__skb_unlink(skb, &sk->sk_receive_queue);
@@ -2528,7 +2545,7 @@ static inline bool sk_listener(const struct sock *sk)
return (1 << sk->sk_state) & (TCPF_LISTEN | TCPF_NEW_SYN_RECV);
}
-void sock_enable_timestamp(struct sock *sk, int flag);
+void sock_enable_timestamp(struct sock *sk, enum sock_flags flag);
int sock_recv_errqueue(struct sock *sk, struct msghdr *msg, int len, int level,
int type);
@@ -2584,9 +2601,9 @@ static inline int sk_get_rmem0(const struct sock *sk, const struct proto *proto)
*/
static inline void sk_pacing_shift_update(struct sock *sk, int val)
{
- if (!sk || !sk_fullsock(sk) || sk->sk_pacing_shift == val)
+ if (!sk || !sk_fullsock(sk) || READ_ONCE(sk->sk_pacing_shift) == val)
return;
- sk->sk_pacing_shift = val;
+ WRITE_ONCE(sk->sk_pacing_shift, val);
}
/* if a socket is bound to a device, check that the given device
@@ -2608,4 +2625,6 @@ static inline bool sk_dev_equal_l3scope(struct sock *sk, int dif)
return false;
}
+void sock_def_readable(struct sock *sk);
+
#endif /* _SOCK_H */
diff --git a/include/net/tcp.h b/include/net/tcp.h
index ab4eb5eb5d07..a5ea27df3c2b 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -39,6 +39,7 @@
#include <net/tcp_states.h>
#include <net/inet_ecn.h>
#include <net/dst.h>
+#include <net/mptcp.h>
#include <linux/seq_file.h>
#include <linux/memcontrol.h>
@@ -182,6 +183,7 @@ void tcp_time_wait(struct sock *sk, int state, int timeo);
#define TCPOPT_SACK 5 /* SACK Block */
#define TCPOPT_TIMESTAMP 8 /* Better RTT estimations/PAWS */
#define TCPOPT_MD5SIG 19 /* MD5 Signature (RFC2385) */
+#define TCPOPT_MPTCP 30 /* Multipath TCP (RFC6824) */
#define TCPOPT_FASTOPEN 34 /* Fast open (RFC7413) */
#define TCPOPT_EXP 254 /* Experimental */
/* Magic number to be after the option value for sharing TCP
@@ -328,6 +330,9 @@ int tcp_sendpage_locked(struct sock *sk, struct page *page, int offset,
size_t size, int flags);
ssize_t do_tcp_sendpages(struct sock *sk, struct page *page, int offset,
size_t size, int flags);
+int tcp_send_mss(struct sock *sk, int *size_goal, int flags);
+void tcp_push(struct sock *sk, int flags, int mss_now, int nonagle,
+ int size_goal);
void tcp_release_cb(struct sock *sk);
void tcp_wfree(struct sk_buff *skb);
void tcp_write_timer_handler(struct sock *sk);
@@ -494,15 +499,16 @@ static inline void tcp_synq_overflow(const struct sock *sk)
reuse = rcu_dereference(sk->sk_reuseport_cb);
if (likely(reuse)) {
last_overflow = READ_ONCE(reuse->synq_overflow_ts);
- if (time_after32(now, last_overflow + HZ))
+ if (!time_between32(now, last_overflow,
+ last_overflow + HZ))
WRITE_ONCE(reuse->synq_overflow_ts, now);
return;
}
}
- last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp;
- if (time_after32(now, last_overflow + HZ))
- tcp_sk(sk)->rx_opt.ts_recent_stamp = now;
+ last_overflow = READ_ONCE(tcp_sk(sk)->rx_opt.ts_recent_stamp);
+ if (!time_between32(now, last_overflow, last_overflow + HZ))
+ WRITE_ONCE(tcp_sk(sk)->rx_opt.ts_recent_stamp, now);
}
/* syncookies: no recent synqueue overflow on this listening socket? */
@@ -517,13 +523,23 @@ static inline bool tcp_synq_no_recent_overflow(const struct sock *sk)
reuse = rcu_dereference(sk->sk_reuseport_cb);
if (likely(reuse)) {
last_overflow = READ_ONCE(reuse->synq_overflow_ts);
- return time_after32(now, last_overflow +
- TCP_SYNCOOKIE_VALID);
+ return !time_between32(now, last_overflow - HZ,
+ last_overflow +
+ TCP_SYNCOOKIE_VALID);
}
}
- last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp;
- return time_after32(now, last_overflow + TCP_SYNCOOKIE_VALID);
+ last_overflow = READ_ONCE(tcp_sk(sk)->rx_opt.ts_recent_stamp);
+
+ /* If last_overflow <= jiffies <= last_overflow + TCP_SYNCOOKIE_VALID,
+ * then we're under synflood. However, we have to use
+ * 'last_overflow - HZ' as lower bound. That's because a concurrent
+ * tcp_synq_overflow() could update .ts_recent_stamp after we read
+ * jiffies but before we store .ts_recent_stamp into last_overflow,
+ * which could lead to rejecting a valid syncookie.
+ */
+ return !time_between32(now, last_overflow - HZ,
+ last_overflow + TCP_SYNCOOKIE_VALID);
}
static inline u32 tcp_cookie_time(void)
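time_between32(t, l, h) is assumed here to be the usual wraparound-safe test on unsigned 32-bit differences, (u32)(h) - (u32)(l) >= (u32)(t) - (u32)(l); that is what keeps the [last_overflow - HZ, last_overflow + TCP_SYNCOOKIE_VALID] window correct across a jiffies wrap. A self-contained check of that behaviour:

#include <stdio.h>
#include <stdint.h>

/* Assumed equivalent of the kernel's time_between32(): "l <= t <= h"
 * computed on unsigned 32-bit differences, so it survives wraparound.
 */
static int time_between32(uint32_t t, uint32_t l, uint32_t h)
{
        return (uint32_t)(h - l) >= (uint32_t)(t - l);
}

int main(void)
{
        uint32_t hz = 1000;                    /* pretend HZ == 1000 */
        uint32_t last_overflow = 0xfffffff0u;  /* just before the 32-bit wrap */

        /* 'now' wrapped past zero but is still inside the window -> 1 */
        printf("%d\n", time_between32(0x00000010u, last_overflow,
                                      last_overflow + hz));
        /* five seconds later, well outside the window -> 0 */
        printf("%d\n", time_between32(last_overflow + 5 * hz, last_overflow,
                                      last_overflow + hz));
        return 0;
}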
@@ -537,7 +553,7 @@ static inline u32 tcp_cookie_time(void)
u32 __cookie_v4_init_sequence(const struct iphdr *iph, const struct tcphdr *th,
u16 *mssp);
__u32 cookie_v4_init_sequence(const struct sk_buff *skb, __u16 *mss);
-u64 cookie_init_timestamp(struct request_sock *req);
+u64 cookie_init_timestamp(struct request_sock *req, u64 now);
bool cookie_timestamp_decode(const struct net *net,
struct tcp_options_received *opt);
bool cookie_ecn_ok(const struct tcp_options_received *opt,
@@ -757,10 +773,16 @@ static inline u32 tcp_time_stamp(const struct tcp_sock *tp)
return div_u64(tp->tcp_mstamp, USEC_PER_SEC / TCP_TS_HZ);
}
+/* Convert a nsec timestamp into TCP TSval timestamp (ms based currently) */
+static inline u32 tcp_ns_to_ts(u64 ns)
+{
+ return div_u64(ns, NSEC_PER_SEC / TCP_TS_HZ);
+}
+
/* Could use tcp_clock_us() / 1000, but this version uses a single divide */
static inline u32 tcp_time_stamp_raw(void)
{
- return div_u64(tcp_clock_ns(), NSEC_PER_SEC / TCP_TS_HZ);
+ return tcp_ns_to_ts(tcp_clock_ns());
}
void tcp_mstamp_refresh(struct tcp_sock *tp);
@@ -772,7 +794,7 @@ static inline u32 tcp_stamp_us_delta(u64 t1, u64 t0)
static inline u32 tcp_skb_timestamp(const struct sk_buff *skb)
{
- return div_u64(skb->skb_mstamp_ns, NSEC_PER_SEC / TCP_TS_HZ);
+ return tcp_ns_to_ts(skb->skb_mstamp_ns);
}
/* provide the departure time in us unit */
@@ -960,6 +982,13 @@ static inline bool tcp_skb_can_collapse_to(const struct sk_buff *skb)
return likely(!TCP_SKB_CB(skb)->eor);
}
+static inline bool tcp_skb_can_collapse(const struct sk_buff *to,
+ const struct sk_buff *from)
+{
+ return likely(tcp_skb_can_collapse_to(to) &&
+ mptcp_skb_can_collapse(to, from));
+}
+
/* Events passed to congestion control interface */
enum tcp_ca_event {
CA_EVENT_TX_START, /* first transmit when no packets in flight */
@@ -990,6 +1019,7 @@ enum tcp_ca_ack_event_flags {
#define TCP_CONG_NON_RESTRICTED 0x1
/* Requires ECN/ECT set on all packets */
#define TCP_CONG_NEEDS_ECN 0x2
+#define TCP_CONG_MASK (TCP_CONG_NON_RESTRICTED | TCP_CONG_NEEDS_ECN)
union tcp_cc_info;
@@ -1084,6 +1114,7 @@ u32 tcp_reno_undo_cwnd(struct sock *sk);
void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked);
extern struct tcp_congestion_ops tcp_reno;
+struct tcp_congestion_ops *tcp_ca_find(const char *name);
struct tcp_congestion_ops *tcp_ca_find_key(u32 key);
u32 tcp_ca_get_key_by_name(struct net *net, const char *name, bool *ecn_ca);
#ifdef CONFIG_INET
@@ -1515,8 +1546,9 @@ struct tcp_md5sig_key {
struct hlist_node node;
u8 keylen;
u8 family; /* AF_INET or AF_INET6 */
- union tcp_md5_addr addr;
u8 prefixlen;
+ union tcp_md5_addr addr;
+ int l3index; /* set if key added with L3 scope */
u8 key[TCP_MD5SIG_MAXKEYLEN];
struct rcu_head rcu;
};
@@ -1560,34 +1592,33 @@ struct tcp_md5sig_pool {
int tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key,
const struct sock *sk, const struct sk_buff *skb);
int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
- int family, u8 prefixlen, const u8 *newkey, u8 newkeylen,
- gfp_t gfp);
+ int family, u8 prefixlen, int l3index,
+ const u8 *newkey, u8 newkeylen, gfp_t gfp);
int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr,
- int family, u8 prefixlen);
+ int family, u8 prefixlen, int l3index);
struct tcp_md5sig_key *tcp_v4_md5_lookup(const struct sock *sk,
const struct sock *addr_sk);
#ifdef CONFIG_TCP_MD5SIG
#include <linux/jump_label.h>
extern struct static_key_false tcp_md5_needed;
-struct tcp_md5sig_key *__tcp_md5_do_lookup(const struct sock *sk,
+struct tcp_md5sig_key *__tcp_md5_do_lookup(const struct sock *sk, int l3index,
const union tcp_md5_addr *addr,
int family);
static inline struct tcp_md5sig_key *
-tcp_md5_do_lookup(const struct sock *sk,
- const union tcp_md5_addr *addr,
- int family)
+tcp_md5_do_lookup(const struct sock *sk, int l3index,
+ const union tcp_md5_addr *addr, int family)
{
if (!static_branch_unlikely(&tcp_md5_needed))
return NULL;
- return __tcp_md5_do_lookup(sk, addr, family);
+ return __tcp_md5_do_lookup(sk, l3index, addr, family);
}
#define tcp_twsk_md5_key(twsk) ((twsk)->tw_md5_key)
#else
-static inline struct tcp_md5sig_key *tcp_md5_do_lookup(const struct sock *sk,
- const union tcp_md5_addr *addr,
- int family)
+static inline struct tcp_md5sig_key *
+tcp_md5_do_lookup(const struct sock *sk, int l3index,
+ const union tcp_md5_addr *addr, int family)
{
return NULL;
}
@@ -1749,9 +1780,18 @@ static inline bool tcp_skb_is_last(const struct sock *sk,
return skb_queue_is_last(&sk->sk_write_queue, skb);
}
+/**
+ * tcp_write_queue_empty - test if any payload (or FIN) is available in write queue
+ * @sk: socket
+ *
+ * Since the write queue can have a temporary empty skb in it,
+ * we must not use "return skb_queue_empty(&sk->sk_write_queue)"
+ */
static inline bool tcp_write_queue_empty(const struct sock *sk)
{
- return skb_queue_empty(&sk->sk_write_queue);
+ const struct tcp_sock *tp = tcp_sk(sk);
+
+ return tp->write_seq == tp->snd_nxt;
}
static inline bool tcp_rtx_queue_empty(const struct sock *sk)
@@ -1976,6 +2016,11 @@ struct tcp_request_sock_ops {
enum tcp_synack_type synack_type);
};
+extern const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops;
+#if IS_ENABLED(CONFIG_IPV6)
+extern const struct tcp_request_sock_ops tcp_request_sock_ipv6_ops;
+#endif
+
#ifdef CONFIG_SYN_COOKIES
static inline __u32 cookie_init_sequence(const struct tcp_request_sock_ops *ops,
const struct sock *sk, struct sk_buff *skb,
@@ -2121,12 +2166,16 @@ struct tcp_ulp_ops {
/* initialize ulp */
int (*init)(struct sock *sk);
/* update ulp */
- void (*update)(struct sock *sk, struct proto *p);
+ void (*update)(struct sock *sk, struct proto *p,
+ void (*write_space)(struct sock *sk));
/* cleanup ulp */
void (*release)(struct sock *sk);
/* diagnostic */
int (*get_info)(const struct sock *sk, struct sk_buff *skb);
size_t (*get_info_size)(const struct sock *sk);
+ /* clone ulp */
+ void (*clone)(const struct request_sock *req, struct sock *newsk,
+ const gfp_t priority);
char name[TCP_ULP_NAME_MAX];
struct module *owner;
@@ -2136,7 +2185,8 @@ void tcp_unregister_ulp(struct tcp_ulp_ops *type);
int tcp_set_ulp(struct sock *sk, const char *name);
void tcp_get_available_ulp(char *buf, size_t len);
void tcp_cleanup_ulp(struct sock *sk);
-void tcp_update_ulp(struct sock *sk, struct proto *p);
+void tcp_update_ulp(struct sock *sk, struct proto *p,
+ void (*write_space)(struct sock *sk));
#define MODULE_ALIAS_TCP_ULP(name) \
__MODULE_INFO(alias, alias_userspace, name); \
diff --git a/include/net/tls.h b/include/net/tls.h
index f4ad831eaa02..bf9eb4823933 100644
--- a/include/net/tls.h
+++ b/include/net/tls.h
@@ -44,6 +44,7 @@
#include <linux/netdevice.h>
#include <linux/rcupdate.h>
+#include <net/net_namespace.h>
#include <net/tcp.h>
#include <net/strparser.h>
#include <crypto/aead.h>
@@ -61,7 +62,6 @@
#define TLS_RECORD_TYPE_DATA 0x17
#define TLS_AAD_SPACE_SIZE 13
-#define TLS_DEVICE_NAME_MAX 32
#define MAX_IV_SIZE 16
#define TLS_MAX_REC_SEQ_SIZE 8
@@ -75,36 +75,14 @@
*/
#define TLS_AES_CCM_IV_B0_BYTE 2
-/*
- * This structure defines the routines for Inline TLS driver.
- * The following routines are optional and filled with a
- * null pointer if not defined.
- *
- * @name: Its the name of registered Inline tls device
- * @dev_list: Inline tls device list
- * int (*feature)(struct tls_device *device);
- * Called to return Inline TLS driver capability
- *
- * int (*hash)(struct tls_device *device, struct sock *sk);
- * This function sets Inline driver for listen and program
- * device specific functioanlity as required
- *
- * void (*unhash)(struct tls_device *device, struct sock *sk);
- * This function cleans listen state set by Inline TLS driver
- *
- * void (*release)(struct kref *kref);
- * Release the registered device and allocated resources
- * @kref: Number of reference to tls_device
- */
-struct tls_device {
- char name[TLS_DEVICE_NAME_MAX];
- struct list_head dev_list;
- int (*feature)(struct tls_device *device);
- int (*hash)(struct tls_device *device, struct sock *sk);
- void (*unhash)(struct tls_device *device, struct sock *sk);
- void (*release)(struct kref *kref);
- struct kref kref;
-};
+#define __TLS_INC_STATS(net, field) \
+ __SNMP_INC_STATS((net)->mib.tls_statistics, field)
+#define TLS_INC_STATS(net, field) \
+ SNMP_INC_STATS((net)->mib.tls_statistics, field)
+#define __TLS_DEC_STATS(net, field) \
+ __SNMP_DEC_STATS((net)->mib.tls_statistics, field)
+#define TLS_DEC_STATS(net, field) \
+ SNMP_DEC_STATS((net)->mib.tls_statistics, field)
enum {
TLS_BASE,
@@ -122,7 +100,6 @@ struct tls_rec {
struct list_head list;
int tx_ready;
int tx_flags;
- int inplace_crypto;
struct sk_msg msg_plaintext;
struct sk_msg msg_encrypted;
@@ -159,7 +136,7 @@ struct tls_sw_context_tx {
struct list_head tx_list;
atomic_t encrypt_pending;
int async_notify;
- int async_capable;
+ u8 async_capable:1;
#define BIT_TX_SCHEDULED 0
#define BIT_TX_CLOSING 1
@@ -175,8 +152,8 @@ struct tls_sw_context_rx {
struct sk_buff *recv_pkt;
u8 control;
- int async_capable;
- bool decrypted;
+ u8 async_capable:1;
+ u8 decrypted:1;
atomic_t decrypt_pending;
bool async_notify;
};
@@ -345,7 +322,10 @@ struct tls_offload_context_rx {
#define TLS_OFFLOAD_CONTEXT_SIZE_RX \
(sizeof(struct tls_offload_context_rx) + TLS_DRIVER_STATE_SIZE_RX)
+struct tls_context *tls_ctx_create(struct sock *sk);
void tls_ctx_free(struct sock *sk, struct tls_context *ctx);
+void update_sk_prot(struct sock *sk, struct tls_context *ctx);
+
int wait_on_pending_writer(struct sock *sk, long *timeo);
int tls_sk_query(struct sock *sk, int optname, char __user *optval,
int __user *optlen);
@@ -396,7 +376,7 @@ int tls_push_sg(struct sock *sk, struct tls_context *ctx,
int flags);
int tls_push_partial_record(struct sock *sk, struct tls_context *ctx,
int flags);
-bool tls_free_partial_record(struct sock *sk, struct tls_context *ctx);
+void tls_free_partial_record(struct sock *sk, struct tls_context *ctx);
static inline struct tls_msg *tls_msg(struct sk_buff *skb)
{
@@ -630,13 +610,6 @@ tls_offload_rx_resync_set_type(struct sock *sk, enum tls_offload_sync_type type)
tls_offload_ctx_rx(tls_ctx)->resync_type = type;
}
-static inline void tls_offload_tx_resync_request(struct sock *sk)
-{
- struct tls_context *tls_ctx = tls_get_ctx(sk);
-
- WARN_ON(test_and_set_bit(TLS_TX_SYNC_SCHED, &tls_ctx->flags));
-}
-
/* Driver's seq tracking has to be disabled until resync succeeded */
static inline bool tls_offload_tx_resync_pending(struct sock *sk)
{
@@ -648,10 +621,11 @@ static inline bool tls_offload_tx_resync_pending(struct sock *sk)
return ret;
}
+int __net_init tls_proc_init(struct net *net);
+void __net_exit tls_proc_fini(struct net *net);
+
int tls_proccess_cmsg(struct sock *sk, struct msghdr *msg,
unsigned char *record_type);
-void tls_register_device(struct tls_device *device);
-void tls_unregister_device(struct tls_device *device);
int decrypt_skb(struct sock *sk, struct sk_buff *skb,
struct scatterlist *sgout);
struct sk_buff *tls_encrypt_skb(struct sk_buff *skb);
@@ -667,12 +641,23 @@ int tls_sw_fallback_init(struct sock *sk,
#ifdef CONFIG_TLS_DEVICE
void tls_device_init(void);
void tls_device_cleanup(void);
+void tls_device_sk_destruct(struct sock *sk);
int tls_set_device_offload(struct sock *sk, struct tls_context *ctx);
void tls_device_free_resources_tx(struct sock *sk);
int tls_set_device_offload_rx(struct sock *sk, struct tls_context *ctx);
void tls_device_offload_cleanup_rx(struct sock *sk);
void tls_device_rx_resync_new_rec(struct sock *sk, u32 rcd_len, u32 seq);
-int tls_device_decrypted(struct sock *sk, struct sk_buff *skb);
+void tls_offload_tx_resync_request(struct sock *sk, u32 got_seq, u32 exp_seq);
+int tls_device_decrypted(struct sock *sk, struct tls_context *tls_ctx,
+ struct sk_buff *skb, struct strp_msg *rxm);
+
+static inline bool tls_is_sk_rx_device_offloaded(struct sock *sk)
+{
+ if (!sk_fullsock(sk) ||
+ smp_load_acquire(&sk->sk_destruct) != tls_device_sk_destruct)
+ return false;
+ return tls_get_ctx(sk)->rx_conf == TLS_HW;
+}
#else
static inline void tls_device_init(void) {}
static inline void tls_device_cleanup(void) {}
@@ -695,7 +680,9 @@ static inline void tls_device_offload_cleanup_rx(struct sock *sk) {}
static inline void
tls_device_rx_resync_new_rec(struct sock *sk, u32 rcd_len, u32 seq) {}
-static inline int tls_device_decrypted(struct sock *sk, struct sk_buff *skb)
+static inline int
+tls_device_decrypted(struct sock *sk, struct tls_context *tls_ctx,
+ struct sk_buff *skb, struct strp_msg *rxm)
{
return 0;
}
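The new TLS_INC_STATS()/TLS_DEC_STATS() wrappers ultimately just bump a counter in the per-netns linux_tls_mib array. A single-CPU userspace model of that bookkeeping (the counter names are hypothetical) is:

#include <stdio.h>

enum { DEMO_TLSCURRTXSW, DEMO_TLSCURRRXSW, DEMO_TLSMAX };

struct demo_tls_mib {
        unsigned long mibs[DEMO_TLSMAX];
};

#define DEMO_TLS_INC_STATS(mib, field) ((mib)->mibs[field]++)
#define DEMO_TLS_DEC_STATS(mib, field) ((mib)->mibs[field]--)

int main(void)
{
        struct demo_tls_mib mib = { { 0 } };

        DEMO_TLS_INC_STATS(&mib, DEMO_TLSCURRTXSW);   /* TX sw context created */
        DEMO_TLS_INC_STATS(&mib, DEMO_TLSCURRTXSW);
        DEMO_TLS_DEC_STATS(&mib, DEMO_TLSCURRTXSW);   /* one context torn down */
        printf("tx sw contexts: %lu\n", mib.mibs[DEMO_TLSCURRTXSW]);
        return 0;
}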
diff --git a/include/net/tls_toe.h b/include/net/tls_toe.h
new file mode 100644
index 000000000000..b3aa7593ce2c
--- /dev/null
+++ b/include/net/tls_toe.h
@@ -0,0 +1,77 @@
+/*
+ * Copyright (c) 2016-2017, Mellanox Technologies. All rights reserved.
+ * Copyright (c) 2016-2017, Dave Watson <davejwatson@fb.com>. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/kref.h>
+#include <linux/list.h>
+
+struct sock;
+
+#define TLS_TOE_DEVICE_NAME_MAX 32
+
+/*
+ * This structure defines the routines for Inline TLS driver.
+ * The following routines are optional and filled with a
+ * null pointer if not defined.
+ *
+ * @name: It's the name of the registered Inline TLS device
+ * @dev_list: Inline tls device list
+ * int (*feature)(struct tls_toe_device *device);
+ * Called to return Inline TLS driver capability
+ *
+ * int (*hash)(struct tls_toe_device *device, struct sock *sk);
+ * This function sets Inline driver for listen and program
+ * device specific functioanlity as required
+ *
+ * void (*unhash)(struct tls_toe_device *device, struct sock *sk);
+ * This function cleans listen state set by Inline TLS driver
+ *
+ * void (*release)(struct kref *kref);
+ * Release the registered device and allocated resources
+ * @kref: Number of reference to tls_toe_device
+ */
+struct tls_toe_device {
+ char name[TLS_TOE_DEVICE_NAME_MAX];
+ struct list_head dev_list;
+ int (*feature)(struct tls_toe_device *device);
+ int (*hash)(struct tls_toe_device *device, struct sock *sk);
+ void (*unhash)(struct tls_toe_device *device, struct sock *sk);
+ void (*release)(struct kref *kref);
+ struct kref kref;
+};
+
+int tls_toe_bypass(struct sock *sk);
+int tls_toe_hash(struct sock *sk);
+void tls_toe_unhash(struct sock *sk);
+
+void tls_toe_register_device(struct tls_toe_device *device);
+void tls_toe_unregister_device(struct tls_toe_device *device);
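A TOE-capable driver would typically define a tls_toe_device, fill in whichever optional callbacks it supports, and register it at module init. A minimal sketch with hypothetical foo_* names:

static int foo_feature(struct tls_toe_device *dev)
{
	return 1;	/* hypothetical: report Inline TLS capability */
}

static struct tls_toe_device foo_toe_dev = {
	.name    = "foo-inline-tls",
	.feature = foo_feature,
	/* .hash, .unhash and .release are optional and may stay NULL */
};

static int __init foo_tls_init(void)
{
	tls_toe_register_device(&foo_toe_dev);
	return 0;
}

static void __exit foo_tls_exit(void)
{
	tls_toe_unregister_device(&foo_toe_dev);
}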
diff --git a/include/net/udp.h b/include/net/udp.h
index bad74f780831..e55d5f765807 100644
--- a/include/net/udp.h
+++ b/include/net/udp.h
@@ -167,7 +167,7 @@ typedef struct sock *(*udp_lookup_t)(struct sk_buff *skb, __be16 sport,
__be16 dport);
struct sk_buff *udp_gro_receive(struct list_head *head, struct sk_buff *skb,
- struct udphdr *uh, udp_lookup_t lookup);
+ struct udphdr *uh, struct sock *sk);
int udp_gro_complete(struct sk_buff *skb, int nhoff, udp_lookup_t lookup);
struct sk_buff *__udp_gso_segment(struct sk_buff *gso_skb,
@@ -476,6 +476,16 @@ static inline struct sk_buff *udp_rcv_segment(struct sock *sk,
if (!inet_get_convert_csum(sk))
features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+ /* UDP segmentation expects packets of type CHECKSUM_PARTIAL or
+ * CHECKSUM_NONE in __udp_gso_segment. UDP GRO indeed builds partial
+ * packets in udp_gro_complete_segment. As does UDP GSO, verified by
+ * udp_send_skb. But when those packets are looped in dev_loopback_xmit
+ * their ip_summed is set to CHECKSUM_UNNECESSARY. Reset in this
+ * specific case, where PARTIAL is both correct and required.
+ */
+ if (skb->pkt_type == PACKET_LOOPBACK)
+ skb->ip_summed = CHECKSUM_PARTIAL;
+
/* the GSO CB lays after the UDP one, no need to save and restore any
* CB fragment
*/
diff --git a/include/net/vsock_addr.h b/include/net/vsock_addr.h
index 57d2db5c4bdf..cf8cc140d68d 100644
--- a/include/net/vsock_addr.h
+++ b/include/net/vsock_addr.h
@@ -8,7 +8,7 @@
#ifndef _VSOCK_ADDR_H_
#define _VSOCK_ADDR_H_
-#include <linux/vm_sockets.h>
+#include <uapi/linux/vm_sockets.h>
void vsock_addr_init(struct sockaddr_vm *addr, u32 cid, u32 port);
int vsock_addr_validate(const struct sockaddr_vm *addr);
diff --git a/include/net/x25.h b/include/net/x25.h
index ed1acc3044ac..d7d6c2b4ffa7 100644
--- a/include/net/x25.h
+++ b/include/net/x25.h
@@ -62,7 +62,8 @@ enum {
X25_STATE_1, /* Awaiting Call Accepted */
X25_STATE_2, /* Awaiting Clear Confirmation */
X25_STATE_3, /* Data Transfer */
- X25_STATE_4 /* Awaiting Reset Confirmation */
+ X25_STATE_4, /* Awaiting Reset Confirmation */
+ X25_STATE_5 /* Call Accepted / Call Connected pending */
};
enum {
diff --git a/include/net/xdp_priv.h b/include/net/xdp_priv.h
index 6a8cba6ea79a..a9d5b7603b89 100644
--- a/include/net/xdp_priv.h
+++ b/include/net/xdp_priv.h
@@ -12,12 +12,8 @@ struct xdp_mem_allocator {
struct page_pool *page_pool;
struct zero_copy_allocator *zc_alloc;
};
- int disconnect_cnt;
- unsigned long defer_start;
struct rhash_head node;
struct rcu_head rcu;
- struct delayed_work defer_wq;
- unsigned long defer_warn;
};
#endif /* __LINUX_NET_XDP_PRIV_H__ */
diff --git a/include/net/xdp_sock.h b/include/net/xdp_sock.h
index c9398ce7960f..e86ec48ef627 100644
--- a/include/net/xdp_sock.h
+++ b/include/net/xdp_sock.h
@@ -69,7 +69,13 @@ struct xdp_umem {
/* Nodes are linked in the struct xdp_sock map_list field, and used to
 * track which maps a certain socket resides in.
*/
-struct xsk_map;
+
+struct xsk_map {
+ struct bpf_map map;
+ spinlock_t lock; /* Synchronize map updates */
+ struct xdp_sock *xsk_map[];
+};
+
struct xsk_map_node {
struct list_head node;
struct xsk_map *map;
@@ -109,13 +115,11 @@ struct xdp_sock {
struct xdp_buff;
#ifdef CONFIG_XDP_SOCKETS
int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp);
-int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp);
-void xsk_flush(struct xdp_sock *xs);
bool xsk_is_setup_for_bpf_map(struct xdp_sock *xs);
/* Used from netdev driver */
bool xsk_umem_has_addrs(struct xdp_umem *umem, u32 cnt);
-u64 *xsk_umem_peek_addr(struct xdp_umem *umem, u64 *addr);
-void xsk_umem_discard_addr(struct xdp_umem *umem);
+bool xsk_umem_peek_addr(struct xdp_umem *umem, u64 *addr);
+void xsk_umem_release_addr(struct xdp_umem *umem);
void xsk_umem_complete_tx(struct xdp_umem *umem, u32 nb_entries);
bool xsk_umem_consume_tx(struct xdp_umem *umem, struct xdp_desc *desc);
void xsk_umem_consume_tx_done(struct xdp_umem *umem);
@@ -134,6 +138,21 @@ void xsk_map_try_sock_delete(struct xsk_map *map, struct xdp_sock *xs,
struct xdp_sock **map_entry);
int xsk_map_inc(struct xsk_map *map);
void xsk_map_put(struct xsk_map *map);
+int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp);
+void __xsk_map_flush(void);
+
+static inline struct xdp_sock *__xsk_map_lookup_elem(struct bpf_map *map,
+ u32 key)
+{
+ struct xsk_map *m = container_of(map, struct xsk_map, map);
+ struct xdp_sock *xs;
+
+ if (key >= map->max_entries)
+ return NULL;
+
+ xs = READ_ONCE(m->xsk_map[key]);
+ return xs;
+}
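The BPF redirect path is the intended consumer of this lookup helper: find the socket bound to a queue index, hand the frame over with __xsk_map_redirect(), and batch completions with __xsk_map_flush() once per NAPI poll. A rough sketch of that flow (error handling trimmed; the real logic lives in the core BPF/XDP code, not in this header):

static int xskmap_redirect_one(struct bpf_map *map, u32 queue_id,
			       struct xdp_buff *xdp)
{
	struct xdp_sock *xs;

	xs = __xsk_map_lookup_elem(map, queue_id);
	if (!xs)
		return -EINVAL;	/* no AF_XDP socket bound to this index */

	return __xsk_map_redirect(xs, xdp);
}

/* Once per NAPI poll, pending sockets are then flushed in one go: */
/*	__xsk_map_flush();	*/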
static inline u64 xsk_umem_extract_addr(u64 addr)
{
@@ -178,7 +197,7 @@ static inline bool xsk_umem_has_addrs_rq(struct xdp_umem *umem, u32 cnt)
return xsk_umem_has_addrs(umem, cnt - rq->length);
}
-static inline u64 *xsk_umem_peek_addr_rq(struct xdp_umem *umem, u64 *addr)
+static inline bool xsk_umem_peek_addr_rq(struct xdp_umem *umem, u64 *addr)
{
struct xdp_umem_fq_reuse *rq = umem->fq_reuse;
@@ -189,12 +208,12 @@ static inline u64 *xsk_umem_peek_addr_rq(struct xdp_umem *umem, u64 *addr)
return addr;
}
-static inline void xsk_umem_discard_addr_rq(struct xdp_umem *umem)
+static inline void xsk_umem_release_addr_rq(struct xdp_umem *umem)
{
struct xdp_umem_fq_reuse *rq = umem->fq_reuse;
if (!rq->length)
- xsk_umem_discard_addr(umem);
+ xsk_umem_release_addr(umem);
else
rq->length--;
}
@@ -224,15 +243,6 @@ static inline int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
return -ENOTSUPP;
}
-static inline int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
-{
- return -ENOTSUPP;
-}
-
-static inline void xsk_flush(struct xdp_sock *xs)
-{
-}
-
static inline bool xsk_is_setup_for_bpf_map(struct xdp_sock *xs)
{
return false;
@@ -248,7 +258,7 @@ static inline u64 *xsk_umem_peek_addr(struct xdp_umem *umem, u64 *addr)
return NULL;
}
-static inline void xsk_umem_discard_addr(struct xdp_umem *umem)
+static inline void xsk_umem_release_addr(struct xdp_umem *umem)
{
}
@@ -322,7 +332,7 @@ static inline u64 *xsk_umem_peek_addr_rq(struct xdp_umem *umem, u64 *addr)
return NULL;
}
-static inline void xsk_umem_discard_addr_rq(struct xdp_umem *umem)
+static inline void xsk_umem_release_addr_rq(struct xdp_umem *umem)
{
}
@@ -357,6 +367,20 @@ static inline u64 xsk_umem_adjust_offset(struct xdp_umem *umem, u64 handle,
return 0;
}
+static inline int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void __xsk_map_flush(void)
+{
+}
+
+static inline struct xdp_sock *__xsk_map_lookup_elem(struct bpf_map *map,
+ u32 key)
+{
+ return NULL;
+}
#endif /* CONFIG_XDP_SOCKETS */
#endif /* _LINUX_XDP_SOCK_H */
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index aa08a7a5f6ac..8f71c111e65a 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -193,6 +193,7 @@ struct xfrm_state {
/* Data for encapsulator */
struct xfrm_encap_tmpl *encap;
+ struct sock __rcu *encap_sk;
/* Data for care-of address */
xfrm_address_t *coaddr;
@@ -1547,6 +1548,9 @@ int __xfrm_init_state(struct xfrm_state *x, bool init_replay, bool offload);
int xfrm_init_state(struct xfrm_state *x);
int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type);
int xfrm_input_resume(struct sk_buff *skb, int nexthdr);
+int xfrm_trans_queue_net(struct net *net, struct sk_buff *skb,
+ int (*finish)(struct net *, struct sock *,
+ struct sk_buff *));
int xfrm_trans_queue(struct sk_buff *skb,
int (*finish)(struct net *, struct sock *,
struct sk_buff *));
@@ -1613,13 +1617,6 @@ static inline int xfrm_user_policy(struct sock *sk, int optname, u8 __user *optv
{
return -ENOPROTOOPT;
}
-
-static inline int xfrm4_udp_encap_rcv(struct sock *sk, struct sk_buff *skb)
-{
- /* should not happen */
- kfree_skb(skb);
- return 0;
-}
#endif
struct dst_entry *__xfrm_dst_lookup(struct net *net, int tos, int oif,
diff --git a/include/rdma/ib_cm.h b/include/rdma/ib_cm.h
index 49f4f75499b3..8ec482e391aa 100644
--- a/include/rdma/ib_cm.h
+++ b/include/rdma/ib_cm.h
@@ -1,38 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
* Copyright (c) 2004, 2005 Intel Corporation. All rights reserved.
* Copyright (c) 2004 Topspin Corporation. All rights reserved.
* Copyright (c) 2004 Voltaire Corporation. All rights reserved.
* Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
+ * Copyright (c) 2019, Mellanox Technologies inc. All rights reserved.
*/
-#if !defined(IB_CM_H)
+#ifndef IB_CM_H
#define IB_CM_H
#include <rdma/ib_mad.h>
@@ -526,21 +500,6 @@ int ib_send_cm_mra(struct ib_cm_id *cm_id,
u8 private_data_len);
/**
- * ib_send_cm_lap - Sends a load alternate path request.
- * @cm_id: Connection identifier associated with the load alternate path
- * message.
- * @alternate_path: A path record that identifies the alternate path to
- * load.
- * @private_data: Optional user-defined private data sent with the
- * load alternate path message.
- * @private_data_len: Size of the private data buffer, in bytes.
- */
-int ib_send_cm_lap(struct ib_cm_id *cm_id,
- struct sa_path_rec *alternate_path,
- const void *private_data,
- u8 private_data_len);
-
-/**
* ib_cm_init_qp_attr - Initializes the QP attributes for use in transitioning
* to a specified QP state.
* @cm_id: Communication identifier associated with the QP attributes to
@@ -560,25 +519,6 @@ int ib_cm_init_qp_attr(struct ib_cm_id *cm_id,
struct ib_qp_attr *qp_attr,
int *qp_attr_mask);
-/**
- * ib_send_cm_apr - Sends an alternate path response message in response to
- * a load alternate path request.
- * @cm_id: Connection identifier associated with the alternate path response.
- * @status: Reply status sent with the alternate path response.
- * @info: Optional additional information sent with the alternate path
- * response.
- * @info_length: Size of the additional information, in bytes.
- * @private_data: Optional user-defined private data sent with the
- * alternate path response message.
- * @private_data_len: Size of the private data buffer, in bytes.
- */
-int ib_send_cm_apr(struct ib_cm_id *cm_id,
- enum ib_cm_apr_status status,
- void *info,
- u8 info_length,
- const void *private_data,
- u8 private_data_len);
-
struct ib_cm_sidr_req_param {
struct sa_path_rec *path;
const struct ib_gid_attr *sgid_attr;
diff --git a/include/rdma/ib_mad.h b/include/rdma/ib_mad.h
index eea946fcc819..4e62650e2127 100644
--- a/include/rdma/ib_mad.h
+++ b/include/rdma/ib_mad.h
@@ -815,46 +815,6 @@ int ib_modify_mad(struct ib_mad_agent *mad_agent,
struct ib_mad_send_buf *send_buf, u32 timeout_ms);
/**
- * ib_redirect_mad_qp - Registers a QP for MAD services.
- * @qp: Reference to a QP that requires MAD services.
- * @rmpp_version: If set, indicates that the client will send
- * and receive MADs that contain the RMPP header for the given version.
- * If set to 0, indicates that RMPP is not used by this client.
- * @send_handler: The completion callback routine invoked after a send
- * request has completed.
- * @recv_handler: The completion callback routine invoked for a received
- * MAD.
- * @context: User specified context associated with the registration.
- *
- * Use of this call allows clients to use MAD services, such as RMPP,
- * on user-owned QPs. After calling this routine, users may send
- * MADs on the specified QP by calling ib_mad_post_send.
- */
-struct ib_mad_agent *ib_redirect_mad_qp(struct ib_qp *qp,
- u8 rmpp_version,
- ib_mad_send_handler send_handler,
- ib_mad_recv_handler recv_handler,
- void *context);
-
-/**
- * ib_process_mad_wc - Processes a work completion associated with a
- * MAD sent or received on a redirected QP.
- * @mad_agent: Specifies the registered MAD service using the redirected QP.
- * @wc: References a work completion associated with a sent or received
- * MAD segment.
- *
- * This routine is used to complete or continue processing on a MAD request.
- * If the work completion is associated with a send operation, calling
- * this routine is required to continue an RMPP transfer or to wait for a
- * corresponding response, if it is a request. If the work completion is
- * associated with a receive operation, calling this routine is required to
- * process an inbound or outbound RMPP transfer, or to match a response MAD
- * with its corresponding request.
- */
-int ib_process_mad_wc(struct ib_mad_agent *mad_agent,
- struct ib_wc *wc);
-
-/**
* ib_create_send_mad - Allocate and initialize a data buffer and work request
* for sending a MAD.
* @mad_agent: Specifies the registered MAD service to associate with the MAD.
diff --git a/include/rdma/ib_umem.h b/include/rdma/ib_umem.h
index a91b2af64ec4..e3518fd6b95b 100644
--- a/include/rdma/ib_umem.h
+++ b/include/rdma/ib_umem.h
@@ -69,8 +69,8 @@ static inline size_t ib_umem_num_pages(struct ib_umem *umem)
#ifdef CONFIG_INFINIBAND_USER_MEM
-struct ib_umem *ib_umem_get(struct ib_udata *udata, unsigned long addr,
- size_t size, int access, int dmasync);
+struct ib_umem *ib_umem_get(struct ib_device *device, unsigned long addr,
+ size_t size, int access);
void ib_umem_release(struct ib_umem *umem);
int ib_umem_page_count(struct ib_umem *umem);
int ib_umem_copy_from(void *dst, struct ib_umem *umem, size_t offset,
@@ -83,9 +83,9 @@ unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem,
#include <linux/err.h>
-static inline struct ib_umem *ib_umem_get(struct ib_udata *udata,
+static inline struct ib_umem *ib_umem_get(struct ib_device *device,
unsigned long addr, size_t size,
- int access, int dmasync)
+ int access)
{
return ERR_PTR(-EINVAL);
}
diff --git a/include/rdma/ib_umem_odp.h b/include/rdma/ib_umem_odp.h
index 253df1a1fa54..64314ff76612 100644
--- a/include/rdma/ib_umem_odp.h
+++ b/include/rdma/ib_umem_odp.h
@@ -35,11 +35,11 @@
#include <rdma/ib_umem.h>
#include <rdma/ib_verbs.h>
-#include <linux/interval_tree.h>
struct ib_umem_odp {
struct ib_umem umem;
- struct ib_ucontext_per_mm *per_mm;
+ struct mmu_interval_notifier notifier;
+ struct pid *tgid;
/*
* An array of the pages included in the on-demand paging umem.
@@ -62,13 +62,8 @@ struct ib_umem_odp {
struct mutex umem_mutex;
void *private; /* for the HW driver to use. */
- int notifiers_seq;
- int notifiers_count;
int npages;
- /* Tree tracking */
- struct interval_tree_node interval_tree;
-
/*
* An implicit odp umem cannot be DMA mapped, has 0 length, and serves
* only as an anchor for the driver to hold onto the per_mm. FIXME:
@@ -77,10 +72,7 @@ struct ib_umem_odp {
*/
bool is_implicit_odp;
- struct completion notifier_completion;
- int dying;
unsigned int page_shift;
- struct work_struct work;
};
static inline struct ib_umem_odp *to_ib_umem_odp(struct ib_umem *umem)
@@ -91,13 +83,13 @@ static inline struct ib_umem_odp *to_ib_umem_odp(struct ib_umem *umem)
/* Returns the first page of an ODP umem. */
static inline unsigned long ib_umem_start(struct ib_umem_odp *umem_odp)
{
- return umem_odp->interval_tree.start;
+ return umem_odp->notifier.interval_tree.start;
}
/* Returns the address of the page after the last one of an ODP umem. */
static inline unsigned long ib_umem_end(struct ib_umem_odp *umem_odp)
{
- return umem_odp->interval_tree.last + 1;
+ return umem_odp->notifier.interval_tree.last + 1;
}
static inline size_t ib_umem_odp_num_pages(struct ib_umem_odp *umem_odp)
@@ -121,21 +113,15 @@ static inline size_t ib_umem_odp_num_pages(struct ib_umem_odp *umem_odp)
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
-struct ib_ucontext_per_mm {
- struct mmu_notifier mn;
- struct pid *tgid;
-
- struct rb_root_cached umem_tree;
- /* Protects umem_tree */
- struct rw_semaphore umem_rwsem;
-};
-
-struct ib_umem_odp *ib_umem_odp_get(struct ib_udata *udata, unsigned long addr,
- size_t size, int access);
-struct ib_umem_odp *ib_umem_odp_alloc_implicit(struct ib_udata *udata,
+struct ib_umem_odp *
+ib_umem_odp_get(struct ib_device *device, unsigned long addr, size_t size,
+ int access, const struct mmu_interval_notifier_ops *ops);
+struct ib_umem_odp *ib_umem_odp_alloc_implicit(struct ib_device *device,
int access);
-struct ib_umem_odp *ib_umem_odp_alloc_child(struct ib_umem_odp *root_umem,
- unsigned long addr, size_t size);
+struct ib_umem_odp *
+ib_umem_odp_alloc_child(struct ib_umem_odp *root_umem, unsigned long addr,
+ size_t size,
+ const struct mmu_interval_notifier_ops *ops);
void ib_umem_odp_release(struct ib_umem_odp *umem_odp);
int ib_umem_odp_map_dma_pages(struct ib_umem_odp *umem_odp, u64 start_offset,
@@ -145,55 +131,11 @@ int ib_umem_odp_map_dma_pages(struct ib_umem_odp *umem_odp, u64 start_offset,
void ib_umem_odp_unmap_dma_pages(struct ib_umem_odp *umem_odp, u64 start_offset,
u64 bound);
-typedef int (*umem_call_back)(struct ib_umem_odp *item, u64 start, u64 end,
- void *cookie);
-/*
- * Call the callback on each ib_umem in the range. Returns the logical or of
- * the return values of the functions called.
- */
-int rbt_ib_umem_for_each_in_range(struct rb_root_cached *root,
- u64 start, u64 end,
- umem_call_back cb,
- bool blockable, void *cookie);
-
-/*
- * Find first region intersecting with address range.
- * Return NULL if not found
- */
-static inline struct ib_umem_odp *
-rbt_ib_umem_lookup(struct rb_root_cached *root, u64 addr, u64 length)
-{
- struct interval_tree_node *node;
-
- node = interval_tree_iter_first(root, addr, addr + length - 1);
- if (!node)
- return NULL;
- return container_of(node, struct ib_umem_odp, interval_tree);
-
-}
-
-static inline int ib_umem_mmu_notifier_retry(struct ib_umem_odp *umem_odp,
- unsigned long mmu_seq)
-{
- /*
- * This code is strongly based on the KVM code from
- * mmu_notifier_retry. Should be called with
- * the relevant locks taken (umem_odp->umem_mutex
- * and the ucontext umem_mutex semaphore locked for read).
- */
-
- if (unlikely(umem_odp->notifiers_count))
- return 1;
- if (umem_odp->notifiers_seq != mmu_seq)
- return 1;
- return 0;
-}
-
#else /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
-static inline struct ib_umem_odp *ib_umem_odp_get(struct ib_udata *udata,
- unsigned long addr,
- size_t size, int access)
+static inline struct ib_umem_odp *
+ib_umem_odp_get(struct ib_device *device, unsigned long addr, size_t size,
+ int access, const struct mmu_interval_notifier_ops *ops)
{
return ERR_PTR(-EINVAL);
}
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index e7e733add99f..1f779fad3a1e 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -72,11 +72,16 @@
#define IB_FW_VERSION_NAME_MAX ETHTOOL_FWVERS_LEN
struct ib_umem_odp;
+struct ib_uqp_object;
+struct ib_usrq_object;
+struct ib_uwq_object;
extern struct workqueue_struct *ib_wq;
extern struct workqueue_struct *ib_comp_wq;
extern struct workqueue_struct *ib_comp_unbound_wq;
+struct ib_ucq_object;
+
__printf(3, 4) __cold
void ibdev_printk(const char *level, const struct ib_device *ibdev,
const char *format, ...);
@@ -445,6 +450,8 @@ struct ib_device_attr {
struct ib_tm_caps tm_caps;
struct ib_cq_caps cq_caps;
u64 max_dm_size;
+	/* Max SGL entries per READ that still give optimized performance */
+ u32 max_sgl_rd;
};
enum ib_mtu {
@@ -1411,8 +1418,11 @@ enum ib_access_flags {
IB_ZERO_BASED = IB_UVERBS_ACCESS_ZERO_BASED,
IB_ACCESS_ON_DEMAND = IB_UVERBS_ACCESS_ON_DEMAND,
IB_ACCESS_HUGETLB = IB_UVERBS_ACCESS_HUGETLB,
+ IB_ACCESS_RELAXED_ORDERING = IB_UVERBS_ACCESS_RELAXED_ORDERING,
- IB_ACCESS_SUPPORTED = ((IB_ACCESS_HUGETLB << 1) - 1)
+ IB_ACCESS_OPTIONAL = IB_UVERBS_ACCESS_OPTIONAL_RANGE,
+ IB_ACCESS_SUPPORTED =
+ ((IB_ACCESS_HUGETLB << 1) - 1) | IB_ACCESS_OPTIONAL,
};
/*
@@ -1471,6 +1481,7 @@ struct ib_ucontext {
* Implementation details of the RDMA core, don't use in drivers:
*/
struct rdma_restrack_entry res;
+ struct xarray mmap_xa;
};
struct ib_uobject {
@@ -1541,7 +1552,7 @@ enum ib_poll_context {
struct ib_cq {
struct ib_device *device;
- struct ib_uobject *uobject;
+ struct ib_ucq_object *uobject;
ib_comp_handler comp_handler;
void (*event_handler)(struct ib_event *, void *);
void *cq_context;
@@ -1555,6 +1566,11 @@ struct ib_cq {
};
struct workqueue_struct *comp_wq;
struct dim *dim;
+
+ /* updated only by trace points */
+ ktime_t timestamp;
+ bool interrupt;
+
/*
* Implementation details of the RDMA core, don't use in drivers:
*/
@@ -1564,7 +1580,7 @@ struct ib_cq {
struct ib_srq {
struct ib_device *device;
struct ib_pd *pd;
- struct ib_uobject *uobject;
+ struct ib_usrq_object *uobject;
void (*event_handler)(struct ib_event *, void *);
void *srq_context;
enum ib_srq_type srq_type;
@@ -1609,7 +1625,7 @@ enum ib_wq_state {
struct ib_wq {
struct ib_device *device;
- struct ib_uobject *uobject;
+ struct ib_uwq_object *uobject;
void *wq_context;
void (*event_handler)(struct ib_event *, void *);
struct ib_pd *pd;
@@ -1725,7 +1741,7 @@ struct ib_qp {
atomic_t usecnt;
struct list_head open_list;
struct ib_qp *real_qp;
- struct ib_uobject *uobject;
+ struct ib_uqp_object *uobject;
void (*event_handler)(struct ib_event *, void *);
void *qp_context;
/* sgid_attrs associated with the AV's */
@@ -2120,7 +2136,7 @@ struct ib_flow_action {
atomic_t usecnt;
};
-struct ib_mad_hdr;
+struct ib_mad;
struct ib_grh;
enum ib_process_mad_flags {
@@ -2144,11 +2160,6 @@ struct ib_port_cache {
enum ib_port_state port_state;
};
-struct ib_cache {
- rwlock_t lock;
- struct ib_event_handler event_handler;
-};
-
struct ib_port_immutable {
int pkey_tbl_len;
int gid_tbl_len;
@@ -2218,6 +2229,11 @@ struct rdma_netdev_alloc_params {
struct net_device *netdev, void *param);
};
+struct ib_odp_counters {
+ atomic64_t faults;
+ atomic64_t invalidations;
+};
+
struct ib_counters {
struct ib_device *device;
struct ib_uobject *uobject;
@@ -2251,6 +2267,21 @@ struct iw_cm_conn_param;
#define DECLARE_RDMA_OBJ_SIZE(ib_struct) size_t size_##ib_struct
+struct rdma_user_mmap_entry {
+ struct kref ref;
+ struct ib_ucontext *ucontext;
+ unsigned long start_pgoff;
+ size_t npages;
+ bool driver_removed;
+};
+
+/* Return the offset (in bytes) the user should pass to libc's mmap() */
+static inline u64
+rdma_user_mmap_get_offset(const struct rdma_user_mmap_entry *entry)
+{
+ return (u64)entry->start_pgoff << PAGE_SHIFT;
+}
+
/**
* struct ib_device_ops - InfiniBand device operations
* This structure defines all the InfiniBand device operations, providers will
@@ -2278,9 +2309,8 @@ struct ib_device_ops {
int (*process_mad)(struct ib_device *device, int process_mad_flags,
u8 port_num, const struct ib_wc *in_wc,
const struct ib_grh *in_grh,
- const struct ib_mad_hdr *in_mad, size_t in_mad_size,
- struct ib_mad_hdr *out_mad, size_t *out_mad_size,
- u16 *out_mad_pkey_index);
+ const struct ib_mad *in_mad, struct ib_mad *out_mad,
+ size_t *out_mad_size, u16 *out_mad_pkey_index);
int (*query_device)(struct ib_device *device,
struct ib_device_attr *device_attr,
struct ib_udata *udata);
@@ -2363,6 +2393,13 @@ struct ib_device_ops {
struct ib_udata *udata);
void (*dealloc_ucontext)(struct ib_ucontext *context);
int (*mmap)(struct ib_ucontext *context, struct vm_area_struct *vma);
+ /**
+	 * This is called once the refcount of an entry in mmap_xa reaches
+	 * zero. The type of memory that was mapped may differ between
+	 * entries and is opaque to the rdma_user_mmap interface; freeing
+	 * it is therefore the driver's responsibility, implemented in mmap_free.
+ */
+ void (*mmap_free)(struct rdma_user_mmap_entry *entry);
void (*disassociate_ucontext)(struct ib_ucontext *ibcontext);
int (*alloc_pd)(struct ib_pd *pd, struct ib_udata *udata);
void (*dealloc_pd)(struct ib_pd *pd, struct ib_udata *udata);
@@ -2422,8 +2459,6 @@ struct ib_device_ops {
u64 iova);
int (*unmap_fmr)(struct list_head *fmr_list);
int (*dealloc_fmr)(struct ib_fmr *fmr);
- void (*invalidate_range)(struct ib_umem_odp *umem_odp,
- unsigned long start, unsigned long end);
int (*attach_mcast)(struct ib_qp *qp, union ib_gid *gid, u16 lid);
int (*detach_mcast)(struct ib_qp *qp, union ib_gid *gid, u16 lid);
struct ib_xrcd *(*alloc_xrcd)(struct ib_device *device,
@@ -2448,6 +2483,9 @@ struct ib_device_ops {
struct ifla_vf_info *ivf);
int (*get_vf_stats)(struct ib_device *device, int vf, u8 port,
struct ifla_vf_stats *stats);
+ int (*get_vf_guid)(struct ib_device *device, int vf, u8 port,
+ struct ifla_vf_guid *node_guid,
+ struct ifla_vf_guid *port_guid);
int (*set_vf_guid)(struct ib_device *device, int vf, u8 port, u64 guid,
int type);
struct ib_wq *(*create_wq)(struct ib_pd *pd,
@@ -2563,6 +2601,13 @@ struct ib_device_ops {
*/
int (*counter_update_stats)(struct rdma_counter *counter);
+ /**
+ * Allows rdma drivers to add their own restrack attributes
+ * dumped via 'rdma stat' iproute2 command.
+	 * dumped via the 'rdma stat' iproute2 command.
+ int (*fill_stat_entry)(struct sk_buff *msg,
+ struct rdma_restrack_entry *entry);
+
DECLARE_RDMA_OBJ_SIZE(ib_ah);
DECLARE_RDMA_OBJ_SIZE(ib_cq);
DECLARE_RDMA_OBJ_SIZE(ib_pd);
@@ -2590,13 +2635,18 @@ struct ib_device {
struct rcu_head rcu_head;
struct list_head event_handler_list;
- spinlock_t event_handler_lock;
+ /* Protects event_handler_list */
+ struct rw_semaphore event_handler_rwsem;
+
+ /* Protects QP's event_handler calls and open_qp list */
+ spinlock_t qp_open_list_lock;
struct rw_semaphore client_data_rwsem;
struct xarray client_data;
struct mutex unregistration_lock;
- struct ib_cache cache;
+ /* Synchronize GID, Pkey cache entries, subnet prefix, LMC */
+ rwlock_t cache_lock;
/**
* port_data is indexed by port number
*/
@@ -2789,18 +2839,26 @@ void ib_set_client_data(struct ib_device *device, struct ib_client *client,
void ib_set_device_ops(struct ib_device *device,
const struct ib_device_ops *ops);
-#if IS_ENABLED(CONFIG_INFINIBAND_USER_ACCESS)
int rdma_user_mmap_io(struct ib_ucontext *ucontext, struct vm_area_struct *vma,
- unsigned long pfn, unsigned long size, pgprot_t prot);
-#else
-static inline int rdma_user_mmap_io(struct ib_ucontext *ucontext,
- struct vm_area_struct *vma,
- unsigned long pfn, unsigned long size,
- pgprot_t prot)
-{
- return -EINVAL;
-}
-#endif
+ unsigned long pfn, unsigned long size, pgprot_t prot,
+ struct rdma_user_mmap_entry *entry);
+int rdma_user_mmap_entry_insert(struct ib_ucontext *ucontext,
+ struct rdma_user_mmap_entry *entry,
+ size_t length);
+int rdma_user_mmap_entry_insert_range(struct ib_ucontext *ucontext,
+ struct rdma_user_mmap_entry *entry,
+ size_t length, u32 min_pgoff,
+ u32 max_pgoff);
+
+struct rdma_user_mmap_entry *
+rdma_user_mmap_entry_get_pgoff(struct ib_ucontext *ucontext,
+ unsigned long pgoff);
+struct rdma_user_mmap_entry *
+rdma_user_mmap_entry_get(struct ib_ucontext *ucontext,
+ struct vm_area_struct *vma);
+void rdma_user_mmap_entry_put(struct rdma_user_mmap_entry *entry);
+
+void rdma_user_mmap_entry_remove(struct rdma_user_mmap_entry *entry);
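Taken together, these calls let a driver hand out mmap cookies without inventing its own offset scheme: insert an entry, return rdma_user_mmap_get_offset() to userspace, then resolve the offset again in the .mmap handler. A condensed sketch, assuming a hypothetical foo_mmap_entry wrapper and mapping attributes:

struct foo_mmap_entry {
	struct rdma_user_mmap_entry rdma_entry;
	unsigned long pfn;
};

static int foo_mmap(struct ib_ucontext *ucontext, struct vm_area_struct *vma)
{
	struct rdma_user_mmap_entry *entry;
	struct foo_mmap_entry *foo;
	int ret;

	entry = rdma_user_mmap_entry_get(ucontext, vma);
	if (!entry)
		return -EINVAL;

	foo = container_of(entry, struct foo_mmap_entry, rdma_entry);
	ret = rdma_user_mmap_io(ucontext, vma, foo->pfn,
				vma->vm_end - vma->vm_start,
				pgprot_noncached(vma->vm_page_prot), entry);

	rdma_user_mmap_entry_put(entry);
	return ret;
}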
static inline int ib_copy_from_udata(void *dest, struct ib_udata *udata, size_t len)
{
@@ -2897,7 +2955,7 @@ bool ib_modify_qp_is_ok(enum ib_qp_state cur_state, enum ib_qp_state next_state,
void ib_register_event_handler(struct ib_event_handler *event_handler);
void ib_unregister_event_handler(struct ib_event_handler *event_handler);
-void ib_dispatch_event(struct ib_event *event);
+void ib_dispatch_event(const struct ib_event *event);
int ib_query_port(struct ib_device *device,
u8 port_num, struct ib_port_attr *port_attr);
@@ -3303,6 +3361,9 @@ int ib_get_vf_config(struct ib_device *device, int vf, u8 port,
struct ifla_vf_info *info);
int ib_get_vf_stats(struct ib_device *device, int vf, u8 port,
struct ifla_vf_stats *stats);
+int ib_get_vf_guid(struct ib_device *device, int vf, u8 port,
+ struct ifla_vf_guid *node_guid,
+ struct ifla_vf_guid *port_guid);
int ib_set_vf_guid(struct ib_device *device, int vf, u8 port, u64 guid,
int type);
@@ -4043,9 +4104,7 @@ static inline void ib_dma_unmap_sg_attrs(struct ib_device *dev,
*/
static inline unsigned int ib_dma_max_seg_size(struct ib_device *dev)
{
- struct device_dma_parameters *p = dev->dma_device->dma_parms;
-
- return p ? p->max_segment_size : UINT_MAX;
+ return dma_get_max_seg_size(dev->dma_device);
}
/**
@@ -4107,6 +4166,15 @@ static inline void ib_dma_free_coherent(struct ib_device *dev,
dma_free_coherent(dev->dma_device, size, cpu_addr, dma_handle);
}
+/* ib_reg_user_mr - register a memory region for virtual addresses from kernel
+ * space. This function should be called when 'current' is the owning MM.
+ */
+struct ib_mr *ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
+ u64 virt_addr, int mr_access_flags);
+
+/* ib_advise_mr - give an advice about an address range in a memory region */
+int ib_advise_mr(struct ib_pd *pd, enum ib_uverbs_advise_mr_advice advice,
+ u32 flags, struct ib_sge *sg_list, u32 num_sge);
/**
* ib_dereg_mr_user - Deregisters a memory region and removes it from the
* HCA translation table.
@@ -4254,6 +4322,9 @@ static inline int ib_check_mr_access(int flags)
!(flags & IB_ACCESS_LOCAL_WRITE))
return -EINVAL;
+ if (flags & ~IB_ACCESS_SUPPORTED)
+ return -EINVAL;
+
return 0;
}
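With optional access bits in play, a plausible driver pattern is to validate with ib_check_mr_access() and then quietly drop any optional flag the hardware cannot honor instead of failing the registration. A hedged sketch; the device structure and capability field are hypothetical:

static int foo_adjust_mr_access(struct foo_dev *dev, int access_flags)
{
	if (ib_check_mr_access(access_flags))
		return -EINVAL;

	/* Optional bits may be ignored by devices that lack support. */
	if (!dev->relaxed_ordering_cap)
		access_flags &= ~IB_ACCESS_RELAXED_ORDERING;

	/* ... program the MR with the adjusted access_flags ... */
	return 0;
}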
diff --git a/include/rdma/iba.h b/include/rdma/iba.h
new file mode 100644
index 000000000000..6a1115b02a0d
--- /dev/null
+++ b/include/rdma/iba.h
@@ -0,0 +1,146 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/*
+ * Copyright (c) 2020, Mellanox Technologies inc. All rights reserved.
+ */
+#ifndef _IBA_DEFS_H_
+#define _IBA_DEFS_H_
+
+#include <linux/kernel.h>
+#include <linux/bitfield.h>
+#include <asm/unaligned.h>
+
+static inline u32 _iba_get8(const u8 *ptr)
+{
+ return *ptr;
+}
+
+static inline void _iba_set8(u8 *ptr, u32 mask, u32 prep_value)
+{
+ *ptr = (*ptr & ~mask) | prep_value;
+}
+
+static inline u16 _iba_get16(const __be16 *ptr)
+{
+ return be16_to_cpu(*ptr);
+}
+
+static inline void _iba_set16(__be16 *ptr, u16 mask, u16 prep_value)
+{
+ *ptr = cpu_to_be16((be16_to_cpu(*ptr) & ~mask) | prep_value);
+}
+
+static inline u32 _iba_get32(const __be32 *ptr)
+{
+ return be32_to_cpu(*ptr);
+}
+
+static inline void _iba_set32(__be32 *ptr, u32 mask, u32 prep_value)
+{
+ *ptr = cpu_to_be32((be32_to_cpu(*ptr) & ~mask) | prep_value);
+}
+
+static inline u64 _iba_get64(const __be64 *ptr)
+{
+ /*
+ * The mads are constructed so that 32 bit and smaller are naturally
+ * aligned, everything larger has a max alignment of 4 bytes.
+ */
+ return be64_to_cpu(get_unaligned(ptr));
+}
+
+static inline void _iba_set64(__be64 *ptr, u64 mask, u64 prep_value)
+{
+ put_unaligned(cpu_to_be64((_iba_get64(ptr) & ~mask) | prep_value), ptr);
+}
+
+#define _IBA_SET(field_struct, field_offset, field_mask, num_bits, ptr, value) \
+ ({ \
+ field_struct *_ptr = ptr; \
+ _iba_set##num_bits((void *)_ptr + (field_offset), field_mask, \
+ FIELD_PREP(field_mask, value)); \
+ })
+#define IBA_SET(field, ptr, value) _IBA_SET(field, ptr, value)
+
+#define _IBA_GET_MEM_PTR(field_struct, field_offset, type, num_bits, ptr) \
+ ({ \
+ field_struct *_ptr = ptr; \
+ (type *)((void *)_ptr + (field_offset)); \
+ })
+#define IBA_GET_MEM_PTR(field, ptr) _IBA_GET_MEM_PTR(field, ptr)
+
+/* FIXME: A set should always set the entire field, meaning we should zero the trailing bytes */
+#define _IBA_SET_MEM(field_struct, field_offset, type, num_bits, ptr, in, \
+ bytes) \
+ ({ \
+ const type *_in_ptr = in; \
+ WARN_ON(bytes * 8 > num_bits); \
+ if (in && bytes) \
+ memcpy(_IBA_GET_MEM_PTR(field_struct, field_offset, \
+ type, num_bits, ptr), \
+ _in_ptr, bytes); \
+ })
+#define IBA_SET_MEM(field, ptr, in, bytes) _IBA_SET_MEM(field, ptr, in, bytes)
+
+#define _IBA_GET(field_struct, field_offset, field_mask, num_bits, ptr) \
+ ({ \
+ const field_struct *_ptr = ptr; \
+ (u##num_bits) FIELD_GET( \
+ field_mask, _iba_get##num_bits((const void *)_ptr + \
+ (field_offset))); \
+ })
+#define IBA_GET(field, ptr) _IBA_GET(field, ptr)
+
+#define _IBA_GET_MEM(field_struct, field_offset, type, num_bits, ptr, out, \
+ bytes) \
+ ({ \
+ type *_out_ptr = out; \
+ WARN_ON(bytes * 8 > num_bits); \
+ if (out && bytes) \
+ memcpy(_out_ptr, \
+ _IBA_GET_MEM_PTR(field_struct, field_offset, \
+ type, num_bits, ptr), \
+ bytes); \
+ })
+#define IBA_GET_MEM(field, ptr, out, bytes) _IBA_GET_MEM(field, ptr, out, bytes)
+
+/*
+ * The generated list becomes the parameters to the macros, the order is:
+ * - struct this applies to
+ * - starting offset of the mask
+ * - GENMASK or GENMASK_ULL in CPU order
+ * - The width of data the mask operations should work on, in bits
+ */
+
+/*
+ * Extraction using a tabular description like table 106. bit_offset is from
+ * the Byte[Bit] notation.
+ */
+#define IBA_FIELD_BLOC(field_struct, byte_offset, bit_offset, num_bits) \
+ field_struct, byte_offset, \
+ GENMASK(7 - (bit_offset), 7 - (bit_offset) - (num_bits - 1)), \
+ 8
+#define IBA_FIELD8_LOC(field_struct, byte_offset, num_bits) \
+ IBA_FIELD_BLOC(field_struct, byte_offset, 0, num_bits)
+
+#define IBA_FIELD16_LOC(field_struct, byte_offset, num_bits) \
+ field_struct, (byte_offset)&0xFFFE, \
+ GENMASK(15 - (((byte_offset) % 2) * 8), \
+ 15 - (((byte_offset) % 2) * 8) - (num_bits - 1)), \
+ 16
+
+#define IBA_FIELD32_LOC(field_struct, byte_offset, num_bits) \
+ field_struct, (byte_offset)&0xFFFC, \
+ GENMASK(31 - (((byte_offset) % 4) * 8), \
+ 31 - (((byte_offset) % 4) * 8) - (num_bits - 1)), \
+ 32
+
+#define IBA_FIELD64_LOC(field_struct, byte_offset) \
+ field_struct, byte_offset, GENMASK_ULL(63, 0), 64
+/*
+ * In the IBTA spec, anything wider than 64 bits is a whole number
+ * of bytes with no leftover bits.
+ */
+#define IBA_FIELD_MLOC(field_struct, byte_offset, num_bits, type) \
+ field_struct, byte_offset, type, num_bits
+
+#endif /* _IBA_DEFS_H_ */
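As a worked example of the Byte[Bit] notation above (the struct and offsets are purely illustrative):

/*
 * IBA_FIELD_BLOC(struct example, 43, 5, 2)
 *   -> (struct example, 43, GENMASK(2, 1), 8)
 *      i.e. the 2 bits starting at Bit 5 (MSB-first IBTA numbering) of
 *      byte 43, accessed as an 8-bit load/store.
 *
 * IBA_FIELD32_LOC(struct example, 44, 24)
 *   -> (struct example, 44, GENMASK(31, 8), 32)
 *      i.e. the top 24 bits of the big-endian 32-bit word at byte 44.
 *
 * IBA_GET() byte-swaps the granule and applies FIELD_GET() with that
 * mask; IBA_SET() does the inverse with FIELD_PREP().
 */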
diff --git a/include/rdma/ibta_vol1_c12.h b/include/rdma/ibta_vol1_c12.h
new file mode 100644
index 000000000000..269904425d3f
--- /dev/null
+++ b/include/rdma/ibta_vol1_c12.h
@@ -0,0 +1,213 @@
+/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
+/*
+ * Copyright (c) 2020, Mellanox Technologies inc. All rights reserved.
+ *
+ * This file is IBTA volume 1, chapter 12 declarations:
+ * CHAPTER 12: COMMUNICATION MANAGEMENT
+ */
+#ifndef _IBTA_VOL1_C12_H_
+#define _IBTA_VOL1_C12_H_
+
+#include <rdma/iba.h>
+
+#define CM_FIELD_BLOC(field_struct, byte_offset, bits_offset, width) \
+ IBA_FIELD_BLOC(field_struct, \
+ (byte_offset + sizeof(struct ib_mad_hdr)), bits_offset, \
+ width)
+#define CM_FIELD8_LOC(field_struct, byte_offset, width) \
+ IBA_FIELD8_LOC(field_struct, \
+ (byte_offset + sizeof(struct ib_mad_hdr)), width)
+#define CM_FIELD16_LOC(field_struct, byte_offset, width) \
+ IBA_FIELD16_LOC(field_struct, \
+ (byte_offset + sizeof(struct ib_mad_hdr)), width)
+#define CM_FIELD32_LOC(field_struct, byte_offset, width) \
+ IBA_FIELD32_LOC(field_struct, \
+ (byte_offset + sizeof(struct ib_mad_hdr)), width)
+#define CM_FIELD64_LOC(field_struct, byte_offset) \
+ IBA_FIELD64_LOC(field_struct, (byte_offset + sizeof(struct ib_mad_hdr)))
+#define CM_FIELD_MLOC(field_struct, byte_offset, width, type) \
+ IBA_FIELD_MLOC(field_struct, \
+ (byte_offset + sizeof(struct ib_mad_hdr)), width, type)
+#define CM_STRUCT(field_struct, total_len) \
+ field_struct \
+ { \
+ struct ib_mad_hdr hdr; \
+ u32 _data[(total_len) / 32 + \
+ BUILD_BUG_ON_ZERO((total_len) % 32 != 0)]; \
+ }
+
+/* Table 106 REQ Message Contents */
+#define CM_REQ_LOCAL_COMM_ID CM_FIELD32_LOC(struct cm_req_msg, 0, 32)
+#define CM_REQ_SERVICE_ID CM_FIELD64_LOC(struct cm_req_msg, 8)
+#define CM_REQ_LOCAL_CA_GUID CM_FIELD64_LOC(struct cm_req_msg, 16)
+#define CM_REQ_LOCAL_Q_KEY CM_FIELD32_LOC(struct cm_req_msg, 28, 32)
+#define CM_REQ_LOCAL_QPN CM_FIELD32_LOC(struct cm_req_msg, 32, 24)
+#define CM_REQ_RESPONDER_RESOURCES CM_FIELD8_LOC(struct cm_req_msg, 35, 8)
+#define CM_REQ_LOCAL_EECN CM_FIELD32_LOC(struct cm_req_msg, 36, 24)
+#define CM_REQ_INITIATOR_DEPTH CM_FIELD8_LOC(struct cm_req_msg, 39, 8)
+#define CM_REQ_REMOTE_EECN CM_FIELD32_LOC(struct cm_req_msg, 40, 24)
+#define CM_REQ_REMOTE_CM_RESPONSE_TIMEOUT \
+ CM_FIELD8_LOC(struct cm_req_msg, 43, 5)
+#define CM_REQ_TRANSPORT_SERVICE_TYPE CM_FIELD_BLOC(struct cm_req_msg, 43, 5, 2)
+#define CM_REQ_END_TO_END_FLOW_CONTROL \
+ CM_FIELD_BLOC(struct cm_req_msg, 43, 7, 1)
+#define CM_REQ_STARTING_PSN CM_FIELD32_LOC(struct cm_req_msg, 44, 24)
+#define CM_REQ_LOCAL_CM_RESPONSE_TIMEOUT CM_FIELD8_LOC(struct cm_req_msg, 47, 5)
+#define CM_REQ_RETRY_COUNT CM_FIELD_BLOC(struct cm_req_msg, 47, 5, 3)
+#define CM_REQ_PARTITION_KEY CM_FIELD16_LOC(struct cm_req_msg, 48, 16)
+#define CM_REQ_PATH_PACKET_PAYLOAD_MTU CM_FIELD8_LOC(struct cm_req_msg, 50, 4)
+#define CM_REQ_RDC_EXISTS CM_FIELD_BLOC(struct cm_req_msg, 50, 4, 1)
+#define CM_REQ_RNR_RETRY_COUNT CM_FIELD_BLOC(struct cm_req_msg, 50, 5, 3)
+#define CM_REQ_MAX_CM_RETRIES CM_FIELD8_LOC(struct cm_req_msg, 51, 4)
+#define CM_REQ_SRQ CM_FIELD_BLOC(struct cm_req_msg, 51, 4, 1)
+#define CM_REQ_EXTENDED_TRANSPORT_TYPE \
+ CM_FIELD_BLOC(struct cm_req_msg, 51, 5, 3)
+#define CM_REQ_PRIMARY_LOCAL_PORT_LID CM_FIELD16_LOC(struct cm_req_msg, 52, 16)
+#define CM_REQ_PRIMARY_REMOTE_PORT_LID CM_FIELD16_LOC(struct cm_req_msg, 54, 16)
+#define CM_REQ_PRIMARY_LOCAL_PORT_GID \
+ CM_FIELD_MLOC(struct cm_req_msg, 56, 128, union ib_gid)
+#define CM_REQ_PRIMARY_REMOTE_PORT_GID \
+ CM_FIELD_MLOC(struct cm_req_msg, 72, 128, union ib_gid)
+#define CM_REQ_PRIMARY_FLOW_LABEL CM_FIELD32_LOC(struct cm_req_msg, 88, 20)
+#define CM_REQ_PRIMARY_PACKET_RATE CM_FIELD_BLOC(struct cm_req_msg, 91, 2, 6)
+#define CM_REQ_PRIMARY_TRAFFIC_CLASS CM_FIELD8_LOC(struct cm_req_msg, 92, 8)
+#define CM_REQ_PRIMARY_HOP_LIMIT CM_FIELD8_LOC(struct cm_req_msg, 93, 8)
+#define CM_REQ_PRIMARY_SL CM_FIELD8_LOC(struct cm_req_msg, 94, 4)
+#define CM_REQ_PRIMARY_SUBNET_LOCAL CM_FIELD_BLOC(struct cm_req_msg, 94, 4, 1)
+#define CM_REQ_PRIMARY_LOCAL_ACK_TIMEOUT CM_FIELD8_LOC(struct cm_req_msg, 95, 5)
+#define CM_REQ_ALTERNATE_LOCAL_PORT_LID \
+ CM_FIELD16_LOC(struct cm_req_msg, 96, 16)
+#define CM_REQ_ALTERNATE_REMOTE_PORT_LID \
+ CM_FIELD16_LOC(struct cm_req_msg, 98, 16)
+#define CM_REQ_ALTERNATE_LOCAL_PORT_GID \
+ CM_FIELD_MLOC(struct cm_req_msg, 100, 128, union ib_gid)
+#define CM_REQ_ALTERNATE_REMOTE_PORT_GID \
+ CM_FIELD_MLOC(struct cm_req_msg, 116, 128, union ib_gid)
+#define CM_REQ_ALTERNATE_FLOW_LABEL CM_FIELD32_LOC(struct cm_req_msg, 132, 20)
+#define CM_REQ_ALTERNATE_PACKET_RATE CM_FIELD_BLOC(struct cm_req_msg, 135, 2, 6)
+#define CM_REQ_ALTERNATE_TRAFFIC_CLASS CM_FIELD8_LOC(struct cm_req_msg, 136, 8)
+#define CM_REQ_ALTERNATE_HOP_LIMIT CM_FIELD8_LOC(struct cm_req_msg, 137, 8)
+#define CM_REQ_ALTERNATE_SL CM_FIELD8_LOC(struct cm_req_msg, 138, 4)
+#define CM_REQ_ALTERNATE_SUBNET_LOCAL \
+ CM_FIELD_BLOC(struct cm_req_msg, 138, 4, 1)
+#define CM_REQ_ALTERNATE_LOCAL_ACK_TIMEOUT \
+ CM_FIELD8_LOC(struct cm_req_msg, 139, 5)
+#define CM_REQ_SAP_SUPPORTED CM_FIELD_BLOC(struct cm_req_msg, 139, 5, 1)
+#define CM_REQ_PRIVATE_DATA CM_FIELD_MLOC(struct cm_req_msg, 140, 736, void)
+CM_STRUCT(struct cm_req_msg, 140 * 8 + 736);
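CM_STRUCT() only generates the wire layout (a MAD header followed by an opaque data blob), so message fields are read and written through the IBA accessors rather than named members. A short usage sketch along the lines this header anticipates:

static void example_fill_req(struct cm_req_msg *req, u32 local_comm_id,
			     u32 local_qpn)
{
	IBA_SET(CM_REQ_LOCAL_COMM_ID, req, local_comm_id);
	/* 24-bit field: masking/shifting is handled by FIELD_PREP(). */
	IBA_SET(CM_REQ_LOCAL_QPN, req, local_qpn);
}

static u32 example_read_starting_psn(const struct cm_req_msg *req)
{
	return IBA_GET(CM_REQ_STARTING_PSN, req);
}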
+
+/* Table 107 MRA Message Contents */
+#define CM_MRA_LOCAL_COMM_ID CM_FIELD32_LOC(struct cm_mra_msg, 0, 32)
+#define CM_MRA_REMOTE_COMM_ID CM_FIELD32_LOC(struct cm_mra_msg, 4, 32)
+#define CM_MRA_MESSAGE_MRAED CM_FIELD8_LOC(struct cm_mra_msg, 8, 2)
+#define CM_MRA_SERVICE_TIMEOUT CM_FIELD8_LOC(struct cm_mra_msg, 9, 5)
+#define CM_MRA_PRIVATE_DATA CM_FIELD_MLOC(struct cm_mra_msg, 10, 1776, void)
+CM_STRUCT(struct cm_mra_msg, 10 * 8 + 1776);
+
+/* Table 108 REJ Message Contents */
+#define CM_REJ_LOCAL_COMM_ID CM_FIELD32_LOC(struct cm_rej_msg, 0, 32)
+#define CM_REJ_REMOTE_COMM_ID CM_FIELD32_LOC(struct cm_rej_msg, 4, 32)
+#define CM_REJ_MESSAGE_REJECTED CM_FIELD8_LOC(struct cm_rej_msg, 8, 2)
+#define CM_REJ_REJECTED_INFO_LENGTH CM_FIELD8_LOC(struct cm_rej_msg, 9, 7)
+#define CM_REJ_REASON CM_FIELD16_LOC(struct cm_rej_msg, 10, 16)
+#define CM_REJ_ARI CM_FIELD_MLOC(struct cm_rej_msg, 12, 576, void)
+#define CM_REJ_PRIVATE_DATA CM_FIELD_MLOC(struct cm_rej_msg, 84, 1184, void)
+CM_STRUCT(struct cm_rej_msg, 84 * 8 + 1184);
+
+/* Table 110 REP Message Contents */
+#define CM_REP_LOCAL_COMM_ID CM_FIELD32_LOC(struct cm_rep_msg, 0, 32)
+#define CM_REP_REMOTE_COMM_ID CM_FIELD32_LOC(struct cm_rep_msg, 4, 32)
+#define CM_REP_LOCAL_Q_KEY CM_FIELD32_LOC(struct cm_rep_msg, 8, 32)
+#define CM_REP_LOCAL_QPN CM_FIELD32_LOC(struct cm_rep_msg, 12, 24)
+#define CM_REP_LOCAL_EE_CONTEXT_NUMBER CM_FIELD32_LOC(struct cm_rep_msg, 16, 24)
+#define CM_REP_STARTING_PSN CM_FIELD32_LOC(struct cm_rep_msg, 20, 24)
+#define CM_REP_RESPONDER_RESOURCES CM_FIELD8_LOC(struct cm_rep_msg, 24, 8)
+#define CM_REP_INITIATOR_DEPTH CM_FIELD8_LOC(struct cm_rep_msg, 25, 8)
+#define CM_REP_TARGET_ACK_DELAY CM_FIELD8_LOC(struct cm_rep_msg, 26, 5)
+#define CM_REP_FAILOVER_ACCEPTED CM_FIELD_BLOC(struct cm_rep_msg, 26, 5, 2)
+#define CM_REP_END_TO_END_FLOW_CONTROL \
+ CM_FIELD_BLOC(struct cm_rep_msg, 26, 7, 1)
+#define CM_REP_RNR_RETRY_COUNT CM_FIELD8_LOC(struct cm_rep_msg, 27, 3)
+#define CM_REP_SRQ CM_FIELD_BLOC(struct cm_rep_msg, 27, 3, 1)
+#define CM_REP_LOCAL_CA_GUID CM_FIELD64_LOC(struct cm_rep_msg, 28)
+#define CM_REP_PRIVATE_DATA CM_FIELD_MLOC(struct cm_rep_msg, 36, 1568, void)
+CM_STRUCT(struct cm_rep_msg, 36 * 8 + 1568);
+
+/* Table 111 RTU Message Contents */
+#define CM_RTU_LOCAL_COMM_ID CM_FIELD32_LOC(struct cm_rtu_msg, 0, 32)
+#define CM_RTU_REMOTE_COMM_ID CM_FIELD32_LOC(struct cm_rtu_msg, 4, 32)
+#define CM_RTU_PRIVATE_DATA CM_FIELD_MLOC(struct cm_rtu_msg, 8, 1792, void)
+CM_STRUCT(struct cm_rtu_msg, 8 * 8 + 1792);
+
+/* Table 112 DREQ Message Contents */
+#define CM_DREQ_LOCAL_COMM_ID CM_FIELD32_LOC(struct cm_dreq_msg, 0, 32)
+#define CM_DREQ_REMOTE_COMM_ID CM_FIELD32_LOC(struct cm_dreq_msg, 4, 32)
+#define CM_DREQ_REMOTE_QPN_EECN CM_FIELD32_LOC(struct cm_dreq_msg, 8, 24)
+#define CM_DREQ_PRIVATE_DATA CM_FIELD_MLOC(struct cm_dreq_msg, 12, 1760, void)
+CM_STRUCT(struct cm_dreq_msg, 12 * 8 + 1760);
+
+/* Table 113 DREP Message Contents */
+#define CM_DREP_LOCAL_COMM_ID CM_FIELD32_LOC(struct cm_drep_msg, 0, 32)
+#define CM_DREP_REMOTE_COMM_ID CM_FIELD32_LOC(struct cm_drep_msg, 4, 32)
+#define CM_DREP_PRIVATE_DATA CM_FIELD_MLOC(struct cm_drep_msg, 8, 1792, void)
+CM_STRUCT(struct cm_drep_msg, 8 * 8 + 1792);
+
+/* Table 115 LAP Message Contents */
+#define CM_LAP_LOCAL_COMM_ID CM_FIELD32_LOC(struct cm_lap_msg, 0, 32)
+#define CM_LAP_REMOTE_COMM_ID CM_FIELD32_LOC(struct cm_lap_msg, 4, 32)
+#define CM_LAP_REMOTE_QPN_EECN CM_FIELD32_LOC(struct cm_lap_msg, 12, 24)
+#define CM_LAP_REMOTE_CM_RESPONSE_TIMEOUT \
+ CM_FIELD8_LOC(struct cm_lap_msg, 15, 5)
+#define CM_LAP_ALTERNATE_LOCAL_PORT_LID \
+ CM_FIELD16_LOC(struct cm_lap_msg, 20, 16)
+#define CM_LAP_ALTERNATE_REMOTE_PORT_LID \
+ CM_FIELD16_LOC(struct cm_lap_msg, 22, 16)
+#define CM_LAP_ALTERNATE_LOCAL_PORT_GID \
+ CM_FIELD_MLOC(struct cm_lap_msg, 24, 128, union ib_gid)
+#define CM_LAP_ALTERNATE_REMOTE_PORT_GID \
+ CM_FIELD_MLOC(struct cm_lap_msg, 40, 128, union ib_gid)
+#define CM_LAP_ALTERNATE_FLOW_LABEL CM_FIELD32_LOC(struct cm_lap_msg, 56, 20)
+#define CM_LAP_ALTERNATE_TRAFFIC_CLASS CM_FIELD8_LOC(struct cm_lap_msg, 59, 8)
+#define CM_LAP_ALTERNATE_HOP_LIMIT CM_FIELD8_LOC(struct cm_lap_msg, 60, 8)
+#define CM_LAP_ALTERNATE_PACKET_RATE CM_FIELD_BLOC(struct cm_lap_msg, 61, 2, 6)
+#define CM_LAP_ALTERNATE_SL CM_FIELD8_LOC(struct cm_lap_msg, 62, 4)
+#define CM_LAP_ALTERNATE_SUBNET_LOCAL CM_FIELD_BLOC(struct cm_lap_msg, 62, 4, 1)
+#define CM_LAP_ALTERNATE_LOCAL_ACK_TIMEOUT \
+ CM_FIELD8_LOC(struct cm_lap_msg, 63, 5)
+#define CM_LAP_PRIVATE_DATA CM_FIELD_MLOC(struct cm_lap_msg, 64, 1344, void)
+CM_STRUCT(struct cm_lap_msg, 64 * 8 + 1344);
+
+/* Table 116 APR Message Contents */
+#define CM_APR_LOCAL_COMM_ID CM_FIELD32_LOC(struct cm_apr_msg, 0, 32)
+#define CM_APR_REMOTE_COMM_ID CM_FIELD32_LOC(struct cm_apr_msg, 4, 32)
+#define CM_APR_ADDITIONAL_INFORMATION_LENGTH \
+ CM_FIELD8_LOC(struct cm_apr_msg, 8, 8)
+#define CM_APR_AR_STATUS CM_FIELD8_LOC(struct cm_apr_msg, 9, 8)
+#define CM_APR_ADDITIONAL_INFORMATION \
+ CM_FIELD_MLOC(struct cm_apr_msg, 12, 576, void)
+#define CM_APR_PRIVATE_DATA CM_FIELD_MLOC(struct cm_apr_msg, 84, 1184, void)
+CM_STRUCT(struct cm_apr_msg, 84 * 8 + 1184);
+
+/* Table 119 SIDR_REQ Message Contents */
+#define CM_SIDR_REQ_REQUESTID CM_FIELD32_LOC(struct cm_sidr_req_msg, 0, 32)
+#define CM_SIDR_REQ_PARTITION_KEY CM_FIELD16_LOC(struct cm_sidr_req_msg, 4, 16)
+#define CM_SIDR_REQ_SERVICEID CM_FIELD64_LOC(struct cm_sidr_req_msg, 8)
+#define CM_SIDR_REQ_PRIVATE_DATA \
+ CM_FIELD_MLOC(struct cm_sidr_req_msg, 16, 1728, void)
+CM_STRUCT(struct cm_sidr_req_msg, 16 * 8 + 1728);
+
+/* Table 120 SIDR_REP Message Contents */
+#define CM_SIDR_REP_REQUESTID CM_FIELD32_LOC(struct cm_sidr_rep_msg, 0, 32)
+#define CM_SIDR_REP_STATUS CM_FIELD8_LOC(struct cm_sidr_rep_msg, 4, 8)
+#define CM_SIDR_REP_ADDITIONAL_INFORMATION_LENGTH \
+ CM_FIELD8_LOC(struct cm_sidr_rep_msg, 5, 8)
+#define CM_SIDR_REP_QPN CM_FIELD32_LOC(struct cm_sidr_rep_msg, 8, 24)
+#define CM_SIDR_REP_SERVICEID CM_FIELD64_LOC(struct cm_sidr_rep_msg, 12)
+#define CM_SIDR_REP_Q_KEY CM_FIELD32_LOC(struct cm_sidr_rep_msg, 20, 32)
+#define CM_SIDR_REP_ADDITIONAL_INFORMATION \
+ CM_FIELD_MLOC(struct cm_sidr_rep_msg, 24, 576, void)
+#define CM_SIDR_REP_PRIVATE_DATA \
+ CM_FIELD_MLOC(struct cm_sidr_rep_msg, 96, 1088, void)
+CM_STRUCT(struct cm_sidr_rep_msg, 96 * 8 + 1088);
+
+#endif /* _IBTA_VOL1_C12_H_ */
diff --git a/include/rdma/rdmavt_qp.h b/include/rdma/rdmavt_qp.h
index b550ae89bf85..0d5c70e2d8ab 100644
--- a/include/rdma/rdmavt_qp.h
+++ b/include/rdma/rdmavt_qp.h
@@ -640,34 +640,14 @@ static inline int rvt_cmp_msn(u32 a, u32 b)
return (((int)a) - ((int)b)) << 8;
}
-/**
- * rvt_compute_aeth - compute the AETH (syndrome + MSN)
- * @qp: the queue pair to compute the AETH for
- *
- * Returns the AETH.
- */
__be32 rvt_compute_aeth(struct rvt_qp *qp);
-/**
- * rvt_get_credit - flush the send work queue of a QP
- * @qp: the qp who's send work queue to flush
- * @aeth: the Acknowledge Extended Transport Header
- *
- * The QP s_lock should be held.
- */
void rvt_get_credit(struct rvt_qp *qp, u32 aeth);
-/**
- * rvt_restart_sge - rewind the sge state for a wqe
- * @ss: the sge state pointer
- * @wqe: the wqe to rewind
- * @len: the data length from the start of the wqe in bytes
- *
- * Returns the remaining data length.
- */
u32 rvt_restart_sge(struct rvt_sge_state *ss, struct rvt_swqe *wqe, u32 len);
/**
+ * rvt_div_round_up_mtu - round up divide
* @qp - the qp pair
* @len - the length
*
diff --git a/include/rdma/restrack.h b/include/rdma/restrack.h
index 83df1ec6664e..7682d1bcf789 100644
--- a/include/rdma/restrack.h
+++ b/include/rdma/restrack.h
@@ -156,6 +156,11 @@ int rdma_nl_put_driver_u32_hex(struct sk_buff *msg, const char *name,
int rdma_nl_put_driver_u64(struct sk_buff *msg, const char *name, u64 value);
int rdma_nl_put_driver_u64_hex(struct sk_buff *msg, const char *name,
u64 value);
+int rdma_nl_put_driver_string(struct sk_buff *msg, const char *name,
+ const char *str);
+int rdma_nl_stat_hwcounter_entry(struct sk_buff *msg, const char *name,
+ u64 value);
+
struct rdma_restrack_entry *rdma_restrack_get_byid(struct ib_device *dev,
enum rdma_restrack_type type,
u32 id);
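These helpers pair with the new ib_device_ops.fill_stat_entry hook earlier in this series: a driver can emit named hardware counters (and free-form strings) into the netlink reply that the 'rdma stat' iproute2 command prints. A minimal sketch; the counter value and attribute names are hypothetical:

static int foo_fill_stat_entry(struct sk_buff *msg,
			       struct rdma_restrack_entry *entry)
{
	/* Hypothetical per-entry counter fetched from driver state. */
	u64 cqe_errors = 42;

	if (rdma_nl_stat_hwcounter_entry(msg, "cqe_errors", cqe_errors))
		return -EMSGSIZE;

	return rdma_nl_put_driver_string(msg, "fw_state", "ok");
}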
diff --git a/include/rdma/uverbs_named_ioctl.h b/include/rdma/uverbs_named_ioctl.h
index 3447bfe356d6..6ae6cf8e4c2e 100644
--- a/include/rdma/uverbs_named_ioctl.h
+++ b/include/rdma/uverbs_named_ioctl.h
@@ -76,7 +76,7 @@
#define DECLARE_UVERBS_NAMED_OBJECT(_object_id, _type_attrs, ...) \
static const struct uverbs_method_def *const UVERBS_OBJECT_METHODS( \
_object_id)[] = { __VA_ARGS__ }; \
- const struct uverbs_object_def UVERBS_OBJECT(_object_id) = { \
+ static const struct uverbs_object_def UVERBS_OBJECT(_object_id) = { \
.id = _object_id, \
.type_attrs = &_type_attrs, \
.num_methods = ARRAY_SIZE(UVERBS_OBJECT_METHODS(_object_id)), \
@@ -88,10 +88,10 @@
* identify all uapi methods with a (object,method) tuple. However, they have
* no type pointer.
*/
-#define DECLARE_UVERBS_GLOBAL_METHODS(_object_id, ...) \
+#define DECLARE_UVERBS_GLOBAL_METHODS(_object_id, ...) \
static const struct uverbs_method_def *const UVERBS_OBJECT_METHODS( \
_object_id)[] = { __VA_ARGS__ }; \
- const struct uverbs_object_def UVERBS_OBJECT(_object_id) = { \
+ static const struct uverbs_object_def UVERBS_OBJECT(_object_id) = { \
.id = _object_id, \
.num_methods = ARRAY_SIZE(UVERBS_OBJECT_METHODS(_object_id)), \
.methods = &UVERBS_OBJECT_METHODS(_object_id) \
diff --git a/include/rdma/uverbs_std_types.h b/include/rdma/uverbs_std_types.h
index 05eabfd5d0d3..1b28ce1aba07 100644
--- a/include/rdma/uverbs_std_types.h
+++ b/include/rdma/uverbs_std_types.h
@@ -104,16 +104,6 @@ static inline void uobj_put_write(struct ib_uobject *uobj)
rdma_lookup_put_uobject(uobj, UVERBS_LOOKUP_WRITE);
}
-static inline int __must_check
-uobj_alloc_commit(struct ib_uobject *uobj, struct uverbs_attr_bundle *attrs)
-{
- int ret = rdma_alloc_commit_uobject(uobj, attrs);
-
- if (ret)
- return ret;
- return 0;
-}
-
static inline void uobj_alloc_abort(struct ib_uobject *uobj,
struct uverbs_attr_bundle *attrs)
{
@@ -124,8 +114,7 @@ static inline struct ib_uobject *
__uobj_alloc(const struct uverbs_api_object *obj,
struct uverbs_attr_bundle *attrs, struct ib_device **ib_dev)
{
- struct ib_uobject *uobj =
- rdma_alloc_begin_uobject(obj, attrs->ufile, attrs);
+ struct ib_uobject *uobj = rdma_alloc_begin_uobject(obj, attrs);
if (!IS_ERR(uobj))
*ib_dev = attrs->context->device;
diff --git a/include/rdma/uverbs_types.h b/include/rdma/uverbs_types.h
index d57a5ba00c74..f1cbdae67250 100644
--- a/include/rdma/uverbs_types.h
+++ b/include/rdma/uverbs_types.h
@@ -83,9 +83,9 @@ enum rdma_lookup_mode {
*/
struct uverbs_obj_type_class {
struct ib_uobject *(*alloc_begin)(const struct uverbs_api_object *obj,
- struct ib_uverbs_file *ufile);
+ struct uverbs_attr_bundle *attrs);
/* This consumes the kref on uobj */
- int (*alloc_commit)(struct ib_uobject *uobj);
+ void (*alloc_commit)(struct ib_uobject *uobj);
/* This does not consume the kref on uobj */
void (*alloc_abort)(struct ib_uobject *uobj);
@@ -98,7 +98,6 @@ struct uverbs_obj_type_class {
enum rdma_remove_reason why,
struct uverbs_attr_bundle *attrs);
void (*remove_handle)(struct ib_uobject *uobj);
- u8 needs_kfree_rcu;
};
struct uverbs_obj_type {
@@ -138,23 +137,34 @@ struct ib_uobject *rdma_lookup_get_uobject(const struct uverbs_api_object *obj,
void rdma_lookup_put_uobject(struct ib_uobject *uobj,
enum rdma_lookup_mode mode);
struct ib_uobject *rdma_alloc_begin_uobject(const struct uverbs_api_object *obj,
- struct ib_uverbs_file *ufile,
struct uverbs_attr_bundle *attrs);
void rdma_alloc_abort_uobject(struct ib_uobject *uobj,
struct uverbs_attr_bundle *attrs);
-int __must_check rdma_alloc_commit_uobject(struct ib_uobject *uobj,
- struct uverbs_attr_bundle *attrs);
+void rdma_alloc_commit_uobject(struct ib_uobject *uobj,
+ struct uverbs_attr_bundle *attrs);
+
+/*
+ * uverbs_uobject_get is called in order to increase the reference count on
+ * an uobject. This is useful when a handler wants to keep the uobject's memory
+ * alive, regardless if this uobject is still alive in the context's objects
+ * repository. Objects are put via uverbs_uobject_put.
+ */
+static inline void uverbs_uobject_get(struct ib_uobject *uobject)
+{
+ kref_get(&uobject->ref);
+}
+void uverbs_uobject_put(struct ib_uobject *uobject);
struct uverbs_obj_fd_type {
/*
* In fd based objects, uverbs_obj_type_ops points to generic
* fd operations. In order to specialize the underlying types (e.g.
* completion_channel), we use fops, name and flags for fd creation.
- * context_closed is called when the context is closed either when
- * the driver is removed or the process terminated.
+ * destroy_object is called when the uobject is to be destroyed,
+ * because the driver is removed or the FD is closed.
*/
struct uverbs_obj_type type;
- int (*context_closed)(struct ib_uobject *uobj,
+ int (*destroy_object)(struct ib_uobject *uobj,
enum rdma_remove_reason why);
const struct file_operations *fops;
const char *name;
@@ -163,11 +173,11 @@ struct uverbs_obj_fd_type {
extern const struct uverbs_obj_type_class uverbs_idr_class;
extern const struct uverbs_obj_type_class uverbs_fd_class;
-void uverbs_close_fd(struct file *f);
+int uverbs_uobject_fd_release(struct inode *inode, struct file *filp);
#define UVERBS_BUILD_BUG_ON(cond) (sizeof(char[1 - 2 * !!(cond)]) - \
sizeof(char))
-#define UVERBS_TYPE_ALLOC_FD(_obj_size, _context_closed, _fops, _name, _flags)\
+#define UVERBS_TYPE_ALLOC_FD(_obj_size, _destroy_object, _fops, _name, _flags) \
((&((const struct uverbs_obj_fd_type) \
{.type = { \
.type_class = &uverbs_fd_class, \
@@ -175,7 +185,7 @@ void uverbs_close_fd(struct file *f);
UVERBS_BUILD_BUG_ON((_obj_size) < \
sizeof(struct ib_uobject)), \
}, \
- .context_closed = _context_closed, \
+ .destroy_object = _destroy_object, \
.fops = _fops, \
.name = _name, \
.flags = _flags}))->type)
diff --git a/include/scsi/scsi_cmnd.h b/include/scsi/scsi_cmnd.h
index 91bd749a02f7..a2849bb9cd19 100644
--- a/include/scsi/scsi_cmnd.h
+++ b/include/scsi/scsi_cmnd.h
@@ -63,6 +63,7 @@ struct scsi_pointer {
/* for scmd->state */
#define SCMD_STATE_COMPLETE 0
+#define SCMD_STATE_INFLIGHT 1
struct scsi_cmnd {
struct scsi_request req;
@@ -190,12 +191,12 @@ static inline unsigned scsi_bufflen(struct scsi_cmnd *cmd)
return cmd->sdb.length;
}
-static inline void scsi_set_resid(struct scsi_cmnd *cmd, int resid)
+static inline void scsi_set_resid(struct scsi_cmnd *cmd, unsigned int resid)
{
cmd->req.resid_len = resid;
}
-static inline int scsi_get_resid(struct scsi_cmnd *cmd)
+static inline unsigned int scsi_get_resid(struct scsi_cmnd *cmd)
{
return cmd->req.resid_len;
}
diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
index 202f4d6a4342..f8312a3e5b42 100644
--- a/include/scsi/scsi_device.h
+++ b/include/scsi/scsi_device.h
@@ -140,8 +140,10 @@ struct scsi_device {
const char * rev; /* ... "nullnullnullnull" before scan */
#define SCSI_VPD_PG_LEN 255
+ struct scsi_vpd __rcu *vpd_pg0;
struct scsi_vpd __rcu *vpd_pg83;
struct scsi_vpd __rcu *vpd_pg80;
+ struct scsi_vpd __rcu *vpd_pg89;
unsigned char current_tag; /* current tag */
struct scsi_target *sdev_target; /* used only for single_lun */
@@ -170,6 +172,7 @@ struct scsi_device {
* because we did a bus reset. */
unsigned use_10_for_rw:1; /* first try 10-byte read / write */
unsigned use_10_for_ms:1; /* first try 10-byte mode sense/select */
+ unsigned set_dbd_for_ms:1; /* Set "DBD" field in mode sense */
unsigned no_report_opcodes:1; /* no REPORT SUPPORTED OPERATION CODES */
unsigned no_write_same:1; /* no WRITE SAME command */
unsigned use_16_for_rw:1; /* Use read/write(16) over read/write(10) */
@@ -199,7 +202,8 @@ struct scsi_device {
unsigned broken_fua:1; /* Don't set FUA bit */
unsigned lun_in_cdb:1; /* Store LUN bits in CDB[1] */
unsigned unmap_limit_for_ws:1; /* Use the UNMAP limit for WRITE SAME */
-
+ unsigned rpm_autosuspend:1; /* Enable runtime autosuspend at device
+ * creation time */
atomic_t disk_events_disable_depth; /* disable depth for disk events */
DECLARE_BITMAP(supported_events, SDEV_EVT_MAXBITS); /* supported events */
diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
index 31e0d6ca1eba..f577647bf5f2 100644
--- a/include/scsi/scsi_host.h
+++ b/include/scsi/scsi_host.h
@@ -23,19 +23,6 @@ struct scsi_host_cmd_pool;
struct scsi_transport_template;
-/*
- * The various choices mean:
- * NONE: Self evident. Host adapter is not capable of scatter-gather.
- * ALL: Means that the host adapter module can do scatter-gather,
- * and that there is no limit to the size of the table to which
- * we scatter/gather data. The value we set here is the maximum
- * single element sglist. To use chained sglists, the adapter
- * has to set a value beyond ALL (and correctly use the chain
- * handling API.
- * Anything else: Indicates the maximum number of chains that can be
- * used in one scatter-gather request.
- */
-#define SG_NONE 0
#define SG_ALL SG_CHUNK_SIZE
#define MODE_UNKNOWN 0x00
@@ -345,7 +332,7 @@ struct scsi_host_template {
/*
* This determines if we will use a non-interrupt driven
* or an interrupt driven scheme. It is set to the maximum number
- * of simultaneous commands a given host adapter will accept.
+ * of simultaneous commands a single hw queue in HBA will accept.
*/
int can_queue;
@@ -486,6 +473,9 @@ struct scsi_host_template {
*/
unsigned int cmd_size;
struct scsi_host_cmd_pool *cmd_pool;
+
+ /* Delay for runtime autosuspend */
+ int rpm_autosuspend_delay;
};
/*
@@ -551,7 +541,6 @@ struct Scsi_Host {
/* Area to keep a shared tag map */
struct blk_mq_tag_set tag_set;
- atomic_t host_busy; /* commands actually active on low-level */
atomic_t host_blocked;
unsigned int host_failed; /* commands that failed.
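Taken together with the rpm_autosuspend bit added to struct scsi_device above, the new rpm_autosuspend_delay field is meant to be used roughly as in this hedged sketch: the host template supplies the delay and the LLD opts a device in from its .slave_configure hook (all "example" names and the 2000 ms value are illustrative).

/* Hedged sketch; not taken from any in-tree driver. */
static int example_slave_configure(struct scsi_device *sdev)
{
	sdev->rpm_autosuspend = 1;	/* enable runtime autosuspend at creation */
	return 0;
}

static struct scsi_host_template example_sht = {
	.name			= "example",
	.slave_configure	= example_slave_configure,
	.rpm_autosuspend_delay	= 2000,	/* milliseconds */
};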
diff --git a/include/scsi/scsi_ioctl.h b/include/scsi/scsi_ioctl.h
index 5101e987c0ef..4fe69d863b5d 100644
--- a/include/scsi/scsi_ioctl.h
+++ b/include/scsi/scsi_ioctl.h
@@ -44,6 +44,7 @@ typedef struct scsi_fctargaddress {
int scsi_ioctl_block_when_processing_errors(struct scsi_device *sdev,
int cmd, bool ndelay);
extern int scsi_ioctl(struct scsi_device *, int, void __user *);
+extern int scsi_compat_ioctl(struct scsi_device *sdev, int cmd, void __user *arg);
#endif /* __KERNEL__ */
#endif /* _SCSI_IOCTL_H */
diff --git a/include/scsi/scsicam.h b/include/scsi/scsicam.h
index 57c729254569..08edd603e521 100644
--- a/include/scsi/scsicam.h
+++ b/include/scsi/scsicam.h
@@ -13,8 +13,7 @@
#ifndef SCSICAM_H
#define SCSICAM_H
-extern int scsicam_bios_param (struct block_device *bdev, sector_t capacity, int *ip);
-extern int scsi_partsize(unsigned char *buf, unsigned long capacity,
- unsigned int *cyls, unsigned int *hds, unsigned int *secs);
-extern unsigned char *scsi_bios_ptable(struct block_device *bdev);
+int scsicam_bios_param(struct block_device *bdev, sector_t capacity, int *ip);
+bool scsi_partsize(struct block_device *bdev, sector_t capacity, int geom[3]);
+unsigned char *scsi_bios_ptable(struct block_device *bdev);
#endif /* def SCSICAM_H */
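A hedged sketch of a caller of the reworked scsi_partsize(), which now takes the block device directly and returns a bool, filling geom[] with heads/sectors/cylinders; the .bios_param handler below is illustrative.

/* Illustrative only; example_bios_param() is a hypothetical handler. */
static int example_bios_param(struct scsi_device *sdev,
			      struct block_device *bdev,
			      sector_t capacity, int geom[])
{
	if (scsi_partsize(bdev, capacity, geom))
		return 0;		/* geometry recovered from the partition table */

	/* otherwise fall back to a fixed 64/32 translation */
	geom[0] = 64;			/* heads */
	geom[1] = 32;			/* sectors per track */
	sector_div(capacity, 64 * 32);
	geom[2] = capacity;		/* cylinders */
	return 0;
}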
diff --git a/include/scsi/sg.h b/include/scsi/sg.h
index f91bcca604e4..29c7ad04d2e2 100644
--- a/include/scsi/sg.h
+++ b/include/scsi/sg.h
@@ -68,6 +68,36 @@ typedef struct sg_io_hdr
unsigned int info; /* [o] auxiliary information */
} sg_io_hdr_t; /* 64 bytes long (on i386) */
+#if defined(__KERNEL__)
+#include <linux/compat.h>
+
+struct compat_sg_io_hdr {
+ compat_int_t interface_id; /* [i] 'S' for SCSI generic (required) */
+ compat_int_t dxfer_direction; /* [i] data transfer direction */
+ unsigned char cmd_len; /* [i] SCSI command length ( <= 16 bytes) */
+ unsigned char mx_sb_len; /* [i] max length to write to sbp */
+ unsigned short iovec_count; /* [i] 0 implies no scatter gather */
+ compat_uint_t dxfer_len; /* [i] byte count of data transfer */
+ compat_uint_t dxferp; /* [i], [*io] points to data transfer memory
+ or scatter gather list */
+ compat_uptr_t cmdp; /* [i], [*i] points to command to perform */
+ compat_uptr_t sbp; /* [i], [*o] points to sense_buffer memory */
+ compat_uint_t timeout; /* [i] MAX_UINT->no timeout (unit: millisec) */
+ compat_uint_t flags; /* [i] 0 -> default, see SG_FLAG... */
+ compat_int_t pack_id; /* [i->o] unused internally (normally) */
+ compat_uptr_t usr_ptr; /* [i->o] unused internally */
+ unsigned char status; /* [o] scsi status */
+ unsigned char masked_status; /* [o] shifted, masked scsi status */
+ unsigned char msg_status; /* [o] messaging level data (optional) */
+ unsigned char sb_len_wr; /* [o] byte count actually written to sbp */
+ unsigned short host_status; /* [o] errors from host adapter */
+ unsigned short driver_status; /* [o] errors from software driver */
+ compat_int_t resid; /* [o] dxfer_len - actual_transferred */
+ compat_uint_t duration; /* [o] time taken by cmd (unit: millisec) */
+ compat_uint_t info; /* [o] auxiliary information */
+};
+#endif
+
#define SG_INTERFACE_ID_ORIG 'S'
/* Use negative values to flag difference from original sg_header structure */
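A hedged sketch of how the 32-bit layout above is typically consumed: a compat path copies the structure from user space and widens the pointer-carrying members with compat_ptr(); the helper below is illustrative, not the sg driver's actual code.

/* Illustrative only: get_compat_sg_hdr() is a hypothetical helper. */
static int get_compat_sg_hdr(sg_io_hdr_t *hdr,
			     const struct compat_sg_io_hdr __user *uptr)
{
	struct compat_sg_io_hdr h32;

	if (copy_from_user(&h32, uptr, sizeof(h32)))
		return -EFAULT;

	hdr->interface_id    = h32.interface_id;
	hdr->dxfer_direction = h32.dxfer_direction;
	hdr->cmd_len         = h32.cmd_len;
	hdr->mx_sb_len       = h32.mx_sb_len;
	hdr->iovec_count     = h32.iovec_count;
	hdr->dxfer_len       = h32.dxfer_len;
	hdr->dxferp          = compat_ptr(h32.dxferp);
	hdr->cmdp            = compat_ptr(h32.cmdp);
	hdr->sbp             = compat_ptr(h32.sbp);
	hdr->timeout         = h32.timeout;
	hdr->flags           = h32.flags;
	hdr->pack_id         = h32.pack_id;
	hdr->usr_ptr         = compat_ptr(h32.usr_ptr);
	return 0;
}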
diff --git a/include/soc/fsl/cpm.h b/include/soc/fsl/cpm.h
new file mode 100644
index 000000000000..4c24ea8209bb
--- /dev/null
+++ b/include/soc/fsl/cpm.h
@@ -0,0 +1,171 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __CPM_H
+#define __CPM_H
+
+#include <linux/compiler.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/of.h>
+#include <soc/fsl/qe/qe.h>
+
+/*
+ * SPI Parameter RAM common to QE and CPM.
+ */
+struct spi_pram {
+ __be16 rbase; /* Rx Buffer descriptor base address */
+ __be16 tbase; /* Tx Buffer descriptor base address */
+ u8 rfcr; /* Rx function code */
+ u8 tfcr; /* Tx function code */
+ __be16 mrblr; /* Max receive buffer length */
+ __be32 rstate; /* Internal */
+ __be32 rdp; /* Internal */
+ __be16 rbptr; /* Internal */
+ __be16 rbc; /* Internal */
+ __be32 rxtmp; /* Internal */
+ __be32 tstate; /* Internal */
+ __be32 tdp; /* Internal */
+ __be16 tbptr; /* Internal */
+ __be16 tbc; /* Internal */
+ __be32 txtmp; /* Internal */
+ __be32 res; /* Tx temp. */
+ __be16 rpbase; /* Relocation pointer (CPM1 only) */
+ __be16 res1; /* Reserved */
+};
+
+/*
+ * USB Controller pram common to QE and CPM.
+ */
+struct usb_ctlr {
+ u8 usb_usmod;
+ u8 usb_usadr;
+ u8 usb_uscom;
+ u8 res1[1];
+ __be16 usb_usep[4];
+ u8 res2[4];
+ __be16 usb_usber;
+ u8 res3[2];
+ __be16 usb_usbmr;
+ u8 res4[1];
+ u8 usb_usbs;
+ /* Fields down below are QE-only */
+ __be16 usb_ussft;
+ u8 res5[2];
+ __be16 usb_usfrn;
+ u8 res6[0x22];
+} __attribute__ ((packed));
+
+/*
+ * Function code bits, usually generic to devices.
+ */
+#ifdef CONFIG_CPM1
+#define CPMFCR_GBL ((u_char)0x00) /* Flag doesn't exist in CPM1 */
+#define CPMFCR_TC2 ((u_char)0x00) /* Flag doesn't exist in CPM1 */
+#define CPMFCR_DTB ((u_char)0x00) /* Flag doesn't exist in CPM1 */
+#define CPMFCR_BDB ((u_char)0x00) /* Flag doesn't exist in CPM1 */
+#else
+#define CPMFCR_GBL ((u_char)0x20) /* Set memory snooping */
+#define CPMFCR_TC2 ((u_char)0x04) /* Transfer code 2 value */
+#define CPMFCR_DTB ((u_char)0x02) /* Use local bus for data when set */
+#define CPMFCR_BDB ((u_char)0x01) /* Use local bus for BD when set */
+#endif
+#define CPMFCR_EB ((u_char)0x10) /* Set big endian byte order */
+
+/* Opcodes common to CPM1 and CPM2
+*/
+#define CPM_CR_INIT_TRX ((ushort)0x0000)
+#define CPM_CR_INIT_RX ((ushort)0x0001)
+#define CPM_CR_INIT_TX ((ushort)0x0002)
+#define CPM_CR_HUNT_MODE ((ushort)0x0003)
+#define CPM_CR_STOP_TX ((ushort)0x0004)
+#define CPM_CR_GRA_STOP_TX ((ushort)0x0005)
+#define CPM_CR_RESTART_TX ((ushort)0x0006)
+#define CPM_CR_CLOSE_RX_BD ((ushort)0x0007)
+#define CPM_CR_SET_GADDR ((ushort)0x0008)
+#define CPM_CR_SET_TIMER ((ushort)0x0008)
+#define CPM_CR_STOP_IDMA ((ushort)0x000b)
+
+/* Buffer descriptors used by many of the CPM protocols. */
+typedef struct cpm_buf_desc {
+ ushort cbd_sc; /* Status and Control */
+ ushort cbd_datlen; /* Data length in buffer */
+ uint cbd_bufaddr; /* Buffer address in host memory */
+} cbd_t;
+
+/* Buffer descriptor control/status used by serial
+ */
+
+#define BD_SC_EMPTY (0x8000) /* Receive is empty */
+#define BD_SC_READY (0x8000) /* Transmit is ready */
+#define BD_SC_WRAP (0x2000) /* Last buffer descriptor */
+#define BD_SC_INTRPT (0x1000) /* Interrupt on change */
+#define BD_SC_LAST (0x0800) /* Last buffer in frame */
+#define BD_SC_TC (0x0400) /* Transmit CRC */
+#define BD_SC_CM (0x0200) /* Continuous mode */
+#define BD_SC_ID (0x0100) /* Rec'd too many idles */
+#define BD_SC_P (0x0100) /* xmt preamble */
+#define BD_SC_BR (0x0020) /* Break received */
+#define BD_SC_FR (0x0010) /* Framing error */
+#define BD_SC_PR (0x0008) /* Parity error */
+#define BD_SC_NAK (0x0004) /* NAK - did not respond */
+#define BD_SC_OV (0x0002) /* Overrun */
+#define BD_SC_UN (0x0002) /* Underrun */
+#define BD_SC_CD (0x0001) /* */
+#define BD_SC_CL (0x0001) /* Collision */
+
+/* Buffer descriptor control/status used by Ethernet receive.
+ * Common to SCC and FCC.
+ */
+#define BD_ENET_RX_EMPTY (0x8000)
+#define BD_ENET_RX_WRAP (0x2000)
+#define BD_ENET_RX_INTR (0x1000)
+#define BD_ENET_RX_LAST (0x0800)
+#define BD_ENET_RX_FIRST (0x0400)
+#define BD_ENET_RX_MISS (0x0100)
+#define BD_ENET_RX_BC (0x0080) /* FCC Only */
+#define BD_ENET_RX_MC (0x0040) /* FCC Only */
+#define BD_ENET_RX_LG (0x0020)
+#define BD_ENET_RX_NO (0x0010)
+#define BD_ENET_RX_SH (0x0008)
+#define BD_ENET_RX_CR (0x0004)
+#define BD_ENET_RX_OV (0x0002)
+#define BD_ENET_RX_CL (0x0001)
+#define BD_ENET_RX_STATS (0x01ff) /* All status bits */
+
+/* Buffer descriptor control/status used by Ethernet transmit.
+ * Common to SCC and FCC.
+ */
+#define BD_ENET_TX_READY (0x8000)
+#define BD_ENET_TX_PAD (0x4000)
+#define BD_ENET_TX_WRAP (0x2000)
+#define BD_ENET_TX_INTR (0x1000)
+#define BD_ENET_TX_LAST (0x0800)
+#define BD_ENET_TX_TC (0x0400)
+#define BD_ENET_TX_DEF (0x0200)
+#define BD_ENET_TX_HB (0x0100)
+#define BD_ENET_TX_LC (0x0080)
+#define BD_ENET_TX_RL (0x0040)
+#define BD_ENET_TX_RCMASK (0x003c)
+#define BD_ENET_TX_UN (0x0002)
+#define BD_ENET_TX_CSL (0x0001)
+#define BD_ENET_TX_STATS (0x03ff) /* All status bits */
+
+/* Buffer descriptor control/status used by Transparent mode SCC.
+ */
+#define BD_SCC_TX_LAST (0x0800)
+
+/* Buffer descriptor control/status used by I2C.
+ */
+#define BD_I2C_START (0x0400)
+
+#ifdef CONFIG_CPM
+int cpm_command(u32 command, u8 opcode);
+#else
+static inline int cpm_command(u32 command, u8 opcode)
+{
+ return -ENOSYS;
+}
+#endif /* CONFIG_CPM */
+
+int cpm2_gpiochip_add32(struct device *dev);
+
+#endif
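A hedged sketch of a consumer of the interface above: the command word is controller- and channel-specific, while the opcode comes from the common set defined in this header; when CONFIG_CPM is off the stub makes the call fail cleanly.

/* Illustrative only; "chan_cmd" would come from the controller binding. */
static int example_graceful_stop_tx(u32 chan_cmd)
{
	/* returns -ENOSYS from the stub above when CONFIG_CPM is not set */
	return cpm_command(chan_cmd, CPM_CR_GRA_STOP_TX);
}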
diff --git a/include/soc/fsl/qe/qe.h b/include/soc/fsl/qe/qe.h
index c1036d16ed03..e282ac01ec08 100644
--- a/include/soc/fsl/qe/qe.h
+++ b/include/soc/fsl/qe/qe.h
@@ -17,7 +17,7 @@
#include <linux/spinlock.h>
#include <linux/errno.h>
#include <linux/err.h>
-#include <asm/cpm.h>
+#include <soc/fsl/cpm.h>
#include <soc/fsl/qe/immap_qe.h>
#include <linux/of.h>
#include <linux/of_address.h>
@@ -98,26 +98,25 @@ static inline void qe_reset(void) {}
int cpm_muram_init(void);
#if defined(CONFIG_CPM) || defined(CONFIG_QUICC_ENGINE)
-unsigned long cpm_muram_alloc(unsigned long size, unsigned long align);
-int cpm_muram_free(unsigned long offset);
-unsigned long cpm_muram_alloc_fixed(unsigned long offset, unsigned long size);
+s32 cpm_muram_alloc(unsigned long size, unsigned long align);
+void cpm_muram_free(s32 offset);
+s32 cpm_muram_alloc_fixed(unsigned long offset, unsigned long size);
void __iomem *cpm_muram_addr(unsigned long offset);
unsigned long cpm_muram_offset(void __iomem *addr);
dma_addr_t cpm_muram_dma(void __iomem *addr);
#else
-static inline unsigned long cpm_muram_alloc(unsigned long size,
- unsigned long align)
+static inline s32 cpm_muram_alloc(unsigned long size,
+ unsigned long align)
{
return -ENOSYS;
}
-static inline int cpm_muram_free(unsigned long offset)
+static inline void cpm_muram_free(s32 offset)
{
- return -ENOSYS;
}
-static inline unsigned long cpm_muram_alloc_fixed(unsigned long offset,
- unsigned long size)
+static inline s32 cpm_muram_alloc_fixed(unsigned long offset,
+ unsigned long size)
{
return -ENOSYS;
}
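With the switch to a signed return type, callers can now check for allocation failure directly; a hedged sketch (size and alignment values are illustrative):

/* Illustrative only; not taken from an in-tree driver. */
static void __iomem *example_alloc_pram(void)
{
	s32 offset = cpm_muram_alloc(64, 8);	/* 64 bytes, 8-byte aligned */

	if (offset < 0)
		return NULL;			/* offset now carries a -errno */
	return cpm_muram_addr(offset);
}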
@@ -241,21 +240,37 @@ static inline int qe_alive_during_sleep(void)
#define qe_muram_offset cpm_muram_offset
#define qe_muram_dma cpm_muram_dma
-#define qe_setbits32(_addr, _v) iowrite32be(ioread32be(_addr) | (_v), (_addr))
-#define qe_clrbits32(_addr, _v) iowrite32be(ioread32be(_addr) & ~(_v), (_addr))
+#ifdef CONFIG_PPC32
+#define qe_iowrite8(val, addr) out_8(addr, val)
+#define qe_iowrite16be(val, addr) out_be16(addr, val)
+#define qe_iowrite32be(val, addr) out_be32(addr, val)
+#define qe_ioread8(addr) in_8(addr)
+#define qe_ioread16be(addr) in_be16(addr)
+#define qe_ioread32be(addr) in_be32(addr)
+#else
+#define qe_iowrite8(val, addr) iowrite8(val, addr)
+#define qe_iowrite16be(val, addr) iowrite16be(val, addr)
+#define qe_iowrite32be(val, addr) iowrite32be(val, addr)
+#define qe_ioread8(addr) ioread8(addr)
+#define qe_ioread16be(addr) ioread16be(addr)
+#define qe_ioread32be(addr) ioread32be(addr)
+#endif
+
+#define qe_setbits_be32(_addr, _v) qe_iowrite32be(qe_ioread32be(_addr) | (_v), (_addr))
+#define qe_clrbits_be32(_addr, _v) qe_iowrite32be(qe_ioread32be(_addr) & ~(_v), (_addr))
-#define qe_setbits16(_addr, _v) iowrite16be(ioread16be(_addr) | (_v), (_addr))
-#define qe_clrbits16(_addr, _v) iowrite16be(ioread16be(_addr) & ~(_v), (_addr))
+#define qe_setbits_be16(_addr, _v) qe_iowrite16be(qe_ioread16be(_addr) | (_v), (_addr))
+#define qe_clrbits_be16(_addr, _v) qe_iowrite16be(qe_ioread16be(_addr) & ~(_v), (_addr))
-#define qe_setbits8(_addr, _v) iowrite8(ioread8(_addr) | (_v), (_addr))
-#define qe_clrbits8(_addr, _v) iowrite8(ioread8(_addr) & ~(_v), (_addr))
+#define qe_setbits_8(_addr, _v) qe_iowrite8(qe_ioread8(_addr) | (_v), (_addr))
+#define qe_clrbits_8(_addr, _v) qe_iowrite8(qe_ioread8(_addr) & ~(_v), (_addr))
-#define qe_clrsetbits32(addr, clear, set) \
- iowrite32be((ioread32be(addr) & ~(clear)) | (set), (addr))
-#define qe_clrsetbits16(addr, clear, set) \
- iowrite16be((ioread16be(addr) & ~(clear)) | (set), (addr))
-#define qe_clrsetbits8(addr, clear, set) \
- iowrite8((ioread8(addr) & ~(clear)) | (set), (addr))
+#define qe_clrsetbits_be32(addr, clear, set) \
+ qe_iowrite32be((qe_ioread32be(addr) & ~(clear)) | (set), (addr))
+#define qe_clrsetbits_be16(addr, clear, set) \
+ qe_iowrite16be((qe_ioread16be(addr) & ~(clear)) | (set), (addr))
+#define qe_clrsetbits_8(addr, clear, set) \
+ qe_iowrite8((qe_ioread8(addr) & ~(clear)) | (set), (addr))
/* Structure that defines QE firmware binary files.
*
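A hedged sketch of the renamed accessors in use: on PPC32 they resolve to the native out_be32()/in_be32() family, elsewhere to iowrite32be()/ioread32be(); the register pointer and bit values below are placeholders.

/* Illustrative only; "reg" points at some big-endian QE/CPM MMIO register. */
static void example_poke(u32 __iomem *reg)
{
	qe_setbits_be32(reg, BIT(0));			/* read-modify-write set */
	qe_clrsetbits_be32(reg, GENMASK(3, 0), 0x5);	/* clear a field, set a value */
}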
diff --git a/include/soc/fsl/qe/qe_ic.h b/include/soc/fsl/qe/qe_ic.h
deleted file mode 100644
index 714a9b890d8d..000000000000
--- a/include/soc/fsl/qe/qe_ic.h
+++ /dev/null
@@ -1,135 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-or-later */
-/*
- * Copyright (C) 2006 Freescale Semiconductor, Inc. All rights reserved.
- *
- * Authors: Shlomi Gridish <gridish@freescale.com>
- * Li Yang <leoli@freescale.com>
- *
- * Description:
- * QE IC external definitions and structure.
- */
-#ifndef _ASM_POWERPC_QE_IC_H
-#define _ASM_POWERPC_QE_IC_H
-
-#include <linux/irq.h>
-
-struct device_node;
-struct qe_ic;
-
-#define NUM_OF_QE_IC_GROUPS 6
-
-/* Flags when we init the QE IC */
-#define QE_IC_SPREADMODE_GRP_W 0x00000001
-#define QE_IC_SPREADMODE_GRP_X 0x00000002
-#define QE_IC_SPREADMODE_GRP_Y 0x00000004
-#define QE_IC_SPREADMODE_GRP_Z 0x00000008
-#define QE_IC_SPREADMODE_GRP_RISCA 0x00000010
-#define QE_IC_SPREADMODE_GRP_RISCB 0x00000020
-
-#define QE_IC_LOW_SIGNAL 0x00000100
-#define QE_IC_HIGH_SIGNAL 0x00000200
-
-#define QE_IC_GRP_W_PRI0_DEST_SIGNAL_HIGH 0x00001000
-#define QE_IC_GRP_W_PRI1_DEST_SIGNAL_HIGH 0x00002000
-#define QE_IC_GRP_X_PRI0_DEST_SIGNAL_HIGH 0x00004000
-#define QE_IC_GRP_X_PRI1_DEST_SIGNAL_HIGH 0x00008000
-#define QE_IC_GRP_Y_PRI0_DEST_SIGNAL_HIGH 0x00010000
-#define QE_IC_GRP_Y_PRI1_DEST_SIGNAL_HIGH 0x00020000
-#define QE_IC_GRP_Z_PRI0_DEST_SIGNAL_HIGH 0x00040000
-#define QE_IC_GRP_Z_PRI1_DEST_SIGNAL_HIGH 0x00080000
-#define QE_IC_GRP_RISCA_PRI0_DEST_SIGNAL_HIGH 0x00100000
-#define QE_IC_GRP_RISCA_PRI1_DEST_SIGNAL_HIGH 0x00200000
-#define QE_IC_GRP_RISCB_PRI0_DEST_SIGNAL_HIGH 0x00400000
-#define QE_IC_GRP_RISCB_PRI1_DEST_SIGNAL_HIGH 0x00800000
-#define QE_IC_GRP_W_DEST_SIGNAL_SHIFT (12)
-
-/* QE interrupt sources groups */
-enum qe_ic_grp_id {
- QE_IC_GRP_W = 0, /* QE interrupt controller group W */
- QE_IC_GRP_X, /* QE interrupt controller group X */
- QE_IC_GRP_Y, /* QE interrupt controller group Y */
- QE_IC_GRP_Z, /* QE interrupt controller group Z */
- QE_IC_GRP_RISCA, /* QE interrupt controller RISC group A */
- QE_IC_GRP_RISCB /* QE interrupt controller RISC group B */
-};
-
-#ifdef CONFIG_QUICC_ENGINE
-void qe_ic_init(struct device_node *node, unsigned int flags,
- void (*low_handler)(struct irq_desc *desc),
- void (*high_handler)(struct irq_desc *desc));
-unsigned int qe_ic_get_low_irq(struct qe_ic *qe_ic);
-unsigned int qe_ic_get_high_irq(struct qe_ic *qe_ic);
-#else
-static inline void qe_ic_init(struct device_node *node, unsigned int flags,
- void (*low_handler)(struct irq_desc *desc),
- void (*high_handler)(struct irq_desc *desc))
-{}
-static inline unsigned int qe_ic_get_low_irq(struct qe_ic *qe_ic)
-{ return 0; }
-static inline unsigned int qe_ic_get_high_irq(struct qe_ic *qe_ic)
-{ return 0; }
-#endif /* CONFIG_QUICC_ENGINE */
-
-void qe_ic_set_highest_priority(unsigned int virq, int high);
-int qe_ic_set_priority(unsigned int virq, unsigned int priority);
-int qe_ic_set_high_priority(unsigned int virq, unsigned int priority, int high);
-
-static inline void qe_ic_cascade_low_ipic(struct irq_desc *desc)
-{
- struct qe_ic *qe_ic = irq_desc_get_handler_data(desc);
- unsigned int cascade_irq = qe_ic_get_low_irq(qe_ic);
-
- if (cascade_irq != NO_IRQ)
- generic_handle_irq(cascade_irq);
-}
-
-static inline void qe_ic_cascade_high_ipic(struct irq_desc *desc)
-{
- struct qe_ic *qe_ic = irq_desc_get_handler_data(desc);
- unsigned int cascade_irq = qe_ic_get_high_irq(qe_ic);
-
- if (cascade_irq != NO_IRQ)
- generic_handle_irq(cascade_irq);
-}
-
-static inline void qe_ic_cascade_low_mpic(struct irq_desc *desc)
-{
- struct qe_ic *qe_ic = irq_desc_get_handler_data(desc);
- unsigned int cascade_irq = qe_ic_get_low_irq(qe_ic);
- struct irq_chip *chip = irq_desc_get_chip(desc);
-
- if (cascade_irq != NO_IRQ)
- generic_handle_irq(cascade_irq);
-
- chip->irq_eoi(&desc->irq_data);
-}
-
-static inline void qe_ic_cascade_high_mpic(struct irq_desc *desc)
-{
- struct qe_ic *qe_ic = irq_desc_get_handler_data(desc);
- unsigned int cascade_irq = qe_ic_get_high_irq(qe_ic);
- struct irq_chip *chip = irq_desc_get_chip(desc);
-
- if (cascade_irq != NO_IRQ)
- generic_handle_irq(cascade_irq);
-
- chip->irq_eoi(&desc->irq_data);
-}
-
-static inline void qe_ic_cascade_muxed_mpic(struct irq_desc *desc)
-{
- struct qe_ic *qe_ic = irq_desc_get_handler_data(desc);
- unsigned int cascade_irq;
- struct irq_chip *chip = irq_desc_get_chip(desc);
-
- cascade_irq = qe_ic_get_high_irq(qe_ic);
- if (cascade_irq == NO_IRQ)
- cascade_irq = qe_ic_get_low_irq(qe_ic);
-
- if (cascade_irq != NO_IRQ)
- generic_handle_irq(cascade_irq);
-
- chip->irq_eoi(&desc->irq_data);
-}
-
-#endif /* _ASM_POWERPC_QE_IC_H */
diff --git a/include/soc/fsl/qe/ucc_fast.h b/include/soc/fsl/qe/ucc_fast.h
index e9cc46042a83..ba0e838f962a 100644
--- a/include/soc/fsl/qe/ucc_fast.h
+++ b/include/soc/fsl/qe/ucc_fast.h
@@ -188,9 +188,9 @@ struct ucc_fast_private {
int stopped_tx; /* Whether channel has been stopped for Tx
(STOP_TX, etc.) */
int stopped_rx; /* Whether channel has been stopped for Rx */
- u32 ucc_fast_tx_virtual_fifo_base_offset;/* pointer to base of Tx
+ s32 ucc_fast_tx_virtual_fifo_base_offset;/* pointer to base of Tx
virtual fifo */
- u32 ucc_fast_rx_virtual_fifo_base_offset;/* pointer to base of Rx
+ s32 ucc_fast_rx_virtual_fifo_base_offset;/* pointer to base of Rx
virtual fifo */
#ifdef STATISTICS
u32 tx_frames; /* Transmitted frames counter. */
diff --git a/include/soc/fsl/qe/ucc_slow.h b/include/soc/fsl/qe/ucc_slow.h
index 8696fdea2ae9..d187a6be83bc 100644
--- a/include/soc/fsl/qe/ucc_slow.h
+++ b/include/soc/fsl/qe/ucc_slow.h
@@ -185,7 +185,7 @@ struct ucc_slow_private {
struct ucc_slow_info *us_info;
struct ucc_slow __iomem *us_regs; /* Ptr to memory map of UCC regs */
struct ucc_slow_pram *us_pram; /* a pointer to the parameter RAM */
- u32 us_pram_offset;
+ s32 us_pram_offset;
int enabled_tx; /* Whether channel is enabled for Tx (ENT) */
int enabled_rx; /* Whether channel is enabled for Rx (ENR) */
int stopped_tx; /* Whether channel has been stopped for Tx
@@ -194,8 +194,8 @@ struct ucc_slow_private {
struct list_head confQ; /* frames passed to chip waiting for tx */
u32 first_tx_bd_mask; /* mask is used in Tx routine to save status
and length for first BD in a frame */
- u32 tx_base_offset; /* first BD in Tx BD table offset (In MURAM) */
- u32 rx_base_offset; /* first BD in Rx BD table offset (In MURAM) */
+ s32 tx_base_offset; /* first BD in Tx BD table offset (In MURAM) */
+ s32 rx_base_offset; /* first BD in Rx BD table offset (In MURAM) */
struct qe_bd *confBd; /* next BD for confirm after Tx */
struct qe_bd *tx_bd; /* next BD for new Tx request */
struct qe_bd *rx_bd; /* next BD to collect after Rx */
diff --git a/include/soc/fsl/qman.h b/include/soc/fsl/qman.h
index aa31c05a103a..cfe00e08e85b 100644
--- a/include/soc/fsl/qman.h
+++ b/include/soc/fsl/qman.h
@@ -32,6 +32,7 @@
#define __FSL_QMAN_H
#include <linux/bitops.h>
+#include <linux/device.h>
/* Hardware constants */
#define QM_CHANNEL_SWPORTAL0 0
@@ -915,6 +916,16 @@ u16 qman_affine_channel(int cpu);
struct qman_portal *qman_get_affine_portal(int cpu);
/**
+ * qman_start_using_portal - register a device link for the portal user
+ * @p: the portal that will be in use
+ * @dev: the device that will use the portal
+ *
+ * Makes sure that the devices that use the portal are unbound when the
+ * portal is unbound
+ */
+int qman_start_using_portal(struct qman_portal *p, struct device *dev);
+
+/**
* qman_p_poll_dqrr - process DQRR (fast-path) entries
* @limit: the maximum number of DQRR entries to process
*
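A hedged sketch of the new portal/consumer device link in use: a consumer grabs an affine portal and registers itself so it is unbound together with the portal (names are illustrative).

/* Illustrative only; "dev" is the consumer's struct device. */
static int example_bind_portal(struct device *dev, int cpu)
{
	struct qman_portal *p = qman_get_affine_portal(cpu);

	return qman_start_using_portal(p, dev);
}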
diff --git a/include/soc/mscc/ocelot.h b/include/soc/mscc/ocelot.h
new file mode 100644
index 000000000000..068f96b1a83e
--- /dev/null
+++ b/include/soc/mscc/ocelot.h
@@ -0,0 +1,545 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/* Copyright (c) 2017 Microsemi Corporation
+ */
+
+#ifndef _SOC_MSCC_OCELOT_H
+#define _SOC_MSCC_OCELOT_H
+
+#include <linux/ptp_clock_kernel.h>
+#include <linux/net_tstamp.h>
+#include <linux/if_vlan.h>
+#include <linux/regmap.h>
+#include <net/dsa.h>
+
+#define IFH_INJ_BYPASS BIT(31)
+#define IFH_INJ_POP_CNT_DISABLE (3 << 28)
+
+#define IFH_TAG_TYPE_C 0
+#define IFH_TAG_TYPE_S 1
+
+#define IFH_REW_OP_NOOP 0x0
+#define IFH_REW_OP_DSCP 0x1
+#define IFH_REW_OP_ONE_STEP_PTP 0x2
+#define IFH_REW_OP_TWO_STEP_PTP 0x3
+#define IFH_REW_OP_ORIGIN_PTP 0x5
+
+#define OCELOT_TAG_LEN 16
+#define OCELOT_SHORT_PREFIX_LEN 4
+#define OCELOT_LONG_PREFIX_LEN 16
+
+#define OCELOT_SPEED_2500 0
+#define OCELOT_SPEED_1000 1
+#define OCELOT_SPEED_100 2
+#define OCELOT_SPEED_10 3
+
+#define TARGET_OFFSET 24
+#define REG_MASK GENMASK(TARGET_OFFSET - 1, 0)
+#define REG(reg, offset) [reg & REG_MASK] = offset
+
+#define REG_RESERVED_ADDR 0xffffffff
+#define REG_RESERVED(reg) REG(reg, REG_RESERVED_ADDR)
+
+enum ocelot_target {
+ ANA = 1,
+ QS,
+ QSYS,
+ REW,
+ SYS,
+ S2,
+ HSIO,
+ PTP,
+ GCB,
+ TARGET_MAX,
+};
+
+enum ocelot_reg {
+ ANA_ADVLEARN = ANA << TARGET_OFFSET,
+ ANA_VLANMASK,
+ ANA_PORT_B_DOMAIN,
+ ANA_ANAGEFIL,
+ ANA_ANEVENTS,
+ ANA_STORMLIMIT_BURST,
+ ANA_STORMLIMIT_CFG,
+ ANA_ISOLATED_PORTS,
+ ANA_COMMUNITY_PORTS,
+ ANA_AUTOAGE,
+ ANA_MACTOPTIONS,
+ ANA_LEARNDISC,
+ ANA_AGENCTRL,
+ ANA_MIRRORPORTS,
+ ANA_EMIRRORPORTS,
+ ANA_FLOODING,
+ ANA_FLOODING_IPMC,
+ ANA_SFLOW_CFG,
+ ANA_PORT_MODE,
+ ANA_CUT_THRU_CFG,
+ ANA_PGID_PGID,
+ ANA_TABLES_ANMOVED,
+ ANA_TABLES_MACHDATA,
+ ANA_TABLES_MACLDATA,
+ ANA_TABLES_STREAMDATA,
+ ANA_TABLES_MACACCESS,
+ ANA_TABLES_MACTINDX,
+ ANA_TABLES_VLANACCESS,
+ ANA_TABLES_VLANTIDX,
+ ANA_TABLES_ISDXACCESS,
+ ANA_TABLES_ISDXTIDX,
+ ANA_TABLES_ENTRYLIM,
+ ANA_TABLES_PTP_ID_HIGH,
+ ANA_TABLES_PTP_ID_LOW,
+ ANA_TABLES_STREAMACCESS,
+ ANA_TABLES_STREAMTIDX,
+ ANA_TABLES_SEQ_HISTORY,
+ ANA_TABLES_SEQ_MASK,
+ ANA_TABLES_SFID_MASK,
+ ANA_TABLES_SFIDACCESS,
+ ANA_TABLES_SFIDTIDX,
+ ANA_MSTI_STATE,
+ ANA_OAM_UPM_LM_CNT,
+ ANA_SG_ACCESS_CTRL,
+ ANA_SG_CONFIG_REG_1,
+ ANA_SG_CONFIG_REG_2,
+ ANA_SG_CONFIG_REG_3,
+ ANA_SG_CONFIG_REG_4,
+ ANA_SG_CONFIG_REG_5,
+ ANA_SG_GCL_GS_CONFIG,
+ ANA_SG_GCL_TI_CONFIG,
+ ANA_SG_STATUS_REG_1,
+ ANA_SG_STATUS_REG_2,
+ ANA_SG_STATUS_REG_3,
+ ANA_PORT_VLAN_CFG,
+ ANA_PORT_DROP_CFG,
+ ANA_PORT_QOS_CFG,
+ ANA_PORT_VCAP_CFG,
+ ANA_PORT_VCAP_S1_KEY_CFG,
+ ANA_PORT_VCAP_S2_CFG,
+ ANA_PORT_PCP_DEI_MAP,
+ ANA_PORT_CPU_FWD_CFG,
+ ANA_PORT_CPU_FWD_BPDU_CFG,
+ ANA_PORT_CPU_FWD_GARP_CFG,
+ ANA_PORT_CPU_FWD_CCM_CFG,
+ ANA_PORT_PORT_CFG,
+ ANA_PORT_POL_CFG,
+ ANA_PORT_PTP_CFG,
+ ANA_PORT_PTP_DLY1_CFG,
+ ANA_PORT_PTP_DLY2_CFG,
+ ANA_PORT_SFID_CFG,
+ ANA_PFC_PFC_CFG,
+ ANA_PFC_PFC_TIMER,
+ ANA_IPT_OAM_MEP_CFG,
+ ANA_IPT_IPT,
+ ANA_PPT_PPT,
+ ANA_FID_MAP_FID_MAP,
+ ANA_AGGR_CFG,
+ ANA_CPUQ_CFG,
+ ANA_CPUQ_CFG2,
+ ANA_CPUQ_8021_CFG,
+ ANA_DSCP_CFG,
+ ANA_DSCP_REWR_CFG,
+ ANA_VCAP_RNG_TYPE_CFG,
+ ANA_VCAP_RNG_VAL_CFG,
+ ANA_VRAP_CFG,
+ ANA_VRAP_HDR_DATA,
+ ANA_VRAP_HDR_MASK,
+ ANA_DISCARD_CFG,
+ ANA_FID_CFG,
+ ANA_POL_PIR_CFG,
+ ANA_POL_CIR_CFG,
+ ANA_POL_MODE_CFG,
+ ANA_POL_PIR_STATE,
+ ANA_POL_CIR_STATE,
+ ANA_POL_STATE,
+ ANA_POL_FLOWC,
+ ANA_POL_HYST,
+ ANA_POL_MISC_CFG,
+ QS_XTR_GRP_CFG = QS << TARGET_OFFSET,
+ QS_XTR_RD,
+ QS_XTR_FRM_PRUNING,
+ QS_XTR_FLUSH,
+ QS_XTR_DATA_PRESENT,
+ QS_XTR_CFG,
+ QS_INJ_GRP_CFG,
+ QS_INJ_WR,
+ QS_INJ_CTRL,
+ QS_INJ_STATUS,
+ QS_INJ_ERR,
+ QS_INH_DBG,
+ QSYS_PORT_MODE = QSYS << TARGET_OFFSET,
+ QSYS_SWITCH_PORT_MODE,
+ QSYS_STAT_CNT_CFG,
+ QSYS_EEE_CFG,
+ QSYS_EEE_THRES,
+ QSYS_IGR_NO_SHARING,
+ QSYS_EGR_NO_SHARING,
+ QSYS_SW_STATUS,
+ QSYS_EXT_CPU_CFG,
+ QSYS_PAD_CFG,
+ QSYS_CPU_GROUP_MAP,
+ QSYS_QMAP,
+ QSYS_ISDX_SGRP,
+ QSYS_TIMED_FRAME_ENTRY,
+ QSYS_TFRM_MISC,
+ QSYS_TFRM_PORT_DLY,
+ QSYS_TFRM_TIMER_CFG_1,
+ QSYS_TFRM_TIMER_CFG_2,
+ QSYS_TFRM_TIMER_CFG_3,
+ QSYS_TFRM_TIMER_CFG_4,
+ QSYS_TFRM_TIMER_CFG_5,
+ QSYS_TFRM_TIMER_CFG_6,
+ QSYS_TFRM_TIMER_CFG_7,
+ QSYS_TFRM_TIMER_CFG_8,
+ QSYS_RED_PROFILE,
+ QSYS_RES_QOS_MODE,
+ QSYS_RES_CFG,
+ QSYS_RES_STAT,
+ QSYS_EGR_DROP_MODE,
+ QSYS_EQ_CTRL,
+ QSYS_EVENTS_CORE,
+ QSYS_QMAXSDU_CFG_0,
+ QSYS_QMAXSDU_CFG_1,
+ QSYS_QMAXSDU_CFG_2,
+ QSYS_QMAXSDU_CFG_3,
+ QSYS_QMAXSDU_CFG_4,
+ QSYS_QMAXSDU_CFG_5,
+ QSYS_QMAXSDU_CFG_6,
+ QSYS_QMAXSDU_CFG_7,
+ QSYS_PREEMPTION_CFG,
+ QSYS_CIR_CFG,
+ QSYS_EIR_CFG,
+ QSYS_SE_CFG,
+ QSYS_SE_DWRR_CFG,
+ QSYS_SE_CONNECT,
+ QSYS_SE_DLB_SENSE,
+ QSYS_CIR_STATE,
+ QSYS_EIR_STATE,
+ QSYS_SE_STATE,
+ QSYS_HSCH_MISC_CFG,
+ QSYS_TAG_CONFIG,
+ QSYS_TAS_PARAM_CFG_CTRL,
+ QSYS_PORT_MAX_SDU,
+ QSYS_PARAM_CFG_REG_1,
+ QSYS_PARAM_CFG_REG_2,
+ QSYS_PARAM_CFG_REG_3,
+ QSYS_PARAM_CFG_REG_4,
+ QSYS_PARAM_CFG_REG_5,
+ QSYS_GCL_CFG_REG_1,
+ QSYS_GCL_CFG_REG_2,
+ QSYS_PARAM_STATUS_REG_1,
+ QSYS_PARAM_STATUS_REG_2,
+ QSYS_PARAM_STATUS_REG_3,
+ QSYS_PARAM_STATUS_REG_4,
+ QSYS_PARAM_STATUS_REG_5,
+ QSYS_PARAM_STATUS_REG_6,
+ QSYS_PARAM_STATUS_REG_7,
+ QSYS_PARAM_STATUS_REG_8,
+ QSYS_PARAM_STATUS_REG_9,
+ QSYS_GCL_STATUS_REG_1,
+ QSYS_GCL_STATUS_REG_2,
+ REW_PORT_VLAN_CFG = REW << TARGET_OFFSET,
+ REW_TAG_CFG,
+ REW_PORT_CFG,
+ REW_DSCP_CFG,
+ REW_PCP_DEI_QOS_MAP_CFG,
+ REW_PTP_CFG,
+ REW_PTP_DLY1_CFG,
+ REW_RED_TAG_CFG,
+ REW_DSCP_REMAP_DP1_CFG,
+ REW_DSCP_REMAP_CFG,
+ REW_STAT_CFG,
+ REW_REW_STICKY,
+ REW_PPT,
+ SYS_COUNT_RX_OCTETS = SYS << TARGET_OFFSET,
+ SYS_COUNT_RX_UNICAST,
+ SYS_COUNT_RX_MULTICAST,
+ SYS_COUNT_RX_BROADCAST,
+ SYS_COUNT_RX_SHORTS,
+ SYS_COUNT_RX_FRAGMENTS,
+ SYS_COUNT_RX_JABBERS,
+ SYS_COUNT_RX_CRC_ALIGN_ERRS,
+ SYS_COUNT_RX_SYM_ERRS,
+ SYS_COUNT_RX_64,
+ SYS_COUNT_RX_65_127,
+ SYS_COUNT_RX_128_255,
+ SYS_COUNT_RX_256_1023,
+ SYS_COUNT_RX_1024_1526,
+ SYS_COUNT_RX_1527_MAX,
+ SYS_COUNT_RX_PAUSE,
+ SYS_COUNT_RX_CONTROL,
+ SYS_COUNT_RX_LONGS,
+ SYS_COUNT_RX_CLASSIFIED_DROPS,
+ SYS_COUNT_TX_OCTETS,
+ SYS_COUNT_TX_UNICAST,
+ SYS_COUNT_TX_MULTICAST,
+ SYS_COUNT_TX_BROADCAST,
+ SYS_COUNT_TX_COLLISION,
+ SYS_COUNT_TX_DROPS,
+ SYS_COUNT_TX_PAUSE,
+ SYS_COUNT_TX_64,
+ SYS_COUNT_TX_65_127,
+ SYS_COUNT_TX_128_511,
+ SYS_COUNT_TX_512_1023,
+ SYS_COUNT_TX_1024_1526,
+ SYS_COUNT_TX_1527_MAX,
+ SYS_COUNT_TX_AGING,
+ SYS_RESET_CFG,
+ SYS_SR_ETYPE_CFG,
+ SYS_VLAN_ETYPE_CFG,
+ SYS_PORT_MODE,
+ SYS_FRONT_PORT_MODE,
+ SYS_FRM_AGING,
+ SYS_STAT_CFG,
+ SYS_SW_STATUS,
+ SYS_MISC_CFG,
+ SYS_REW_MAC_HIGH_CFG,
+ SYS_REW_MAC_LOW_CFG,
+ SYS_TIMESTAMP_OFFSET,
+ SYS_CMID,
+ SYS_PAUSE_CFG,
+ SYS_PAUSE_TOT_CFG,
+ SYS_ATOP,
+ SYS_ATOP_TOT_CFG,
+ SYS_MAC_FC_CFG,
+ SYS_MMGT,
+ SYS_MMGT_FAST,
+ SYS_EVENTS_DIF,
+ SYS_EVENTS_CORE,
+ SYS_CNT,
+ SYS_PTP_STATUS,
+ SYS_PTP_TXSTAMP,
+ SYS_PTP_NXT,
+ SYS_PTP_CFG,
+ SYS_RAM_INIT,
+ SYS_CM_ADDR,
+ SYS_CM_DATA_WR,
+ SYS_CM_DATA_RD,
+ SYS_CM_OP,
+ SYS_CM_DATA,
+ S2_CORE_UPDATE_CTRL = S2 << TARGET_OFFSET,
+ S2_CORE_MV_CFG,
+ S2_CACHE_ENTRY_DAT,
+ S2_CACHE_MASK_DAT,
+ S2_CACHE_ACTION_DAT,
+ S2_CACHE_CNT_DAT,
+ S2_CACHE_TG_DAT,
+ PTP_PIN_CFG = PTP << TARGET_OFFSET,
+ PTP_PIN_TOD_SEC_MSB,
+ PTP_PIN_TOD_SEC_LSB,
+ PTP_PIN_TOD_NSEC,
+ PTP_CFG_MISC,
+ PTP_CLK_CFG_ADJ_CFG,
+ PTP_CLK_CFG_ADJ_FREQ,
+ GCB_SOFT_RST = GCB << TARGET_OFFSET,
+};
+
+enum ocelot_regfield {
+ ANA_ADVLEARN_VLAN_CHK,
+ ANA_ADVLEARN_LEARN_MIRROR,
+ ANA_ANEVENTS_FLOOD_DISCARD,
+ ANA_ANEVENTS_MSTI_DROP,
+ ANA_ANEVENTS_ACLKILL,
+ ANA_ANEVENTS_ACLUSED,
+ ANA_ANEVENTS_AUTOAGE,
+ ANA_ANEVENTS_VS2TTL1,
+ ANA_ANEVENTS_STORM_DROP,
+ ANA_ANEVENTS_LEARN_DROP,
+ ANA_ANEVENTS_AGED_ENTRY,
+ ANA_ANEVENTS_CPU_LEARN_FAILED,
+ ANA_ANEVENTS_AUTO_LEARN_FAILED,
+ ANA_ANEVENTS_LEARN_REMOVE,
+ ANA_ANEVENTS_AUTO_LEARNED,
+ ANA_ANEVENTS_AUTO_MOVED,
+ ANA_ANEVENTS_DROPPED,
+ ANA_ANEVENTS_CLASSIFIED_DROP,
+ ANA_ANEVENTS_CLASSIFIED_COPY,
+ ANA_ANEVENTS_VLAN_DISCARD,
+ ANA_ANEVENTS_FWD_DISCARD,
+ ANA_ANEVENTS_MULTICAST_FLOOD,
+ ANA_ANEVENTS_UNICAST_FLOOD,
+ ANA_ANEVENTS_DEST_KNOWN,
+ ANA_ANEVENTS_BUCKET3_MATCH,
+ ANA_ANEVENTS_BUCKET2_MATCH,
+ ANA_ANEVENTS_BUCKET1_MATCH,
+ ANA_ANEVENTS_BUCKET0_MATCH,
+ ANA_ANEVENTS_CPU_OPERATION,
+ ANA_ANEVENTS_DMAC_LOOKUP,
+ ANA_ANEVENTS_SMAC_LOOKUP,
+ ANA_ANEVENTS_SEQ_GEN_ERR_0,
+ ANA_ANEVENTS_SEQ_GEN_ERR_1,
+ ANA_TABLES_MACACCESS_B_DOM,
+ ANA_TABLES_MACTINDX_BUCKET,
+ ANA_TABLES_MACTINDX_M_INDEX,
+ QSYS_TIMED_FRAME_ENTRY_TFRM_VLD,
+ QSYS_TIMED_FRAME_ENTRY_TFRM_FP,
+ QSYS_TIMED_FRAME_ENTRY_TFRM_PORTNO,
+ QSYS_TIMED_FRAME_ENTRY_TFRM_TM_SEL,
+ QSYS_TIMED_FRAME_ENTRY_TFRM_TM_T,
+ SYS_RESET_CFG_CORE_ENA,
+ SYS_RESET_CFG_MEM_ENA,
+ SYS_RESET_CFG_MEM_INIT,
+ GCB_SOFT_RST_SWC_RST,
+ REGFIELD_MAX
+};
+
+enum ocelot_clk_pins {
+ ALT_PPS_PIN = 1,
+ EXT_CLK_PIN,
+ ALT_LDST_PIN,
+ TOD_ACC_PIN
+};
+
+struct ocelot_stat_layout {
+ u32 offset;
+ char name[ETH_GSTRING_LEN];
+};
+
+enum ocelot_tag_prefix {
+ OCELOT_TAG_PREFIX_DISABLED = 0,
+ OCELOT_TAG_PREFIX_NONE,
+ OCELOT_TAG_PREFIX_SHORT,
+ OCELOT_TAG_PREFIX_LONG,
+};
+
+struct ocelot;
+
+struct ocelot_ops {
+ void (*pcs_init)(struct ocelot *ocelot, int port);
+ int (*reset)(struct ocelot *ocelot);
+};
+
+struct ocelot_port {
+ struct ocelot *ocelot;
+
+ void __iomem *regs;
+
+ /* Ingress default VLAN (pvid) */
+ u16 pvid;
+
+ /* Egress default VLAN (vid) */
+ u16 vid;
+
+ u8 ptp_cmd;
+ struct sk_buff_head tx_skbs;
+ u8 ts_id;
+
+ phy_interface_t phy_mode;
+};
+
+struct ocelot {
+ struct device *dev;
+
+ const struct ocelot_ops *ops;
+ struct regmap *targets[TARGET_MAX];
+ struct regmap_field *regfields[REGFIELD_MAX];
+ const u32 *const *map;
+ const struct ocelot_stat_layout *stats_layout;
+ unsigned int num_stats;
+
+ int shared_queue_sz;
+
+ struct net_device *hw_bridge_dev;
+ u16 bridge_mask;
+ u16 bridge_fwd_mask;
+
+ struct ocelot_port **ports;
+
+ u8 base_mac[ETH_ALEN];
+
+ /* Keep track of the vlan port masks */
+ u32 vlan_mask[VLAN_N_VID];
+
+ u8 num_phys_ports;
+ u8 num_cpu_ports;
+ u8 cpu;
+
+ u32 *lags;
+
+ struct list_head multicast;
+
+ /* Workqueue to check statistics for overflow with its lock */
+ struct mutex stats_lock;
+ u64 *stats;
+ struct delayed_work stats_work;
+ struct workqueue_struct *stats_queue;
+
+ u8 ptp:1;
+ struct ptp_clock *ptp_clock;
+ struct ptp_clock_info ptp_info;
+ struct hwtstamp_config hwtstamp_config;
+ /* Protects the PTP interface state */
+ struct mutex ptp_lock;
+ /* Protects the PTP clock */
+ spinlock_t ptp_clock_lock;
+
+ void (*port_pcs_init)(struct ocelot_port *port);
+};
+
+#define ocelot_read_ix(ocelot, reg, gi, ri) __ocelot_read_ix(ocelot, reg, reg##_GSZ * (gi) + reg##_RSZ * (ri))
+#define ocelot_read_gix(ocelot, reg, gi) __ocelot_read_ix(ocelot, reg, reg##_GSZ * (gi))
+#define ocelot_read_rix(ocelot, reg, ri) __ocelot_read_ix(ocelot, reg, reg##_RSZ * (ri))
+#define ocelot_read(ocelot, reg) __ocelot_read_ix(ocelot, reg, 0)
+
+#define ocelot_write_ix(ocelot, val, reg, gi, ri) __ocelot_write_ix(ocelot, val, reg, reg##_GSZ * (gi) + reg##_RSZ * (ri))
+#define ocelot_write_gix(ocelot, val, reg, gi) __ocelot_write_ix(ocelot, val, reg, reg##_GSZ * (gi))
+#define ocelot_write_rix(ocelot, val, reg, ri) __ocelot_write_ix(ocelot, val, reg, reg##_RSZ * (ri))
+#define ocelot_write(ocelot, val, reg) __ocelot_write_ix(ocelot, val, reg, 0)
+
+#define ocelot_rmw_ix(ocelot, val, m, reg, gi, ri) __ocelot_rmw_ix(ocelot, val, m, reg, reg##_GSZ * (gi) + reg##_RSZ * (ri))
+#define ocelot_rmw_gix(ocelot, val, m, reg, gi) __ocelot_rmw_ix(ocelot, val, m, reg, reg##_GSZ * (gi))
+#define ocelot_rmw_rix(ocelot, val, m, reg, ri) __ocelot_rmw_ix(ocelot, val, m, reg, reg##_RSZ * (ri))
+#define ocelot_rmw(ocelot, val, m, reg) __ocelot_rmw_ix(ocelot, val, m, reg, 0)
+
+/* I/O */
+u32 ocelot_port_readl(struct ocelot_port *port, u32 reg);
+void ocelot_port_writel(struct ocelot_port *port, u32 val, u32 reg);
+u32 __ocelot_read_ix(struct ocelot *ocelot, u32 reg, u32 offset);
+void __ocelot_write_ix(struct ocelot *ocelot, u32 val, u32 reg, u32 offset);
+void __ocelot_rmw_ix(struct ocelot *ocelot, u32 val, u32 mask, u32 reg,
+ u32 offset);
+
+/* Hardware initialization */
+int ocelot_regfields_init(struct ocelot *ocelot,
+ const struct reg_field *const regfields);
+struct regmap *ocelot_regmap_init(struct ocelot *ocelot, struct resource *res);
+void ocelot_set_cpu_port(struct ocelot *ocelot, int cpu,
+ enum ocelot_tag_prefix injection,
+ enum ocelot_tag_prefix extraction);
+int ocelot_init(struct ocelot *ocelot);
+void ocelot_deinit(struct ocelot *ocelot);
+void ocelot_init_port(struct ocelot *ocelot, int port);
+
+/* DSA callbacks */
+void ocelot_port_enable(struct ocelot *ocelot, int port,
+ struct phy_device *phy);
+void ocelot_port_disable(struct ocelot *ocelot, int port);
+void ocelot_get_strings(struct ocelot *ocelot, int port, u32 sset, u8 *data);
+void ocelot_get_ethtool_stats(struct ocelot *ocelot, int port, u64 *data);
+int ocelot_get_sset_count(struct ocelot *ocelot, int port, int sset);
+int ocelot_get_ts_info(struct ocelot *ocelot, int port,
+ struct ethtool_ts_info *info);
+void ocelot_set_ageing_time(struct ocelot *ocelot, unsigned int msecs);
+void ocelot_adjust_link(struct ocelot *ocelot, int port,
+ struct phy_device *phydev);
+void ocelot_port_vlan_filtering(struct ocelot *ocelot, int port,
+ bool vlan_aware);
+void ocelot_bridge_stp_state_set(struct ocelot *ocelot, int port, u8 state);
+int ocelot_port_bridge_join(struct ocelot *ocelot, int port,
+ struct net_device *bridge);
+int ocelot_port_bridge_leave(struct ocelot *ocelot, int port,
+ struct net_device *bridge);
+int ocelot_fdb_dump(struct ocelot *ocelot, int port,
+ dsa_fdb_dump_cb_t *cb, void *data);
+int ocelot_fdb_add(struct ocelot *ocelot, int port,
+ const unsigned char *addr, u16 vid, bool vlan_aware);
+int ocelot_fdb_del(struct ocelot *ocelot, int port,
+ const unsigned char *addr, u16 vid);
+int ocelot_vlan_add(struct ocelot *ocelot, int port, u16 vid, bool pvid,
+ bool untagged);
+int ocelot_vlan_del(struct ocelot *ocelot, int port, u16 vid);
+int ocelot_hwstamp_get(struct ocelot *ocelot, int port, struct ifreq *ifr);
+int ocelot_hwstamp_set(struct ocelot *ocelot, int port, struct ifreq *ifr);
+int ocelot_ptp_gettime64(struct ptp_clock_info *ptp, struct timespec64 *ts);
+int ocelot_port_add_txtstamp_skb(struct ocelot_port *ocelot_port,
+ struct sk_buff *skb);
+void ocelot_get_txtstamp(struct ocelot *ocelot);
+
+#endif
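For orientation, a hedged sketch of the register-indexing convention defined above: each enum value carries its target in the upper bits, a SoC-specific map (kept in the drivers; the offsets below are placeholders) supplies the per-target offset, and the _rix helper adds the replication stride declared in the register headers.

/* Illustrative only; real maps and offsets live in the ocelot drivers. */
static const u32 example_ana_regmap[] = {
	REG(ANA_ADVLEARN,	0x0000),	/* placeholder offsets */
	REG(ANA_PGID_PGID,	0x0100),
};

static u32 example_read_pgid(struct ocelot *ocelot, int pgid)
{
	/* expands to __ocelot_read_ix(ocelot, ANA_PGID_PGID,
	 *                             ANA_PGID_PGID_RSZ * pgid) */
	return ocelot_read_rix(ocelot, ANA_PGID_PGID, pgid);
}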
diff --git a/include/soc/mscc/ocelot_ana.h b/include/soc/mscc/ocelot_ana.h
new file mode 100644
index 000000000000..841c6ec22b64
--- /dev/null
+++ b/include/soc/mscc/ocelot_ana.h
@@ -0,0 +1,625 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/*
+ * Microsemi Ocelot Switch driver
+ *
+ * Copyright (c) 2017 Microsemi Corporation
+ */
+
+#ifndef _MSCC_OCELOT_ANA_H_
+#define _MSCC_OCELOT_ANA_H_
+
+#define ANA_ANAGEFIL_B_DOM_EN BIT(22)
+#define ANA_ANAGEFIL_B_DOM_VAL BIT(21)
+#define ANA_ANAGEFIL_AGE_LOCKED BIT(20)
+#define ANA_ANAGEFIL_PID_EN BIT(19)
+#define ANA_ANAGEFIL_PID_VAL(x) (((x) << 14) & GENMASK(18, 14))
+#define ANA_ANAGEFIL_PID_VAL_M GENMASK(18, 14)
+#define ANA_ANAGEFIL_PID_VAL_X(x) (((x) & GENMASK(18, 14)) >> 14)
+#define ANA_ANAGEFIL_VID_EN BIT(13)
+#define ANA_ANAGEFIL_VID_VAL(x) ((x) & GENMASK(12, 0))
+#define ANA_ANAGEFIL_VID_VAL_M GENMASK(12, 0)
+
+#define ANA_STORMLIMIT_CFG_RSZ 0x4
+
+#define ANA_STORMLIMIT_CFG_STORM_RATE(x) (((x) << 3) & GENMASK(6, 3))
+#define ANA_STORMLIMIT_CFG_STORM_RATE_M GENMASK(6, 3)
+#define ANA_STORMLIMIT_CFG_STORM_RATE_X(x) (((x) & GENMASK(6, 3)) >> 3)
+#define ANA_STORMLIMIT_CFG_STORM_UNIT BIT(2)
+#define ANA_STORMLIMIT_CFG_STORM_MODE(x) ((x) & GENMASK(1, 0))
+#define ANA_STORMLIMIT_CFG_STORM_MODE_M GENMASK(1, 0)
+
+#define ANA_AUTOAGE_AGE_FAST BIT(21)
+#define ANA_AUTOAGE_AGE_PERIOD(x) (((x) << 1) & GENMASK(20, 1))
+#define ANA_AUTOAGE_AGE_PERIOD_M GENMASK(20, 1)
+#define ANA_AUTOAGE_AGE_PERIOD_X(x) (((x) & GENMASK(20, 1)) >> 1)
+#define ANA_AUTOAGE_AUTOAGE_LOCKED BIT(0)
+
+#define ANA_MACTOPTIONS_REDUCED_TABLE BIT(1)
+#define ANA_MACTOPTIONS_SHADOW BIT(0)
+
+#define ANA_AGENCTRL_FID_MASK(x) (((x) << 12) & GENMASK(23, 12))
+#define ANA_AGENCTRL_FID_MASK_M GENMASK(23, 12)
+#define ANA_AGENCTRL_FID_MASK_X(x) (((x) & GENMASK(23, 12)) >> 12)
+#define ANA_AGENCTRL_IGNORE_DMAC_FLAGS BIT(11)
+#define ANA_AGENCTRL_IGNORE_SMAC_FLAGS BIT(10)
+#define ANA_AGENCTRL_FLOOD_SPECIAL BIT(9)
+#define ANA_AGENCTRL_FLOOD_IGNORE_VLAN BIT(8)
+#define ANA_AGENCTRL_MIRROR_CPU BIT(7)
+#define ANA_AGENCTRL_LEARN_CPU_COPY BIT(6)
+#define ANA_AGENCTRL_LEARN_FWD_KILL BIT(5)
+#define ANA_AGENCTRL_LEARN_IGNORE_VLAN BIT(4)
+#define ANA_AGENCTRL_CPU_CPU_KILL_ENA BIT(3)
+#define ANA_AGENCTRL_GREEN_COUNT_MODE BIT(2)
+#define ANA_AGENCTRL_YELLOW_COUNT_MODE BIT(1)
+#define ANA_AGENCTRL_RED_COUNT_MODE BIT(0)
+
+#define ANA_FLOODING_RSZ 0x4
+
+#define ANA_FLOODING_FLD_UNICAST(x) (((x) << 12) & GENMASK(17, 12))
+#define ANA_FLOODING_FLD_UNICAST_M GENMASK(17, 12)
+#define ANA_FLOODING_FLD_UNICAST_X(x) (((x) & GENMASK(17, 12)) >> 12)
+#define ANA_FLOODING_FLD_BROADCAST(x) (((x) << 6) & GENMASK(11, 6))
+#define ANA_FLOODING_FLD_BROADCAST_M GENMASK(11, 6)
+#define ANA_FLOODING_FLD_BROADCAST_X(x) (((x) & GENMASK(11, 6)) >> 6)
+#define ANA_FLOODING_FLD_MULTICAST(x) ((x) & GENMASK(5, 0))
+#define ANA_FLOODING_FLD_MULTICAST_M GENMASK(5, 0)
+
+#define ANA_FLOODING_IPMC_FLD_MC4_CTRL(x) (((x) << 18) & GENMASK(23, 18))
+#define ANA_FLOODING_IPMC_FLD_MC4_CTRL_M GENMASK(23, 18)
+#define ANA_FLOODING_IPMC_FLD_MC4_CTRL_X(x) (((x) & GENMASK(23, 18)) >> 18)
+#define ANA_FLOODING_IPMC_FLD_MC4_DATA(x) (((x) << 12) & GENMASK(17, 12))
+#define ANA_FLOODING_IPMC_FLD_MC4_DATA_M GENMASK(17, 12)
+#define ANA_FLOODING_IPMC_FLD_MC4_DATA_X(x) (((x) & GENMASK(17, 12)) >> 12)
+#define ANA_FLOODING_IPMC_FLD_MC6_CTRL(x) (((x) << 6) & GENMASK(11, 6))
+#define ANA_FLOODING_IPMC_FLD_MC6_CTRL_M GENMASK(11, 6)
+#define ANA_FLOODING_IPMC_FLD_MC6_CTRL_X(x) (((x) & GENMASK(11, 6)) >> 6)
+#define ANA_FLOODING_IPMC_FLD_MC6_DATA(x) ((x) & GENMASK(5, 0))
+#define ANA_FLOODING_IPMC_FLD_MC6_DATA_M GENMASK(5, 0)
+
+#define ANA_SFLOW_CFG_RSZ 0x4
+
+#define ANA_SFLOW_CFG_SF_RATE(x) (((x) << 2) & GENMASK(13, 2))
+#define ANA_SFLOW_CFG_SF_RATE_M GENMASK(13, 2)
+#define ANA_SFLOW_CFG_SF_RATE_X(x) (((x) & GENMASK(13, 2)) >> 2)
+#define ANA_SFLOW_CFG_SF_SAMPLE_RX BIT(1)
+#define ANA_SFLOW_CFG_SF_SAMPLE_TX BIT(0)
+
+#define ANA_PORT_MODE_RSZ 0x4
+
+#define ANA_PORT_MODE_REDTAG_PARSE_CFG BIT(3)
+#define ANA_PORT_MODE_VLAN_PARSE_CFG(x) (((x) << 1) & GENMASK(2, 1))
+#define ANA_PORT_MODE_VLAN_PARSE_CFG_M GENMASK(2, 1)
+#define ANA_PORT_MODE_VLAN_PARSE_CFG_X(x) (((x) & GENMASK(2, 1)) >> 1)
+#define ANA_PORT_MODE_L3_PARSE_CFG BIT(0)
+
+#define ANA_CUT_THRU_CFG_RSZ 0x4
+
+#define ANA_PGID_PGID_RSZ 0x4
+
+#define ANA_PGID_PGID_PGID(x) ((x) & GENMASK(11, 0))
+#define ANA_PGID_PGID_PGID_M GENMASK(11, 0)
+#define ANA_PGID_PGID_CPUQ_DST_PGID(x) (((x) << 27) & GENMASK(29, 27))
+#define ANA_PGID_PGID_CPUQ_DST_PGID_M GENMASK(29, 27)
+#define ANA_PGID_PGID_CPUQ_DST_PGID_X(x) (((x) & GENMASK(29, 27)) >> 27)
+
+#define ANA_TABLES_MACHDATA_VID(x) (((x) << 16) & GENMASK(28, 16))
+#define ANA_TABLES_MACHDATA_VID_M GENMASK(28, 16)
+#define ANA_TABLES_MACHDATA_VID_X(x) (((x) & GENMASK(28, 16)) >> 16)
+#define ANA_TABLES_MACHDATA_MACHDATA(x) ((x) & GENMASK(15, 0))
+#define ANA_TABLES_MACHDATA_MACHDATA_M GENMASK(15, 0)
+
+#define ANA_TABLES_STREAMDATA_SSID_VALID BIT(16)
+#define ANA_TABLES_STREAMDATA_SSID(x) (((x) << 9) & GENMASK(15, 9))
+#define ANA_TABLES_STREAMDATA_SSID_M GENMASK(15, 9)
+#define ANA_TABLES_STREAMDATA_SSID_X(x) (((x) & GENMASK(15, 9)) >> 9)
+#define ANA_TABLES_STREAMDATA_SFID_VALID BIT(8)
+#define ANA_TABLES_STREAMDATA_SFID(x) ((x) & GENMASK(7, 0))
+#define ANA_TABLES_STREAMDATA_SFID_M GENMASK(7, 0)
+
+#define ANA_TABLES_MACACCESS_MAC_CPU_COPY BIT(15)
+#define ANA_TABLES_MACACCESS_SRC_KILL BIT(14)
+#define ANA_TABLES_MACACCESS_IGNORE_VLAN BIT(13)
+#define ANA_TABLES_MACACCESS_AGED_FLAG BIT(12)
+#define ANA_TABLES_MACACCESS_VALID BIT(11)
+#define ANA_TABLES_MACACCESS_ENTRYTYPE(x) (((x) << 9) & GENMASK(10, 9))
+#define ANA_TABLES_MACACCESS_ENTRYTYPE_M GENMASK(10, 9)
+#define ANA_TABLES_MACACCESS_ENTRYTYPE_X(x) (((x) & GENMASK(10, 9)) >> 9)
+#define ANA_TABLES_MACACCESS_DEST_IDX(x) (((x) << 3) & GENMASK(8, 3))
+#define ANA_TABLES_MACACCESS_DEST_IDX_M GENMASK(8, 3)
+#define ANA_TABLES_MACACCESS_DEST_IDX_X(x) (((x) & GENMASK(8, 3)) >> 3)
+#define ANA_TABLES_MACACCESS_MAC_TABLE_CMD(x) ((x) & GENMASK(2, 0))
+#define ANA_TABLES_MACACCESS_MAC_TABLE_CMD_M GENMASK(2, 0)
+#define MACACCESS_CMD_IDLE 0
+#define MACACCESS_CMD_LEARN 1
+#define MACACCESS_CMD_FORGET 2
+#define MACACCESS_CMD_AGE 3
+#define MACACCESS_CMD_GET_NEXT 4
+#define MACACCESS_CMD_INIT 5
+#define MACACCESS_CMD_READ 6
+#define MACACCESS_CMD_WRITE 7
+
+#define ANA_TABLES_VLANACCESS_VLAN_PORT_MASK(x) (((x) << 2) & GENMASK(13, 2))
+#define ANA_TABLES_VLANACCESS_VLAN_PORT_MASK_M GENMASK(13, 2)
+#define ANA_TABLES_VLANACCESS_VLAN_PORT_MASK_X(x) (((x) & GENMASK(13, 2)) >> 2)
+#define ANA_TABLES_VLANACCESS_VLAN_TBL_CMD(x) ((x) & GENMASK(1, 0))
+#define ANA_TABLES_VLANACCESS_VLAN_TBL_CMD_M GENMASK(1, 0)
+#define ANA_TABLES_VLANACCESS_CMD_IDLE 0x0
+#define ANA_TABLES_VLANACCESS_CMD_WRITE 0x2
+#define ANA_TABLES_VLANACCESS_CMD_INIT 0x3
+
+#define ANA_TABLES_VLANTIDX_VLAN_SEC_FWD_ENA BIT(17)
+#define ANA_TABLES_VLANTIDX_VLAN_FLOOD_DIS BIT(16)
+#define ANA_TABLES_VLANTIDX_VLAN_PRIV_VLAN BIT(15)
+#define ANA_TABLES_VLANTIDX_VLAN_LEARN_DISABLED BIT(14)
+#define ANA_TABLES_VLANTIDX_VLAN_MIRROR BIT(13)
+#define ANA_TABLES_VLANTIDX_VLAN_SRC_CHK BIT(12)
+#define ANA_TABLES_VLANTIDX_V_INDEX(x) ((x) & GENMASK(11, 0))
+#define ANA_TABLES_VLANTIDX_V_INDEX_M GENMASK(11, 0)
+
+#define ANA_TABLES_ISDXACCESS_ISDX_PORT_MASK(x) (((x) << 2) & GENMASK(8, 2))
+#define ANA_TABLES_ISDXACCESS_ISDX_PORT_MASK_M GENMASK(8, 2)
+#define ANA_TABLES_ISDXACCESS_ISDX_PORT_MASK_X(x) (((x) & GENMASK(8, 2)) >> 2)
+#define ANA_TABLES_ISDXACCESS_ISDX_TBL_CMD(x) ((x) & GENMASK(1, 0))
+#define ANA_TABLES_ISDXACCESS_ISDX_TBL_CMD_M GENMASK(1, 0)
+
+#define ANA_TABLES_ISDXTIDX_ISDX_SDLBI(x) (((x) << 21) & GENMASK(28, 21))
+#define ANA_TABLES_ISDXTIDX_ISDX_SDLBI_M GENMASK(28, 21)
+#define ANA_TABLES_ISDXTIDX_ISDX_SDLBI_X(x) (((x) & GENMASK(28, 21)) >> 21)
+#define ANA_TABLES_ISDXTIDX_ISDX_MSTI(x) (((x) << 15) & GENMASK(20, 15))
+#define ANA_TABLES_ISDXTIDX_ISDX_MSTI_M GENMASK(20, 15)
+#define ANA_TABLES_ISDXTIDX_ISDX_MSTI_X(x) (((x) & GENMASK(20, 15)) >> 15)
+#define ANA_TABLES_ISDXTIDX_ISDX_ES0_KEY_ENA BIT(14)
+#define ANA_TABLES_ISDXTIDX_ISDX_FORCE_ENA BIT(10)
+#define ANA_TABLES_ISDXTIDX_ISDX_INDEX(x) ((x) & GENMASK(7, 0))
+#define ANA_TABLES_ISDXTIDX_ISDX_INDEX_M GENMASK(7, 0)
+
+#define ANA_TABLES_ENTRYLIM_RSZ 0x4
+
+#define ANA_TABLES_ENTRYLIM_ENTRYLIM(x) (((x) << 14) & GENMASK(17, 14))
+#define ANA_TABLES_ENTRYLIM_ENTRYLIM_M GENMASK(17, 14)
+#define ANA_TABLES_ENTRYLIM_ENTRYLIM_X(x) (((x) & GENMASK(17, 14)) >> 14)
+#define ANA_TABLES_ENTRYLIM_ENTRYSTAT(x) ((x) & GENMASK(13, 0))
+#define ANA_TABLES_ENTRYLIM_ENTRYSTAT_M GENMASK(13, 0)
+
+#define ANA_TABLES_STREAMACCESS_GEN_REC_SEQ_NUM(x) (((x) << 4) & GENMASK(31, 4))
+#define ANA_TABLES_STREAMACCESS_GEN_REC_SEQ_NUM_M GENMASK(31, 4)
+#define ANA_TABLES_STREAMACCESS_GEN_REC_SEQ_NUM_X(x) (((x) & GENMASK(31, 4)) >> 4)
+#define ANA_TABLES_STREAMACCESS_SEQ_GEN_REC_ENA BIT(3)
+#define ANA_TABLES_STREAMACCESS_GEN_REC_TYPE BIT(2)
+#define ANA_TABLES_STREAMACCESS_STREAM_TBL_CMD(x) ((x) & GENMASK(1, 0))
+#define ANA_TABLES_STREAMACCESS_STREAM_TBL_CMD_M GENMASK(1, 0)
+
+#define ANA_TABLES_STREAMTIDX_SEQ_GEN_ERR_STATUS(x) (((x) << 30) & GENMASK(31, 30))
+#define ANA_TABLES_STREAMTIDX_SEQ_GEN_ERR_STATUS_M GENMASK(31, 30)
+#define ANA_TABLES_STREAMTIDX_SEQ_GEN_ERR_STATUS_X(x) (((x) & GENMASK(31, 30)) >> 30)
+#define ANA_TABLES_STREAMTIDX_S_INDEX(x) (((x) << 16) & GENMASK(22, 16))
+#define ANA_TABLES_STREAMTIDX_S_INDEX_M GENMASK(22, 16)
+#define ANA_TABLES_STREAMTIDX_S_INDEX_X(x) (((x) & GENMASK(22, 16)) >> 16)
+#define ANA_TABLES_STREAMTIDX_FORCE_SF_BEHAVIOUR BIT(14)
+#define ANA_TABLES_STREAMTIDX_SEQ_HISTORY_LEN(x) (((x) << 8) & GENMASK(13, 8))
+#define ANA_TABLES_STREAMTIDX_SEQ_HISTORY_LEN_M GENMASK(13, 8)
+#define ANA_TABLES_STREAMTIDX_SEQ_HISTORY_LEN_X(x) (((x) & GENMASK(13, 8)) >> 8)
+#define ANA_TABLES_STREAMTIDX_RESET_ON_ROGUE BIT(7)
+#define ANA_TABLES_STREAMTIDX_REDTAG_POP BIT(6)
+#define ANA_TABLES_STREAMTIDX_STREAM_SPLIT BIT(5)
+#define ANA_TABLES_STREAMTIDX_SEQ_SPACE_LOG2(x) ((x) & GENMASK(4, 0))
+#define ANA_TABLES_STREAMTIDX_SEQ_SPACE_LOG2_M GENMASK(4, 0)
+
+#define ANA_TABLES_SEQ_MASK_SPLIT_MASK(x) (((x) << 16) & GENMASK(22, 16))
+#define ANA_TABLES_SEQ_MASK_SPLIT_MASK_M GENMASK(22, 16)
+#define ANA_TABLES_SEQ_MASK_SPLIT_MASK_X(x) (((x) & GENMASK(22, 16)) >> 16)
+#define ANA_TABLES_SEQ_MASK_INPUT_PORT_MASK(x) ((x) & GENMASK(6, 0))
+#define ANA_TABLES_SEQ_MASK_INPUT_PORT_MASK_M GENMASK(6, 0)
+
+#define ANA_TABLES_SFID_MASK_IGR_PORT_MASK(x) (((x) << 1) & GENMASK(7, 1))
+#define ANA_TABLES_SFID_MASK_IGR_PORT_MASK_M GENMASK(7, 1)
+#define ANA_TABLES_SFID_MASK_IGR_PORT_MASK_X(x) (((x) & GENMASK(7, 1)) >> 1)
+#define ANA_TABLES_SFID_MASK_IGR_SRCPORT_MATCH_ENA BIT(0)
+
+#define ANA_TABLES_SFIDACCESS_IGR_PRIO_MATCH_ENA BIT(22)
+#define ANA_TABLES_SFIDACCESS_IGR_PRIO(x) (((x) << 19) & GENMASK(21, 19))
+#define ANA_TABLES_SFIDACCESS_IGR_PRIO_M GENMASK(21, 19)
+#define ANA_TABLES_SFIDACCESS_IGR_PRIO_X(x) (((x) & GENMASK(21, 19)) >> 19)
+#define ANA_TABLES_SFIDACCESS_FORCE_BLOCK BIT(18)
+#define ANA_TABLES_SFIDACCESS_MAX_SDU_LEN(x) (((x) << 2) & GENMASK(17, 2))
+#define ANA_TABLES_SFIDACCESS_MAX_SDU_LEN_M GENMASK(17, 2)
+#define ANA_TABLES_SFIDACCESS_MAX_SDU_LEN_X(x) (((x) & GENMASK(17, 2)) >> 2)
+#define ANA_TABLES_SFIDACCESS_SFID_TBL_CMD(x) ((x) & GENMASK(1, 0))
+#define ANA_TABLES_SFIDACCESS_SFID_TBL_CMD_M GENMASK(1, 0)
+
+#define ANA_TABLES_SFIDTIDX_SGID_VALID BIT(26)
+#define ANA_TABLES_SFIDTIDX_SGID(x) (((x) << 18) & GENMASK(25, 18))
+#define ANA_TABLES_SFIDTIDX_SGID_M GENMASK(25, 18)
+#define ANA_TABLES_SFIDTIDX_SGID_X(x) (((x) & GENMASK(25, 18)) >> 18)
+#define ANA_TABLES_SFIDTIDX_POL_ENA BIT(17)
+#define ANA_TABLES_SFIDTIDX_POL_IDX(x) (((x) << 8) & GENMASK(16, 8))
+#define ANA_TABLES_SFIDTIDX_POL_IDX_M GENMASK(16, 8)
+#define ANA_TABLES_SFIDTIDX_POL_IDX_X(x) (((x) & GENMASK(16, 8)) >> 8)
+#define ANA_TABLES_SFIDTIDX_SFID_INDEX(x) ((x) & GENMASK(7, 0))
+#define ANA_TABLES_SFIDTIDX_SFID_INDEX_M GENMASK(7, 0)
+
+#define ANA_MSTI_STATE_RSZ 0x4
+
+#define ANA_OAM_UPM_LM_CNT_RSZ 0x4
+
+#define ANA_SG_ACCESS_CTRL_SGID(x) ((x) & GENMASK(7, 0))
+#define ANA_SG_ACCESS_CTRL_SGID_M GENMASK(7, 0)
+#define ANA_SG_ACCESS_CTRL_CONFIG_CHANGE BIT(28)
+
+#define ANA_SG_CONFIG_REG_3_BASE_TIME_SEC_MSB(x) ((x) & GENMASK(15, 0))
+#define ANA_SG_CONFIG_REG_3_BASE_TIME_SEC_MSB_M GENMASK(15, 0)
+#define ANA_SG_CONFIG_REG_3_LIST_LENGTH(x) (((x) << 16) & GENMASK(18, 16))
+#define ANA_SG_CONFIG_REG_3_LIST_LENGTH_M GENMASK(18, 16)
+#define ANA_SG_CONFIG_REG_3_LIST_LENGTH_X(x) (((x) & GENMASK(18, 16)) >> 16)
+#define ANA_SG_CONFIG_REG_3_GATE_ENABLE BIT(20)
+#define ANA_SG_CONFIG_REG_3_INIT_IPS(x) (((x) << 24) & GENMASK(27, 24))
+#define ANA_SG_CONFIG_REG_3_INIT_IPS_M GENMASK(27, 24)
+#define ANA_SG_CONFIG_REG_3_INIT_IPS_X(x) (((x) & GENMASK(27, 24)) >> 24)
+#define ANA_SG_CONFIG_REG_3_INIT_GATE_STATE BIT(28)
+
+#define ANA_SG_GCL_GS_CONFIG_RSZ 0x4
+
+#define ANA_SG_GCL_GS_CONFIG_IPS(x) ((x) & GENMASK(3, 0))
+#define ANA_SG_GCL_GS_CONFIG_IPS_M GENMASK(3, 0)
+#define ANA_SG_GCL_GS_CONFIG_GATE_STATE BIT(4)
+
+#define ANA_SG_GCL_TI_CONFIG_RSZ 0x4
+
+#define ANA_SG_STATUS_REG_3_CFG_CHG_TIME_SEC_MSB(x) ((x) & GENMASK(15, 0))
+#define ANA_SG_STATUS_REG_3_CFG_CHG_TIME_SEC_MSB_M GENMASK(15, 0)
+#define ANA_SG_STATUS_REG_3_GATE_STATE BIT(16)
+#define ANA_SG_STATUS_REG_3_IPS(x) (((x) << 20) & GENMASK(23, 20))
+#define ANA_SG_STATUS_REG_3_IPS_M GENMASK(23, 20)
+#define ANA_SG_STATUS_REG_3_IPS_X(x) (((x) & GENMASK(23, 20)) >> 20)
+#define ANA_SG_STATUS_REG_3_CONFIG_PENDING BIT(24)
+
+#define ANA_PORT_VLAN_CFG_GSZ 0x100
+
+#define ANA_PORT_VLAN_CFG_VLAN_VID_AS_ISDX BIT(21)
+#define ANA_PORT_VLAN_CFG_VLAN_AWARE_ENA BIT(20)
+#define ANA_PORT_VLAN_CFG_VLAN_POP_CNT(x) (((x) << 18) & GENMASK(19, 18))
+#define ANA_PORT_VLAN_CFG_VLAN_POP_CNT_M GENMASK(19, 18)
+#define ANA_PORT_VLAN_CFG_VLAN_POP_CNT_X(x) (((x) & GENMASK(19, 18)) >> 18)
+#define ANA_PORT_VLAN_CFG_VLAN_INNER_TAG_ENA BIT(17)
+#define ANA_PORT_VLAN_CFG_VLAN_TAG_TYPE BIT(16)
+#define ANA_PORT_VLAN_CFG_VLAN_DEI BIT(15)
+#define ANA_PORT_VLAN_CFG_VLAN_PCP(x) (((x) << 12) & GENMASK(14, 12))
+#define ANA_PORT_VLAN_CFG_VLAN_PCP_M GENMASK(14, 12)
+#define ANA_PORT_VLAN_CFG_VLAN_PCP_X(x) (((x) & GENMASK(14, 12)) >> 12)
+#define ANA_PORT_VLAN_CFG_VLAN_VID(x) ((x) & GENMASK(11, 0))
+#define ANA_PORT_VLAN_CFG_VLAN_VID_M GENMASK(11, 0)
+
+#define ANA_PORT_DROP_CFG_GSZ 0x100
+
+#define ANA_PORT_DROP_CFG_DROP_UNTAGGED_ENA BIT(6)
+#define ANA_PORT_DROP_CFG_DROP_S_TAGGED_ENA BIT(5)
+#define ANA_PORT_DROP_CFG_DROP_C_TAGGED_ENA BIT(4)
+#define ANA_PORT_DROP_CFG_DROP_PRIO_S_TAGGED_ENA BIT(3)
+#define ANA_PORT_DROP_CFG_DROP_PRIO_C_TAGGED_ENA BIT(2)
+#define ANA_PORT_DROP_CFG_DROP_NULL_MAC_ENA BIT(1)
+#define ANA_PORT_DROP_CFG_DROP_MC_SMAC_ENA BIT(0)
+
+#define ANA_PORT_QOS_CFG_GSZ 0x100
+
+#define ANA_PORT_QOS_CFG_DP_DEFAULT_VAL BIT(8)
+#define ANA_PORT_QOS_CFG_QOS_DEFAULT_VAL(x) (((x) << 5) & GENMASK(7, 5))
+#define ANA_PORT_QOS_CFG_QOS_DEFAULT_VAL_M GENMASK(7, 5)
+#define ANA_PORT_QOS_CFG_QOS_DEFAULT_VAL_X(x) (((x) & GENMASK(7, 5)) >> 5)
+#define ANA_PORT_QOS_CFG_QOS_DSCP_ENA BIT(4)
+#define ANA_PORT_QOS_CFG_QOS_PCP_ENA BIT(3)
+#define ANA_PORT_QOS_CFG_DSCP_TRANSLATE_ENA BIT(2)
+#define ANA_PORT_QOS_CFG_DSCP_REWR_CFG(x) ((x) & GENMASK(1, 0))
+#define ANA_PORT_QOS_CFG_DSCP_REWR_CFG_M GENMASK(1, 0)
+
+#define ANA_PORT_VCAP_CFG_GSZ 0x100
+
+#define ANA_PORT_VCAP_CFG_S1_ENA BIT(14)
+#define ANA_PORT_VCAP_CFG_S1_DMAC_DIP_ENA(x) (((x) << 11) & GENMASK(13, 11))
+#define ANA_PORT_VCAP_CFG_S1_DMAC_DIP_ENA_M GENMASK(13, 11)
+#define ANA_PORT_VCAP_CFG_S1_DMAC_DIP_ENA_X(x) (((x) & GENMASK(13, 11)) >> 11)
+#define ANA_PORT_VCAP_CFG_S1_VLAN_INNER_TAG_ENA(x) (((x) << 8) & GENMASK(10, 8))
+#define ANA_PORT_VCAP_CFG_S1_VLAN_INNER_TAG_ENA_M GENMASK(10, 8)
+#define ANA_PORT_VCAP_CFG_S1_VLAN_INNER_TAG_ENA_X(x) (((x) & GENMASK(10, 8)) >> 8)
+#define ANA_PORT_VCAP_CFG_PAG_VAL(x) ((x) & GENMASK(7, 0))
+#define ANA_PORT_VCAP_CFG_PAG_VAL_M GENMASK(7, 0)
+
+#define ANA_PORT_VCAP_S1_KEY_CFG_GSZ 0x100
+#define ANA_PORT_VCAP_S1_KEY_CFG_RSZ 0x4
+
+#define ANA_PORT_VCAP_S1_KEY_CFG_S1_KEY_IP6_CFG(x) (((x) << 4) & GENMASK(6, 4))
+#define ANA_PORT_VCAP_S1_KEY_CFG_S1_KEY_IP6_CFG_M GENMASK(6, 4)
+#define ANA_PORT_VCAP_S1_KEY_CFG_S1_KEY_IP6_CFG_X(x) (((x) & GENMASK(6, 4)) >> 4)
+#define ANA_PORT_VCAP_S1_KEY_CFG_S1_KEY_IP4_CFG(x) (((x) << 2) & GENMASK(3, 2))
+#define ANA_PORT_VCAP_S1_KEY_CFG_S1_KEY_IP4_CFG_M GENMASK(3, 2)
+#define ANA_PORT_VCAP_S1_KEY_CFG_S1_KEY_IP4_CFG_X(x) (((x) & GENMASK(3, 2)) >> 2)
+#define ANA_PORT_VCAP_S1_KEY_CFG_S1_KEY_OTHER_CFG(x) ((x) & GENMASK(1, 0))
+#define ANA_PORT_VCAP_S1_KEY_CFG_S1_KEY_OTHER_CFG_M GENMASK(1, 0)
+
+#define ANA_PORT_VCAP_S2_CFG_GSZ 0x100
+
+#define ANA_PORT_VCAP_S2_CFG_S2_UDP_PAYLOAD_ENA(x) (((x) << 17) & GENMASK(18, 17))
+#define ANA_PORT_VCAP_S2_CFG_S2_UDP_PAYLOAD_ENA_M GENMASK(18, 17)
+#define ANA_PORT_VCAP_S2_CFG_S2_UDP_PAYLOAD_ENA_X(x) (((x) & GENMASK(18, 17)) >> 17)
+#define ANA_PORT_VCAP_S2_CFG_S2_ETYPE_PAYLOAD_ENA(x) (((x) << 15) & GENMASK(16, 15))
+#define ANA_PORT_VCAP_S2_CFG_S2_ETYPE_PAYLOAD_ENA_M GENMASK(16, 15)
+#define ANA_PORT_VCAP_S2_CFG_S2_ETYPE_PAYLOAD_ENA_X(x) (((x) & GENMASK(16, 15)) >> 15)
+#define ANA_PORT_VCAP_S2_CFG_S2_ENA BIT(14)
+#define ANA_PORT_VCAP_S2_CFG_S2_SNAP_DIS(x) (((x) << 12) & GENMASK(13, 12))
+#define ANA_PORT_VCAP_S2_CFG_S2_SNAP_DIS_M GENMASK(13, 12)
+#define ANA_PORT_VCAP_S2_CFG_S2_SNAP_DIS_X(x) (((x) & GENMASK(13, 12)) >> 12)
+#define ANA_PORT_VCAP_S2_CFG_S2_ARP_DIS(x) (((x) << 10) & GENMASK(11, 10))
+#define ANA_PORT_VCAP_S2_CFG_S2_ARP_DIS_M GENMASK(11, 10)
+#define ANA_PORT_VCAP_S2_CFG_S2_ARP_DIS_X(x) (((x) & GENMASK(11, 10)) >> 10)
+#define ANA_PORT_VCAP_S2_CFG_S2_IP_TCPUDP_DIS(x) (((x) << 8) & GENMASK(9, 8))
+#define ANA_PORT_VCAP_S2_CFG_S2_IP_TCPUDP_DIS_M GENMASK(9, 8)
+#define ANA_PORT_VCAP_S2_CFG_S2_IP_TCPUDP_DIS_X(x) (((x) & GENMASK(9, 8)) >> 8)
+#define ANA_PORT_VCAP_S2_CFG_S2_IP_OTHER_DIS(x) (((x) << 6) & GENMASK(7, 6))
+#define ANA_PORT_VCAP_S2_CFG_S2_IP_OTHER_DIS_M GENMASK(7, 6)
+#define ANA_PORT_VCAP_S2_CFG_S2_IP_OTHER_DIS_X(x) (((x) & GENMASK(7, 6)) >> 6)
+#define ANA_PORT_VCAP_S2_CFG_S2_IP6_CFG(x) (((x) << 2) & GENMASK(5, 2))
+#define ANA_PORT_VCAP_S2_CFG_S2_IP6_CFG_M GENMASK(5, 2)
+#define ANA_PORT_VCAP_S2_CFG_S2_IP6_CFG_X(x) (((x) & GENMASK(5, 2)) >> 2)
+#define ANA_PORT_VCAP_S2_CFG_S2_OAM_DIS(x) ((x) & GENMASK(1, 0))
+#define ANA_PORT_VCAP_S2_CFG_S2_OAM_DIS_M GENMASK(1, 0)
+
+#define ANA_PORT_PCP_DEI_MAP_GSZ 0x100
+#define ANA_PORT_PCP_DEI_MAP_RSZ 0x4
+
+#define ANA_PORT_PCP_DEI_MAP_DP_PCP_DEI_VAL BIT(3)
+#define ANA_PORT_PCP_DEI_MAP_QOS_PCP_DEI_VAL(x) ((x) & GENMASK(2, 0))
+#define ANA_PORT_PCP_DEI_MAP_QOS_PCP_DEI_VAL_M GENMASK(2, 0)
+
+#define ANA_PORT_CPU_FWD_CFG_GSZ 0x100
+
+#define ANA_PORT_CPU_FWD_CFG_CPU_VRAP_REDIR_ENA BIT(7)
+#define ANA_PORT_CPU_FWD_CFG_CPU_MLD_REDIR_ENA BIT(6)
+#define ANA_PORT_CPU_FWD_CFG_CPU_IGMP_REDIR_ENA BIT(5)
+#define ANA_PORT_CPU_FWD_CFG_CPU_IPMC_CTRL_COPY_ENA BIT(4)
+#define ANA_PORT_CPU_FWD_CFG_CPU_SRC_COPY_ENA BIT(3)
+#define ANA_PORT_CPU_FWD_CFG_CPU_ALLBRIDGE_DROP_ENA BIT(2)
+#define ANA_PORT_CPU_FWD_CFG_CPU_ALLBRIDGE_REDIR_ENA BIT(1)
+#define ANA_PORT_CPU_FWD_CFG_CPU_OAM_ENA BIT(0)
+
+#define ANA_PORT_CPU_FWD_BPDU_CFG_GSZ 0x100
+
+#define ANA_PORT_CPU_FWD_BPDU_CFG_BPDU_DROP_ENA(x) (((x) << 16) & GENMASK(31, 16))
+#define ANA_PORT_CPU_FWD_BPDU_CFG_BPDU_DROP_ENA_M GENMASK(31, 16)
+#define ANA_PORT_CPU_FWD_BPDU_CFG_BPDU_DROP_ENA_X(x) (((x) & GENMASK(31, 16)) >> 16)
+#define ANA_PORT_CPU_FWD_BPDU_CFG_BPDU_REDIR_ENA(x) ((x) & GENMASK(15, 0))
+#define ANA_PORT_CPU_FWD_BPDU_CFG_BPDU_REDIR_ENA_M GENMASK(15, 0)
+
+#define ANA_PORT_CPU_FWD_GARP_CFG_GSZ 0x100
+
+#define ANA_PORT_CPU_FWD_GARP_CFG_GARP_DROP_ENA(x) (((x) << 16) & GENMASK(31, 16))
+#define ANA_PORT_CPU_FWD_GARP_CFG_GARP_DROP_ENA_M GENMASK(31, 16)
+#define ANA_PORT_CPU_FWD_GARP_CFG_GARP_DROP_ENA_X(x) (((x) & GENMASK(31, 16)) >> 16)
+#define ANA_PORT_CPU_FWD_GARP_CFG_GARP_REDIR_ENA(x) ((x) & GENMASK(15, 0))
+#define ANA_PORT_CPU_FWD_GARP_CFG_GARP_REDIR_ENA_M GENMASK(15, 0)
+
+#define ANA_PORT_CPU_FWD_CCM_CFG_GSZ 0x100
+
+#define ANA_PORT_CPU_FWD_CCM_CFG_CCM_DROP_ENA(x) (((x) << 16) & GENMASK(31, 16))
+#define ANA_PORT_CPU_FWD_CCM_CFG_CCM_DROP_ENA_M GENMASK(31, 16)
+#define ANA_PORT_CPU_FWD_CCM_CFG_CCM_DROP_ENA_X(x) (((x) & GENMASK(31, 16)) >> 16)
+#define ANA_PORT_CPU_FWD_CCM_CFG_CCM_REDIR_ENA(x) ((x) & GENMASK(15, 0))
+#define ANA_PORT_CPU_FWD_CCM_CFG_CCM_REDIR_ENA_M GENMASK(15, 0)
+
+#define ANA_PORT_PORT_CFG_GSZ 0x100
+
+#define ANA_PORT_PORT_CFG_SRC_MIRROR_ENA BIT(15)
+#define ANA_PORT_PORT_CFG_LIMIT_DROP BIT(14)
+#define ANA_PORT_PORT_CFG_LIMIT_CPU BIT(13)
+#define ANA_PORT_PORT_CFG_LOCKED_PORTMOVE_DROP BIT(12)
+#define ANA_PORT_PORT_CFG_LOCKED_PORTMOVE_CPU BIT(11)
+#define ANA_PORT_PORT_CFG_LEARNDROP BIT(10)
+#define ANA_PORT_PORT_CFG_LEARNCPU BIT(9)
+#define ANA_PORT_PORT_CFG_LEARNAUTO BIT(8)
+#define ANA_PORT_PORT_CFG_LEARN_ENA BIT(7)
+#define ANA_PORT_PORT_CFG_RECV_ENA BIT(6)
+#define ANA_PORT_PORT_CFG_PORTID_VAL(x) (((x) << 2) & GENMASK(5, 2))
+#define ANA_PORT_PORT_CFG_PORTID_VAL_M GENMASK(5, 2)
+#define ANA_PORT_PORT_CFG_PORTID_VAL_X(x) (((x) & GENMASK(5, 2)) >> 2)
+#define ANA_PORT_PORT_CFG_USE_B_DOM_TBL BIT(1)
+#define ANA_PORT_PORT_CFG_LSR_MODE BIT(0)
+
+#define ANA_PORT_POL_CFG_GSZ 0x100
+
+#define ANA_PORT_POL_CFG_POL_CPU_REDIR_8021 BIT(19)
+#define ANA_PORT_POL_CFG_POL_CPU_REDIR_IP BIT(18)
+#define ANA_PORT_POL_CFG_PORT_POL_ENA BIT(17)
+#define ANA_PORT_POL_CFG_QUEUE_POL_ENA(x) (((x) << 9) & GENMASK(16, 9))
+#define ANA_PORT_POL_CFG_QUEUE_POL_ENA_M GENMASK(16, 9)
+#define ANA_PORT_POL_CFG_QUEUE_POL_ENA_X(x) (((x) & GENMASK(16, 9)) >> 9)
+#define ANA_PORT_POL_CFG_POL_ORDER(x) ((x) & GENMASK(8, 0))
+#define ANA_PORT_POL_CFG_POL_ORDER_M GENMASK(8, 0)
+
+#define ANA_PORT_PTP_CFG_GSZ 0x100
+
+#define ANA_PORT_PTP_CFG_PTP_BACKPLANE_MODE BIT(0)
+
+#define ANA_PORT_PTP_DLY1_CFG_GSZ 0x100
+
+#define ANA_PORT_PTP_DLY2_CFG_GSZ 0x100
+
+#define ANA_PORT_SFID_CFG_GSZ 0x100
+#define ANA_PORT_SFID_CFG_RSZ 0x4
+
+#define ANA_PORT_SFID_CFG_SFID_VALID BIT(8)
+#define ANA_PORT_SFID_CFG_SFID(x) ((x) & GENMASK(7, 0))
+#define ANA_PORT_SFID_CFG_SFID_M GENMASK(7, 0)
+
+#define ANA_PFC_PFC_CFG_GSZ 0x40
+
+#define ANA_PFC_PFC_CFG_RX_PFC_ENA(x) (((x) << 2) & GENMASK(9, 2))
+#define ANA_PFC_PFC_CFG_RX_PFC_ENA_M GENMASK(9, 2)
+#define ANA_PFC_PFC_CFG_RX_PFC_ENA_X(x) (((x) & GENMASK(9, 2)) >> 2)
+#define ANA_PFC_PFC_CFG_FC_LINK_SPEED(x) ((x) & GENMASK(1, 0))
+#define ANA_PFC_PFC_CFG_FC_LINK_SPEED_M GENMASK(1, 0)
+
+#define ANA_PFC_PFC_TIMER_GSZ 0x40
+#define ANA_PFC_PFC_TIMER_RSZ 0x4
+
+#define ANA_IPT_OAM_MEP_CFG_GSZ 0x8
+
+#define ANA_IPT_OAM_MEP_CFG_MEP_IDX_P(x) (((x) << 6) & GENMASK(10, 6))
+#define ANA_IPT_OAM_MEP_CFG_MEP_IDX_P_M GENMASK(10, 6)
+#define ANA_IPT_OAM_MEP_CFG_MEP_IDX_P_X(x) (((x) & GENMASK(10, 6)) >> 6)
+#define ANA_IPT_OAM_MEP_CFG_MEP_IDX(x) (((x) << 1) & GENMASK(5, 1))
+#define ANA_IPT_OAM_MEP_CFG_MEP_IDX_M GENMASK(5, 1)
+#define ANA_IPT_OAM_MEP_CFG_MEP_IDX_X(x) (((x) & GENMASK(5, 1)) >> 1)
+#define ANA_IPT_OAM_MEP_CFG_MEP_IDX_ENA BIT(0)
+
+#define ANA_IPT_IPT_GSZ 0x8
+
+#define ANA_IPT_IPT_IPT_CFG(x) (((x) << 15) & GENMASK(16, 15))
+#define ANA_IPT_IPT_IPT_CFG_M GENMASK(16, 15)
+#define ANA_IPT_IPT_IPT_CFG_X(x) (((x) & GENMASK(16, 15)) >> 15)
+#define ANA_IPT_IPT_ISDX_P(x) (((x) << 7) & GENMASK(14, 7))
+#define ANA_IPT_IPT_ISDX_P_M GENMASK(14, 7)
+#define ANA_IPT_IPT_ISDX_P_X(x) (((x) & GENMASK(14, 7)) >> 7)
+#define ANA_IPT_IPT_PPT_IDX(x) ((x) & GENMASK(6, 0))
+#define ANA_IPT_IPT_PPT_IDX_M GENMASK(6, 0)
+
+#define ANA_PPT_PPT_RSZ 0x4
+
+#define ANA_FID_MAP_FID_MAP_RSZ 0x4
+
+#define ANA_FID_MAP_FID_MAP_FID_C_VAL(x) (((x) << 6) & GENMASK(11, 6))
+#define ANA_FID_MAP_FID_MAP_FID_C_VAL_M GENMASK(11, 6)
+#define ANA_FID_MAP_FID_MAP_FID_C_VAL_X(x) (((x) & GENMASK(11, 6)) >> 6)
+#define ANA_FID_MAP_FID_MAP_FID_B_VAL(x) ((x) & GENMASK(5, 0))
+#define ANA_FID_MAP_FID_MAP_FID_B_VAL_M GENMASK(5, 0)
+
+#define ANA_AGGR_CFG_AC_RND_ENA BIT(7)
+#define ANA_AGGR_CFG_AC_DMAC_ENA BIT(6)
+#define ANA_AGGR_CFG_AC_SMAC_ENA BIT(5)
+#define ANA_AGGR_CFG_AC_IP6_FLOW_LBL_ENA BIT(4)
+#define ANA_AGGR_CFG_AC_IP6_TCPUDP_ENA BIT(3)
+#define ANA_AGGR_CFG_AC_IP4_SIPDIP_ENA BIT(2)
+#define ANA_AGGR_CFG_AC_IP4_TCPUDP_ENA BIT(1)
+#define ANA_AGGR_CFG_AC_ISDX_ENA BIT(0)
+
+#define ANA_CPUQ_CFG_CPUQ_MLD(x) (((x) << 27) & GENMASK(29, 27))
+#define ANA_CPUQ_CFG_CPUQ_MLD_M GENMASK(29, 27)
+#define ANA_CPUQ_CFG_CPUQ_MLD_X(x) (((x) & GENMASK(29, 27)) >> 27)
+#define ANA_CPUQ_CFG_CPUQ_IGMP(x) (((x) << 24) & GENMASK(26, 24))
+#define ANA_CPUQ_CFG_CPUQ_IGMP_M GENMASK(26, 24)
+#define ANA_CPUQ_CFG_CPUQ_IGMP_X(x) (((x) & GENMASK(26, 24)) >> 24)
+#define ANA_CPUQ_CFG_CPUQ_IPMC_CTRL(x) (((x) << 21) & GENMASK(23, 21))
+#define ANA_CPUQ_CFG_CPUQ_IPMC_CTRL_M GENMASK(23, 21)
+#define ANA_CPUQ_CFG_CPUQ_IPMC_CTRL_X(x) (((x) & GENMASK(23, 21)) >> 21)
+#define ANA_CPUQ_CFG_CPUQ_ALLBRIDGE(x) (((x) << 18) & GENMASK(20, 18))
+#define ANA_CPUQ_CFG_CPUQ_ALLBRIDGE_M GENMASK(20, 18)
+#define ANA_CPUQ_CFG_CPUQ_ALLBRIDGE_X(x) (((x) & GENMASK(20, 18)) >> 18)
+#define ANA_CPUQ_CFG_CPUQ_LOCKED_PORTMOVE(x) (((x) << 15) & GENMASK(17, 15))
+#define ANA_CPUQ_CFG_CPUQ_LOCKED_PORTMOVE_M GENMASK(17, 15)
+#define ANA_CPUQ_CFG_CPUQ_LOCKED_PORTMOVE_X(x) (((x) & GENMASK(17, 15)) >> 15)
+#define ANA_CPUQ_CFG_CPUQ_SRC_COPY(x) (((x) << 12) & GENMASK(14, 12))
+#define ANA_CPUQ_CFG_CPUQ_SRC_COPY_M GENMASK(14, 12)
+#define ANA_CPUQ_CFG_CPUQ_SRC_COPY_X(x) (((x) & GENMASK(14, 12)) >> 12)
+#define ANA_CPUQ_CFG_CPUQ_MAC_COPY(x) (((x) << 9) & GENMASK(11, 9))
+#define ANA_CPUQ_CFG_CPUQ_MAC_COPY_M GENMASK(11, 9)
+#define ANA_CPUQ_CFG_CPUQ_MAC_COPY_X(x) (((x) & GENMASK(11, 9)) >> 9)
+#define ANA_CPUQ_CFG_CPUQ_LRN(x) (((x) << 6) & GENMASK(8, 6))
+#define ANA_CPUQ_CFG_CPUQ_LRN_M GENMASK(8, 6)
+#define ANA_CPUQ_CFG_CPUQ_LRN_X(x) (((x) & GENMASK(8, 6)) >> 6)
+#define ANA_CPUQ_CFG_CPUQ_MIRROR(x) (((x) << 3) & GENMASK(5, 3))
+#define ANA_CPUQ_CFG_CPUQ_MIRROR_M GENMASK(5, 3)
+#define ANA_CPUQ_CFG_CPUQ_MIRROR_X(x) (((x) & GENMASK(5, 3)) >> 3)
+#define ANA_CPUQ_CFG_CPUQ_SFLOW(x) ((x) & GENMASK(2, 0))
+#define ANA_CPUQ_CFG_CPUQ_SFLOW_M GENMASK(2, 0)
+
+#define ANA_CPUQ_8021_CFG_RSZ 0x4
+
+#define ANA_CPUQ_8021_CFG_CPUQ_BPDU_VAL(x) (((x) << 6) & GENMASK(8, 6))
+#define ANA_CPUQ_8021_CFG_CPUQ_BPDU_VAL_M GENMASK(8, 6)
+#define ANA_CPUQ_8021_CFG_CPUQ_BPDU_VAL_X(x) (((x) & GENMASK(8, 6)) >> 6)
+#define ANA_CPUQ_8021_CFG_CPUQ_GARP_VAL(x) (((x) << 3) & GENMASK(5, 3))
+#define ANA_CPUQ_8021_CFG_CPUQ_GARP_VAL_M GENMASK(5, 3)
+#define ANA_CPUQ_8021_CFG_CPUQ_GARP_VAL_X(x) (((x) & GENMASK(5, 3)) >> 3)
+#define ANA_CPUQ_8021_CFG_CPUQ_CCM_VAL(x) ((x) & GENMASK(2, 0))
+#define ANA_CPUQ_8021_CFG_CPUQ_CCM_VAL_M GENMASK(2, 0)
+
+#define ANA_DSCP_CFG_RSZ 0x4
+
+#define ANA_DSCP_CFG_DP_DSCP_VAL BIT(11)
+#define ANA_DSCP_CFG_QOS_DSCP_VAL(x) (((x) << 8) & GENMASK(10, 8))
+#define ANA_DSCP_CFG_QOS_DSCP_VAL_M GENMASK(10, 8)
+#define ANA_DSCP_CFG_QOS_DSCP_VAL_X(x) (((x) & GENMASK(10, 8)) >> 8)
+#define ANA_DSCP_CFG_DSCP_TRANSLATE_VAL(x) (((x) << 2) & GENMASK(7, 2))
+#define ANA_DSCP_CFG_DSCP_TRANSLATE_VAL_M GENMASK(7, 2)
+#define ANA_DSCP_CFG_DSCP_TRANSLATE_VAL_X(x) (((x) & GENMASK(7, 2)) >> 2)
+#define ANA_DSCP_CFG_DSCP_TRUST_ENA BIT(1)
+#define ANA_DSCP_CFG_DSCP_REWR_ENA BIT(0)
+
+#define ANA_DSCP_REWR_CFG_RSZ 0x4
+
+#define ANA_VCAP_RNG_TYPE_CFG_RSZ 0x4
+
+#define ANA_VCAP_RNG_VAL_CFG_RSZ 0x4
+
+#define ANA_VCAP_RNG_VAL_CFG_VCAP_RNG_MIN_VAL(x) (((x) << 16) & GENMASK(31, 16))
+#define ANA_VCAP_RNG_VAL_CFG_VCAP_RNG_MIN_VAL_M GENMASK(31, 16)
+#define ANA_VCAP_RNG_VAL_CFG_VCAP_RNG_MIN_VAL_X(x) (((x) & GENMASK(31, 16)) >> 16)
+#define ANA_VCAP_RNG_VAL_CFG_VCAP_RNG_MAX_VAL(x) ((x) & GENMASK(15, 0))
+#define ANA_VCAP_RNG_VAL_CFG_VCAP_RNG_MAX_VAL_M GENMASK(15, 0)
+
+#define ANA_VRAP_CFG_VRAP_VLAN_AWARE_ENA BIT(12)
+#define ANA_VRAP_CFG_VRAP_VID(x) ((x) & GENMASK(11, 0))
+#define ANA_VRAP_CFG_VRAP_VID_M GENMASK(11, 0)
+
+#define ANA_DISCARD_CFG_DROP_TAGGING_ISDX0 BIT(3)
+#define ANA_DISCARD_CFG_DROP_CTRLPROT_ISDX0 BIT(2)
+#define ANA_DISCARD_CFG_DROP_TAGGING_S2_ENA BIT(1)
+#define ANA_DISCARD_CFG_DROP_CTRLPROT_S2_ENA BIT(0)
+
+#define ANA_FID_CFG_VID_MC_ENA BIT(0)
+
+#define ANA_POL_PIR_CFG_GSZ 0x20
+
+#define ANA_POL_PIR_CFG_PIR_RATE(x) (((x) << 6) & GENMASK(20, 6))
+#define ANA_POL_PIR_CFG_PIR_RATE_M GENMASK(20, 6)
+#define ANA_POL_PIR_CFG_PIR_RATE_X(x) (((x) & GENMASK(20, 6)) >> 6)
+#define ANA_POL_PIR_CFG_PIR_BURST(x) ((x) & GENMASK(5, 0))
+#define ANA_POL_PIR_CFG_PIR_BURST_M GENMASK(5, 0)
+
+#define ANA_POL_CIR_CFG_GSZ 0x20
+
+#define ANA_POL_CIR_CFG_CIR_RATE(x) (((x) << 6) & GENMASK(20, 6))
+#define ANA_POL_CIR_CFG_CIR_RATE_M GENMASK(20, 6)
+#define ANA_POL_CIR_CFG_CIR_RATE_X(x) (((x) & GENMASK(20, 6)) >> 6)
+#define ANA_POL_CIR_CFG_CIR_BURST(x) ((x) & GENMASK(5, 0))
+#define ANA_POL_CIR_CFG_CIR_BURST_M GENMASK(5, 0)
+
+#define ANA_POL_MODE_CFG_GSZ 0x20
+
+#define ANA_POL_MODE_CFG_IPG_SIZE(x) (((x) << 5) & GENMASK(9, 5))
+#define ANA_POL_MODE_CFG_IPG_SIZE_M GENMASK(9, 5)
+#define ANA_POL_MODE_CFG_IPG_SIZE_X(x) (((x) & GENMASK(9, 5)) >> 5)
+#define ANA_POL_MODE_CFG_FRM_MODE(x) (((x) << 3) & GENMASK(4, 3))
+#define ANA_POL_MODE_CFG_FRM_MODE_M GENMASK(4, 3)
+#define ANA_POL_MODE_CFG_FRM_MODE_X(x) (((x) & GENMASK(4, 3)) >> 3)
+#define ANA_POL_MODE_CFG_DLB_COUPLED BIT(2)
+#define ANA_POL_MODE_CFG_CIR_ENA BIT(1)
+#define ANA_POL_MODE_CFG_OVERSHOOT_ENA BIT(0)
+
+#define ANA_POL_PIR_STATE_GSZ 0x20
+
+#define ANA_POL_CIR_STATE_GSZ 0x20
+
+#define ANA_POL_STATE_GSZ 0x20
+
+#define ANA_POL_FLOWC_RSZ 0x4
+
+#define ANA_POL_FLOWC_POL_FLOWC BIT(0)
+
+#define ANA_POL_HYST_POL_FC_HYST(x) (((x) << 4) & GENMASK(9, 4))
+#define ANA_POL_HYST_POL_FC_HYST_M GENMASK(9, 4)
+#define ANA_POL_HYST_POL_FC_HYST_X(x) (((x) & GENMASK(9, 4)) >> 4)
+#define ANA_POL_HYST_POL_STOP_HYST(x) ((x) & GENMASK(3, 0))
+#define ANA_POL_HYST_POL_STOP_HYST_M GENMASK(3, 0)
+
+#define ANA_POL_MISC_CFG_POL_CLOSE_ALL BIT(1)
+#define ANA_POL_MISC_CFG_POL_LEAK_DIS BIT(0)
+
+#endif
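
Every multi-bit field in the Ocelot register headers above follows the same three-macro pattern: a setter that shifts and masks a value into place, a `_M` mask constant, and an `_X` extractor. As a minimal sketch only (the field name is taken from the macros above, but the functions and values are invented for illustration and perform no real register access):

#include <linux/bits.h>
#include <linux/types.h>

/*
 * Illustrative only: combine the setter/_M/_X macro triplet for a
 * read-modify-write of the PORTID_VAL field (bits 5:2). "regval" stands
 * for a value previously read from ANA_PORT_PORT_CFG.
 */
static u32 example_update_portid(u32 regval, u32 portid)
{
	regval &= ~ANA_PORT_PORT_CFG_PORTID_VAL_M;	/* clear bits 5:2    */
	regval |= ANA_PORT_PORT_CFG_PORTID_VAL(portid);	/* insert new value  */
	return regval;
}

/* ..._X() goes the other way and extracts the field from a raw value. */
static u32 example_read_portid(u32 regval)
{
	return ANA_PORT_PORT_CFG_PORTID_VAL_X(regval);
}
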
diff --git a/include/soc/mscc/ocelot_dev.h b/include/soc/mscc/ocelot_dev.h
new file mode 100644
index 000000000000..7c08437061fc
--- /dev/null
+++ b/include/soc/mscc/ocelot_dev.h
@@ -0,0 +1,275 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/*
+ * Microsemi Ocelot Switch driver
+ *
+ * Copyright (c) 2017 Microsemi Corporation
+ */
+
+#ifndef _MSCC_OCELOT_DEV_H_
+#define _MSCC_OCELOT_DEV_H_
+
+#define DEV_CLOCK_CFG 0x0
+
+#define DEV_CLOCK_CFG_MAC_TX_RST BIT(7)
+#define DEV_CLOCK_CFG_MAC_RX_RST BIT(6)
+#define DEV_CLOCK_CFG_PCS_TX_RST BIT(5)
+#define DEV_CLOCK_CFG_PCS_RX_RST BIT(4)
+#define DEV_CLOCK_CFG_PORT_RST BIT(3)
+#define DEV_CLOCK_CFG_PHY_RST BIT(2)
+#define DEV_CLOCK_CFG_LINK_SPEED(x) ((x) & GENMASK(1, 0))
+#define DEV_CLOCK_CFG_LINK_SPEED_M GENMASK(1, 0)
+
+#define DEV_PORT_MISC 0x4
+
+#define DEV_PORT_MISC_FWD_ERROR_ENA BIT(4)
+#define DEV_PORT_MISC_FWD_PAUSE_ENA BIT(3)
+#define DEV_PORT_MISC_FWD_CTRL_ENA BIT(2)
+#define DEV_PORT_MISC_DEV_LOOP_ENA BIT(1)
+#define DEV_PORT_MISC_HDX_FAST_DIS BIT(0)
+
+#define DEV_EVENTS 0x8
+
+#define DEV_EEE_CFG 0xc
+
+#define DEV_EEE_CFG_EEE_ENA BIT(22)
+#define DEV_EEE_CFG_EEE_TIMER_AGE(x) (((x) << 15) & GENMASK(21, 15))
+#define DEV_EEE_CFG_EEE_TIMER_AGE_M GENMASK(21, 15)
+#define DEV_EEE_CFG_EEE_TIMER_AGE_X(x) (((x) & GENMASK(21, 15)) >> 15)
+#define DEV_EEE_CFG_EEE_TIMER_WAKEUP(x) (((x) << 8) & GENMASK(14, 8))
+#define DEV_EEE_CFG_EEE_TIMER_WAKEUP_M GENMASK(14, 8)
+#define DEV_EEE_CFG_EEE_TIMER_WAKEUP_X(x) (((x) & GENMASK(14, 8)) >> 8)
+#define DEV_EEE_CFG_EEE_TIMER_HOLDOFF(x) (((x) << 1) & GENMASK(7, 1))
+#define DEV_EEE_CFG_EEE_TIMER_HOLDOFF_M GENMASK(7, 1)
+#define DEV_EEE_CFG_EEE_TIMER_HOLDOFF_X(x) (((x) & GENMASK(7, 1)) >> 1)
+#define DEV_EEE_CFG_PORT_LPI BIT(0)
+
+#define DEV_RX_PATH_DELAY 0x10
+
+#define DEV_TX_PATH_DELAY 0x14
+
+#define DEV_PTP_PREDICT_CFG 0x18
+
+#define DEV_PTP_PREDICT_CFG_PTP_PHY_PREDICT_CFG(x) (((x) << 4) & GENMASK(11, 4))
+#define DEV_PTP_PREDICT_CFG_PTP_PHY_PREDICT_CFG_M GENMASK(11, 4)
+#define DEV_PTP_PREDICT_CFG_PTP_PHY_PREDICT_CFG_X(x) (((x) & GENMASK(11, 4)) >> 4)
+#define DEV_PTP_PREDICT_CFG_PTP_PHASE_PREDICT_CFG(x) ((x) & GENMASK(3, 0))
+#define DEV_PTP_PREDICT_CFG_PTP_PHASE_PREDICT_CFG_M GENMASK(3, 0)
+
+#define DEV_MAC_ENA_CFG 0x1c
+
+#define DEV_MAC_ENA_CFG_RX_ENA BIT(4)
+#define DEV_MAC_ENA_CFG_TX_ENA BIT(0)
+
+#define DEV_MAC_MODE_CFG 0x20
+
+#define DEV_MAC_MODE_CFG_FC_WORD_SYNC_ENA BIT(8)
+#define DEV_MAC_MODE_CFG_GIGA_MODE_ENA BIT(4)
+#define DEV_MAC_MODE_CFG_FDX_ENA BIT(0)
+
+#define DEV_MAC_MAXLEN_CFG 0x24
+
+#define DEV_MAC_TAGS_CFG 0x28
+
+#define DEV_MAC_TAGS_CFG_TAG_ID(x) (((x) << 16) & GENMASK(31, 16))
+#define DEV_MAC_TAGS_CFG_TAG_ID_M GENMASK(31, 16)
+#define DEV_MAC_TAGS_CFG_TAG_ID_X(x) (((x) & GENMASK(31, 16)) >> 16)
+#define DEV_MAC_TAGS_CFG_VLAN_LEN_AWR_ENA BIT(2)
+#define DEV_MAC_TAGS_CFG_VLAN_DBL_AWR_ENA BIT(1)
+#define DEV_MAC_TAGS_CFG_VLAN_AWR_ENA BIT(0)
+
+#define DEV_MAC_ADV_CHK_CFG 0x2c
+
+#define DEV_MAC_ADV_CHK_CFG_LEN_DROP_ENA BIT(0)
+
+#define DEV_MAC_IFG_CFG 0x30
+
+#define DEV_MAC_IFG_CFG_RESTORE_OLD_IPG_CHECK BIT(17)
+#define DEV_MAC_IFG_CFG_REDUCED_TX_IFG BIT(16)
+#define DEV_MAC_IFG_CFG_TX_IFG(x) (((x) << 8) & GENMASK(12, 8))
+#define DEV_MAC_IFG_CFG_TX_IFG_M GENMASK(12, 8)
+#define DEV_MAC_IFG_CFG_TX_IFG_X(x) (((x) & GENMASK(12, 8)) >> 8)
+#define DEV_MAC_IFG_CFG_RX_IFG2(x) (((x) << 4) & GENMASK(7, 4))
+#define DEV_MAC_IFG_CFG_RX_IFG2_M GENMASK(7, 4)
+#define DEV_MAC_IFG_CFG_RX_IFG2_X(x) (((x) & GENMASK(7, 4)) >> 4)
+#define DEV_MAC_IFG_CFG_RX_IFG1(x) ((x) & GENMASK(3, 0))
+#define DEV_MAC_IFG_CFG_RX_IFG1_M GENMASK(3, 0)
+
+#define DEV_MAC_HDX_CFG 0x34
+
+#define DEV_MAC_HDX_CFG_BYPASS_COL_SYNC BIT(26)
+#define DEV_MAC_HDX_CFG_OB_ENA BIT(25)
+#define DEV_MAC_HDX_CFG_WEXC_DIS BIT(24)
+#define DEV_MAC_HDX_CFG_SEED(x) (((x) << 16) & GENMASK(23, 16))
+#define DEV_MAC_HDX_CFG_SEED_M GENMASK(23, 16)
+#define DEV_MAC_HDX_CFG_SEED_X(x) (((x) & GENMASK(23, 16)) >> 16)
+#define DEV_MAC_HDX_CFG_SEED_LOAD BIT(12)
+#define DEV_MAC_HDX_CFG_RETRY_AFTER_EXC_COL_ENA BIT(8)
+#define DEV_MAC_HDX_CFG_LATE_COL_POS(x) ((x) & GENMASK(6, 0))
+#define DEV_MAC_HDX_CFG_LATE_COL_POS_M GENMASK(6, 0)
+
+#define DEV_MAC_DBG_CFG 0x38
+
+#define DEV_MAC_DBG_CFG_TBI_MODE BIT(4)
+#define DEV_MAC_DBG_CFG_IFG_CRS_EXT_CHK_ENA BIT(0)
+
+#define DEV_MAC_FC_MAC_LOW_CFG 0x3c
+
+#define DEV_MAC_FC_MAC_HIGH_CFG 0x40
+
+#define DEV_MAC_STICKY 0x44
+
+#define DEV_MAC_STICKY_RX_IPG_SHRINK_STICKY BIT(9)
+#define DEV_MAC_STICKY_RX_PREAM_SHRINK_STICKY BIT(8)
+#define DEV_MAC_STICKY_RX_CARRIER_EXT_STICKY BIT(7)
+#define DEV_MAC_STICKY_RX_CARRIER_EXT_ERR_STICKY BIT(6)
+#define DEV_MAC_STICKY_RX_JUNK_STICKY BIT(5)
+#define DEV_MAC_STICKY_TX_RETRANSMIT_STICKY BIT(4)
+#define DEV_MAC_STICKY_TX_JAM_STICKY BIT(3)
+#define DEV_MAC_STICKY_TX_FIFO_OFLW_STICKY BIT(2)
+#define DEV_MAC_STICKY_TX_FRM_LEN_OVR_STICKY BIT(1)
+#define DEV_MAC_STICKY_TX_ABORT_STICKY BIT(0)
+
+#define PCS1G_CFG 0x48
+
+#define PCS1G_CFG_LINK_STATUS_TYPE BIT(4)
+#define PCS1G_CFG_AN_LINK_CTRL_ENA BIT(1)
+#define PCS1G_CFG_PCS_ENA BIT(0)
+
+#define PCS1G_MODE_CFG 0x4c
+
+#define PCS1G_MODE_CFG_UNIDIR_MODE_ENA BIT(4)
+#define PCS1G_MODE_CFG_SGMII_MODE_ENA BIT(0)
+
+#define PCS1G_SD_CFG 0x50
+
+#define PCS1G_SD_CFG_SD_SEL BIT(8)
+#define PCS1G_SD_CFG_SD_POL BIT(4)
+#define PCS1G_SD_CFG_SD_ENA BIT(0)
+
+#define PCS1G_ANEG_CFG 0x54
+
+#define PCS1G_ANEG_CFG_ADV_ABILITY(x) (((x) << 16) & GENMASK(31, 16))
+#define PCS1G_ANEG_CFG_ADV_ABILITY_M GENMASK(31, 16)
+#define PCS1G_ANEG_CFG_ADV_ABILITY_X(x) (((x) & GENMASK(31, 16)) >> 16)
+#define PCS1G_ANEG_CFG_SW_RESOLVE_ENA BIT(8)
+#define PCS1G_ANEG_CFG_ANEG_RESTART_ONE_SHOT BIT(1)
+#define PCS1G_ANEG_CFG_ANEG_ENA BIT(0)
+
+#define PCS1G_ANEG_NP_CFG 0x58
+
+#define PCS1G_ANEG_NP_CFG_NP_TX(x) (((x) << 16) & GENMASK(31, 16))
+#define PCS1G_ANEG_NP_CFG_NP_TX_M GENMASK(31, 16)
+#define PCS1G_ANEG_NP_CFG_NP_TX_X(x) (((x) & GENMASK(31, 16)) >> 16)
+#define PCS1G_ANEG_NP_CFG_NP_LOADED_ONE_SHOT BIT(0)
+
+#define PCS1G_LB_CFG 0x5c
+
+#define PCS1G_LB_CFG_RA_ENA BIT(4)
+#define PCS1G_LB_CFG_GMII_PHY_LB_ENA BIT(1)
+#define PCS1G_LB_CFG_TBI_HOST_LB_ENA BIT(0)
+
+#define PCS1G_DBG_CFG 0x60
+
+#define PCS1G_DBG_CFG_UDLT BIT(0)
+
+#define PCS1G_CDET_CFG 0x64
+
+#define PCS1G_CDET_CFG_CDET_ENA BIT(0)
+
+#define PCS1G_ANEG_STATUS 0x68
+
+#define PCS1G_ANEG_STATUS_LP_ADV_ABILITY(x) (((x) << 16) & GENMASK(31, 16))
+#define PCS1G_ANEG_STATUS_LP_ADV_ABILITY_M GENMASK(31, 16)
+#define PCS1G_ANEG_STATUS_LP_ADV_ABILITY_X(x) (((x) & GENMASK(31, 16)) >> 16)
+#define PCS1G_ANEG_STATUS_PR BIT(4)
+#define PCS1G_ANEG_STATUS_PAGE_RX_STICKY BIT(3)
+#define PCS1G_ANEG_STATUS_ANEG_COMPLETE BIT(0)
+
+#define PCS1G_ANEG_NP_STATUS 0x6c
+
+#define PCS1G_LINK_STATUS 0x70
+
+#define PCS1G_LINK_STATUS_DELAY_VAR(x) (((x) << 12) & GENMASK(15, 12))
+#define PCS1G_LINK_STATUS_DELAY_VAR_M GENMASK(15, 12)
+#define PCS1G_LINK_STATUS_DELAY_VAR_X(x) (((x) & GENMASK(15, 12)) >> 12)
+#define PCS1G_LINK_STATUS_SIGNAL_DETECT BIT(8)
+#define PCS1G_LINK_STATUS_LINK_STATUS BIT(4)
+#define PCS1G_LINK_STATUS_SYNC_STATUS BIT(0)
+
+#define PCS1G_LINK_DOWN_CNT 0x74
+
+#define PCS1G_STICKY 0x78
+
+#define PCS1G_STICKY_LINK_DOWN_STICKY BIT(4)
+#define PCS1G_STICKY_OUT_OF_SYNC_STICKY BIT(0)
+
+#define PCS1G_DEBUG_STATUS 0x7c
+
+#define PCS1G_LPI_CFG 0x80
+
+#define PCS1G_LPI_CFG_QSGMII_MS_SEL BIT(20)
+#define PCS1G_LPI_CFG_RX_LPI_OUT_DIS BIT(17)
+#define PCS1G_LPI_CFG_LPI_TESTMODE BIT(16)
+#define PCS1G_LPI_CFG_LPI_RX_WTIM(x) (((x) << 4) & GENMASK(5, 4))
+#define PCS1G_LPI_CFG_LPI_RX_WTIM_M GENMASK(5, 4)
+#define PCS1G_LPI_CFG_LPI_RX_WTIM_X(x) (((x) & GENMASK(5, 4)) >> 4)
+#define PCS1G_LPI_CFG_TX_ASSERT_LPIDLE BIT(0)
+
+#define PCS1G_LPI_WAKE_ERROR_CNT 0x84
+
+#define PCS1G_LPI_STATUS 0x88
+
+#define PCS1G_LPI_STATUS_RX_LPI_FAIL BIT(16)
+#define PCS1G_LPI_STATUS_RX_LPI_EVENT_STICKY BIT(12)
+#define PCS1G_LPI_STATUS_RX_QUIET BIT(9)
+#define PCS1G_LPI_STATUS_RX_LPI_MODE BIT(8)
+#define PCS1G_LPI_STATUS_TX_LPI_EVENT_STICKY BIT(4)
+#define PCS1G_LPI_STATUS_TX_QUIET BIT(1)
+#define PCS1G_LPI_STATUS_TX_LPI_MODE BIT(0)
+
+#define PCS1G_TSTPAT_MODE_CFG 0x8c
+
+#define PCS1G_TSTPAT_STATUS 0x90
+
+#define PCS1G_TSTPAT_STATUS_JTP_ERR_CNT(x) (((x) << 8) & GENMASK(15, 8))
+#define PCS1G_TSTPAT_STATUS_JTP_ERR_CNT_M GENMASK(15, 8)
+#define PCS1G_TSTPAT_STATUS_JTP_ERR_CNT_X(x) (((x) & GENMASK(15, 8)) >> 8)
+#define PCS1G_TSTPAT_STATUS_JTP_ERR BIT(4)
+#define PCS1G_TSTPAT_STATUS_JTP_LOCK BIT(0)
+
+#define DEV_PCS_FX100_CFG 0x94
+
+#define DEV_PCS_FX100_CFG_SD_SEL BIT(26)
+#define DEV_PCS_FX100_CFG_SD_POL BIT(25)
+#define DEV_PCS_FX100_CFG_SD_ENA BIT(24)
+#define DEV_PCS_FX100_CFG_LOOPBACK_ENA BIT(20)
+#define DEV_PCS_FX100_CFG_SWAP_MII_ENA BIT(16)
+#define DEV_PCS_FX100_CFG_RXBITSEL(x) (((x) << 12) & GENMASK(15, 12))
+#define DEV_PCS_FX100_CFG_RXBITSEL_M GENMASK(15, 12)
+#define DEV_PCS_FX100_CFG_RXBITSEL_X(x) (((x) & GENMASK(15, 12)) >> 12)
+#define DEV_PCS_FX100_CFG_SIGDET_CFG(x) (((x) << 9) & GENMASK(10, 9))
+#define DEV_PCS_FX100_CFG_SIGDET_CFG_M GENMASK(10, 9)
+#define DEV_PCS_FX100_CFG_SIGDET_CFG_X(x) (((x) & GENMASK(10, 9)) >> 9)
+#define DEV_PCS_FX100_CFG_LINKHYST_TM_ENA BIT(8)
+#define DEV_PCS_FX100_CFG_LINKHYSTTIMER(x) (((x) << 4) & GENMASK(7, 4))
+#define DEV_PCS_FX100_CFG_LINKHYSTTIMER_M GENMASK(7, 4)
+#define DEV_PCS_FX100_CFG_LINKHYSTTIMER_X(x) (((x) & GENMASK(7, 4)) >> 4)
+#define DEV_PCS_FX100_CFG_UNIDIR_MODE_ENA BIT(3)
+#define DEV_PCS_FX100_CFG_FEFCHK_ENA BIT(2)
+#define DEV_PCS_FX100_CFG_FEFGEN_ENA BIT(1)
+#define DEV_PCS_FX100_CFG_PCS_ENA BIT(0)
+
+#define DEV_PCS_FX100_STATUS 0x98
+
+#define DEV_PCS_FX100_STATUS_EDGE_POS_PTP(x) (((x) << 8) & GENMASK(11, 8))
+#define DEV_PCS_FX100_STATUS_EDGE_POS_PTP_M GENMASK(11, 8)
+#define DEV_PCS_FX100_STATUS_EDGE_POS_PTP_X(x) (((x) & GENMASK(11, 8)) >> 8)
+#define DEV_PCS_FX100_STATUS_PCS_ERROR_STICKY BIT(7)
+#define DEV_PCS_FX100_STATUS_FEF_FOUND_STICKY BIT(6)
+#define DEV_PCS_FX100_STATUS_SSD_ERROR_STICKY BIT(5)
+#define DEV_PCS_FX100_STATUS_SYNC_LOST_STICKY BIT(4)
+#define DEV_PCS_FX100_STATUS_FEF_STATUS BIT(2)
+#define DEV_PCS_FX100_STATUS_SIGNAL_DETECT BIT(1)
+#define DEV_PCS_FX100_STATUS_SYNC_STATUS BIT(0)
+
+#endif
diff --git a/include/soc/mscc/ocelot_qsys.h b/include/soc/mscc/ocelot_qsys.h
new file mode 100644
index 000000000000..d8c63aa761be
--- /dev/null
+++ b/include/soc/mscc/ocelot_qsys.h
@@ -0,0 +1,270 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/*
+ * Microsemi Ocelot Switch driver
+ *
+ * Copyright (c) 2017 Microsemi Corporation
+ */
+
+#ifndef _MSCC_OCELOT_QSYS_H_
+#define _MSCC_OCELOT_QSYS_H_
+
+#define QSYS_PORT_MODE_RSZ 0x4
+
+#define QSYS_PORT_MODE_DEQUEUE_DIS BIT(1)
+#define QSYS_PORT_MODE_DEQUEUE_LATE BIT(0)
+
+#define QSYS_SWITCH_PORT_MODE_RSZ 0x4
+
+#define QSYS_SWITCH_PORT_MODE_PORT_ENA BIT(14)
+#define QSYS_SWITCH_PORT_MODE_SCH_NEXT_CFG(x) (((x) << 11) & GENMASK(13, 11))
+#define QSYS_SWITCH_PORT_MODE_SCH_NEXT_CFG_M GENMASK(13, 11)
+#define QSYS_SWITCH_PORT_MODE_SCH_NEXT_CFG_X(x) (((x) & GENMASK(13, 11)) >> 11)
+#define QSYS_SWITCH_PORT_MODE_YEL_RSRVD BIT(10)
+#define QSYS_SWITCH_PORT_MODE_INGRESS_DROP_MODE BIT(9)
+#define QSYS_SWITCH_PORT_MODE_TX_PFC_ENA(x) (((x) << 1) & GENMASK(8, 1))
+#define QSYS_SWITCH_PORT_MODE_TX_PFC_ENA_M GENMASK(8, 1)
+#define QSYS_SWITCH_PORT_MODE_TX_PFC_ENA_X(x) (((x) & GENMASK(8, 1)) >> 1)
+#define QSYS_SWITCH_PORT_MODE_TX_PFC_MODE BIT(0)
+
+#define QSYS_STAT_CNT_CFG_TX_GREEN_CNT_MODE BIT(5)
+#define QSYS_STAT_CNT_CFG_TX_YELLOW_CNT_MODE BIT(4)
+#define QSYS_STAT_CNT_CFG_DROP_GREEN_CNT_MODE BIT(3)
+#define QSYS_STAT_CNT_CFG_DROP_YELLOW_CNT_MODE BIT(2)
+#define QSYS_STAT_CNT_CFG_DROP_COUNT_ONCE BIT(1)
+#define QSYS_STAT_CNT_CFG_DROP_COUNT_EGRESS BIT(0)
+
+#define QSYS_EEE_CFG_RSZ 0x4
+
+#define QSYS_EEE_THRES_EEE_HIGH_BYTES(x) (((x) << 8) & GENMASK(15, 8))
+#define QSYS_EEE_THRES_EEE_HIGH_BYTES_M GENMASK(15, 8)
+#define QSYS_EEE_THRES_EEE_HIGH_BYTES_X(x) (((x) & GENMASK(15, 8)) >> 8)
+#define QSYS_EEE_THRES_EEE_HIGH_FRAMES(x) ((x) & GENMASK(7, 0))
+#define QSYS_EEE_THRES_EEE_HIGH_FRAMES_M GENMASK(7, 0)
+
+#define QSYS_SW_STATUS_RSZ 0x4
+
+#define QSYS_EXT_CPU_CFG_EXT_CPU_PORT(x) (((x) << 8) & GENMASK(12, 8))
+#define QSYS_EXT_CPU_CFG_EXT_CPU_PORT_M GENMASK(12, 8)
+#define QSYS_EXT_CPU_CFG_EXT_CPU_PORT_X(x) (((x) & GENMASK(12, 8)) >> 8)
+#define QSYS_EXT_CPU_CFG_EXT_CPUQ_MSK(x) ((x) & GENMASK(7, 0))
+#define QSYS_EXT_CPU_CFG_EXT_CPUQ_MSK_M GENMASK(7, 0)
+
+#define QSYS_QMAP_GSZ 0x4
+
+#define QSYS_QMAP_SE_BASE(x) (((x) << 5) & GENMASK(12, 5))
+#define QSYS_QMAP_SE_BASE_M GENMASK(12, 5)
+#define QSYS_QMAP_SE_BASE_X(x) (((x) & GENMASK(12, 5)) >> 5)
+#define QSYS_QMAP_SE_IDX_SEL(x) (((x) << 2) & GENMASK(4, 2))
+#define QSYS_QMAP_SE_IDX_SEL_M GENMASK(4, 2)
+#define QSYS_QMAP_SE_IDX_SEL_X(x) (((x) & GENMASK(4, 2)) >> 2)
+#define QSYS_QMAP_SE_INP_SEL(x) ((x) & GENMASK(1, 0))
+#define QSYS_QMAP_SE_INP_SEL_M GENMASK(1, 0)
+
+#define QSYS_ISDX_SGRP_GSZ 0x4
+
+#define QSYS_TIMED_FRAME_ENTRY_GSZ 0x4
+
+#define QSYS_TFRM_MISC_TIMED_CANCEL_SLOT(x) (((x) << 9) & GENMASK(18, 9))
+#define QSYS_TFRM_MISC_TIMED_CANCEL_SLOT_M GENMASK(18, 9)
+#define QSYS_TFRM_MISC_TIMED_CANCEL_SLOT_X(x) (((x) & GENMASK(18, 9)) >> 9)
+#define QSYS_TFRM_MISC_TIMED_CANCEL_1SHOT BIT(8)
+#define QSYS_TFRM_MISC_TIMED_SLOT_MODE_MC BIT(7)
+#define QSYS_TFRM_MISC_TIMED_ENTRY_FAST_CNT(x) ((x) & GENMASK(6, 0))
+#define QSYS_TFRM_MISC_TIMED_ENTRY_FAST_CNT_M GENMASK(6, 0)
+
+#define QSYS_RED_PROFILE_RSZ 0x4
+
+#define QSYS_RED_PROFILE_WM_RED_LOW(x) (((x) << 8) & GENMASK(15, 8))
+#define QSYS_RED_PROFILE_WM_RED_LOW_M GENMASK(15, 8)
+#define QSYS_RED_PROFILE_WM_RED_LOW_X(x) (((x) & GENMASK(15, 8)) >> 8)
+#define QSYS_RED_PROFILE_WM_RED_HIGH(x) ((x) & GENMASK(7, 0))
+#define QSYS_RED_PROFILE_WM_RED_HIGH_M GENMASK(7, 0)
+
+#define QSYS_RES_CFG_GSZ 0x8
+
+#define QSYS_RES_STAT_GSZ 0x8
+
+#define QSYS_RES_STAT_INUSE(x) (((x) << 12) & GENMASK(23, 12))
+#define QSYS_RES_STAT_INUSE_M GENMASK(23, 12)
+#define QSYS_RES_STAT_INUSE_X(x) (((x) & GENMASK(23, 12)) >> 12)
+#define QSYS_RES_STAT_MAXUSE(x) ((x) & GENMASK(11, 0))
+#define QSYS_RES_STAT_MAXUSE_M GENMASK(11, 0)
+
+#define QSYS_EVENTS_CORE_EV_FDC(x) (((x) << 2) & GENMASK(4, 2))
+#define QSYS_EVENTS_CORE_EV_FDC_M GENMASK(4, 2)
+#define QSYS_EVENTS_CORE_EV_FDC_X(x) (((x) & GENMASK(4, 2)) >> 2)
+#define QSYS_EVENTS_CORE_EV_FRD(x) ((x) & GENMASK(1, 0))
+#define QSYS_EVENTS_CORE_EV_FRD_M GENMASK(1, 0)
+
+#define QSYS_QMAXSDU_CFG_0_RSZ 0x4
+
+#define QSYS_QMAXSDU_CFG_1_RSZ 0x4
+
+#define QSYS_QMAXSDU_CFG_2_RSZ 0x4
+
+#define QSYS_QMAXSDU_CFG_3_RSZ 0x4
+
+#define QSYS_QMAXSDU_CFG_4_RSZ 0x4
+
+#define QSYS_QMAXSDU_CFG_5_RSZ 0x4
+
+#define QSYS_QMAXSDU_CFG_6_RSZ 0x4
+
+#define QSYS_QMAXSDU_CFG_7_RSZ 0x4
+
+#define QSYS_PREEMPTION_CFG_RSZ 0x4
+
+#define QSYS_PREEMPTION_CFG_P_QUEUES(x) ((x) & GENMASK(7, 0))
+#define QSYS_PREEMPTION_CFG_P_QUEUES_M GENMASK(7, 0)
+#define QSYS_PREEMPTION_CFG_MM_ADD_FRAG_SIZE(x) (((x) << 8) & GENMASK(9, 8))
+#define QSYS_PREEMPTION_CFG_MM_ADD_FRAG_SIZE_M GENMASK(9, 8)
+#define QSYS_PREEMPTION_CFG_MM_ADD_FRAG_SIZE_X(x) (((x) & GENMASK(9, 8)) >> 8)
+#define QSYS_PREEMPTION_CFG_STRICT_IPG(x) (((x) << 12) & GENMASK(13, 12))
+#define QSYS_PREEMPTION_CFG_STRICT_IPG_M GENMASK(13, 12)
+#define QSYS_PREEMPTION_CFG_STRICT_IPG_X(x) (((x) & GENMASK(13, 12)) >> 12)
+#define QSYS_PREEMPTION_CFG_HOLD_ADVANCE(x) (((x) << 16) & GENMASK(31, 16))
+#define QSYS_PREEMPTION_CFG_HOLD_ADVANCE_M GENMASK(31, 16)
+#define QSYS_PREEMPTION_CFG_HOLD_ADVANCE_X(x) (((x) & GENMASK(31, 16)) >> 16)
+
+#define QSYS_CIR_CFG_GSZ 0x80
+
+#define QSYS_CIR_CFG_CIR_RATE(x) (((x) << 6) & GENMASK(20, 6))
+#define QSYS_CIR_CFG_CIR_RATE_M GENMASK(20, 6)
+#define QSYS_CIR_CFG_CIR_RATE_X(x) (((x) & GENMASK(20, 6)) >> 6)
+#define QSYS_CIR_CFG_CIR_BURST(x) ((x) & GENMASK(5, 0))
+#define QSYS_CIR_CFG_CIR_BURST_M GENMASK(5, 0)
+
+#define QSYS_EIR_CFG_GSZ 0x80
+
+#define QSYS_EIR_CFG_EIR_RATE(x) (((x) << 7) & GENMASK(21, 7))
+#define QSYS_EIR_CFG_EIR_RATE_M GENMASK(21, 7)
+#define QSYS_EIR_CFG_EIR_RATE_X(x) (((x) & GENMASK(21, 7)) >> 7)
+#define QSYS_EIR_CFG_EIR_BURST(x) (((x) << 1) & GENMASK(6, 1))
+#define QSYS_EIR_CFG_EIR_BURST_M GENMASK(6, 1)
+#define QSYS_EIR_CFG_EIR_BURST_X(x) (((x) & GENMASK(6, 1)) >> 1)
+#define QSYS_EIR_CFG_EIR_MARK_ENA BIT(0)
+
+#define QSYS_SE_CFG_GSZ 0x80
+
+#define QSYS_SE_CFG_SE_DWRR_CNT(x) (((x) << 6) & GENMASK(9, 6))
+#define QSYS_SE_CFG_SE_DWRR_CNT_M GENMASK(9, 6)
+#define QSYS_SE_CFG_SE_DWRR_CNT_X(x) (((x) & GENMASK(9, 6)) >> 6)
+#define QSYS_SE_CFG_SE_RR_ENA BIT(5)
+#define QSYS_SE_CFG_SE_AVB_ENA BIT(4)
+#define QSYS_SE_CFG_SE_FRM_MODE(x) (((x) << 2) & GENMASK(3, 2))
+#define QSYS_SE_CFG_SE_FRM_MODE_M GENMASK(3, 2)
+#define QSYS_SE_CFG_SE_FRM_MODE_X(x) (((x) & GENMASK(3, 2)) >> 2)
+#define QSYS_SE_CFG_SE_EXC_ENA BIT(1)
+#define QSYS_SE_CFG_SE_EXC_FWD BIT(0)
+
+#define QSYS_SE_DWRR_CFG_GSZ 0x80
+#define QSYS_SE_DWRR_CFG_RSZ 0x4
+
+#define QSYS_SE_CONNECT_GSZ 0x80
+
+#define QSYS_SE_CONNECT_SE_OUTP_IDX(x) (((x) << 17) & GENMASK(24, 17))
+#define QSYS_SE_CONNECT_SE_OUTP_IDX_M GENMASK(24, 17)
+#define QSYS_SE_CONNECT_SE_OUTP_IDX_X(x) (((x) & GENMASK(24, 17)) >> 17)
+#define QSYS_SE_CONNECT_SE_INP_IDX(x) (((x) << 9) & GENMASK(16, 9))
+#define QSYS_SE_CONNECT_SE_INP_IDX_M GENMASK(16, 9)
+#define QSYS_SE_CONNECT_SE_INP_IDX_X(x) (((x) & GENMASK(16, 9)) >> 9)
+#define QSYS_SE_CONNECT_SE_OUTP_CON(x) (((x) << 5) & GENMASK(8, 5))
+#define QSYS_SE_CONNECT_SE_OUTP_CON_M GENMASK(8, 5)
+#define QSYS_SE_CONNECT_SE_OUTP_CON_X(x) (((x) & GENMASK(8, 5)) >> 5)
+#define QSYS_SE_CONNECT_SE_INP_CNT(x) (((x) << 1) & GENMASK(4, 1))
+#define QSYS_SE_CONNECT_SE_INP_CNT_M GENMASK(4, 1)
+#define QSYS_SE_CONNECT_SE_INP_CNT_X(x) (((x) & GENMASK(4, 1)) >> 1)
+#define QSYS_SE_CONNECT_SE_TERMINAL BIT(0)
+
+#define QSYS_SE_DLB_SENSE_GSZ 0x80
+
+#define QSYS_SE_DLB_SENSE_SE_DLB_PRIO(x) (((x) << 11) & GENMASK(13, 11))
+#define QSYS_SE_DLB_SENSE_SE_DLB_PRIO_M GENMASK(13, 11)
+#define QSYS_SE_DLB_SENSE_SE_DLB_PRIO_X(x) (((x) & GENMASK(13, 11)) >> 11)
+#define QSYS_SE_DLB_SENSE_SE_DLB_SPORT(x) (((x) << 7) & GENMASK(10, 7))
+#define QSYS_SE_DLB_SENSE_SE_DLB_SPORT_M GENMASK(10, 7)
+#define QSYS_SE_DLB_SENSE_SE_DLB_SPORT_X(x) (((x) & GENMASK(10, 7)) >> 7)
+#define QSYS_SE_DLB_SENSE_SE_DLB_DPORT(x) (((x) << 3) & GENMASK(6, 3))
+#define QSYS_SE_DLB_SENSE_SE_DLB_DPORT_M GENMASK(6, 3)
+#define QSYS_SE_DLB_SENSE_SE_DLB_DPORT_X(x) (((x) & GENMASK(6, 3)) >> 3)
+#define QSYS_SE_DLB_SENSE_SE_DLB_PRIO_ENA BIT(2)
+#define QSYS_SE_DLB_SENSE_SE_DLB_SPORT_ENA BIT(1)
+#define QSYS_SE_DLB_SENSE_SE_DLB_DPORT_ENA BIT(0)
+
+#define QSYS_CIR_STATE_GSZ 0x80
+
+#define QSYS_CIR_STATE_CIR_LVL(x) (((x) << 4) & GENMASK(25, 4))
+#define QSYS_CIR_STATE_CIR_LVL_M GENMASK(25, 4)
+#define QSYS_CIR_STATE_CIR_LVL_X(x) (((x) & GENMASK(25, 4)) >> 4)
+#define QSYS_CIR_STATE_SHP_TIME(x) ((x) & GENMASK(3, 0))
+#define QSYS_CIR_STATE_SHP_TIME_M GENMASK(3, 0)
+
+#define QSYS_EIR_STATE_GSZ 0x80
+
+#define QSYS_SE_STATE_GSZ 0x80
+
+#define QSYS_SE_STATE_SE_OUTP_LVL(x) (((x) << 1) & GENMASK(2, 1))
+#define QSYS_SE_STATE_SE_OUTP_LVL_M GENMASK(2, 1)
+#define QSYS_SE_STATE_SE_OUTP_LVL_X(x) (((x) & GENMASK(2, 1)) >> 1)
+#define QSYS_SE_STATE_SE_WAS_YEL BIT(0)
+
+#define QSYS_HSCH_MISC_CFG_SE_CONNECT_VLD BIT(8)
+#define QSYS_HSCH_MISC_CFG_FRM_ADJ(x) (((x) << 3) & GENMASK(7, 3))
+#define QSYS_HSCH_MISC_CFG_FRM_ADJ_M GENMASK(7, 3)
+#define QSYS_HSCH_MISC_CFG_FRM_ADJ_X(x) (((x) & GENMASK(7, 3)) >> 3)
+#define QSYS_HSCH_MISC_CFG_LEAK_DIS BIT(2)
+#define QSYS_HSCH_MISC_CFG_QSHP_EXC_ENA BIT(1)
+#define QSYS_HSCH_MISC_CFG_PFC_BYP_UPD BIT(0)
+
+#define QSYS_TAG_CONFIG_RSZ 0x4
+
+#define QSYS_TAG_CONFIG_ENABLE BIT(0)
+#define QSYS_TAG_CONFIG_LINK_SPEED(x) (((x) << 4) & GENMASK(5, 4))
+#define QSYS_TAG_CONFIG_LINK_SPEED_M GENMASK(5, 4)
+#define QSYS_TAG_CONFIG_LINK_SPEED_X(x) (((x) & GENMASK(5, 4)) >> 4)
+#define QSYS_TAG_CONFIG_INIT_GATE_STATE(x) (((x) << 8) & GENMASK(15, 8))
+#define QSYS_TAG_CONFIG_INIT_GATE_STATE_M GENMASK(15, 8)
+#define QSYS_TAG_CONFIG_INIT_GATE_STATE_X(x) (((x) & GENMASK(15, 8)) >> 8)
+#define QSYS_TAG_CONFIG_SCH_TRAFFIC_QUEUES(x) (((x) << 16) & GENMASK(23, 16))
+#define QSYS_TAG_CONFIG_SCH_TRAFFIC_QUEUES_M GENMASK(23, 16)
+#define QSYS_TAG_CONFIG_SCH_TRAFFIC_QUEUES_X(x) (((x) & GENMASK(23, 16)) >> 16)
+
+#define QSYS_TAS_PARAM_CFG_CTRL_PORT_NUM(x) ((x) & GENMASK(7, 0))
+#define QSYS_TAS_PARAM_CFG_CTRL_PORT_NUM_M GENMASK(7, 0)
+#define QSYS_TAS_PARAM_CFG_CTRL_ALWAYS_GUARD_BAND_SCH_Q BIT(8)
+#define QSYS_TAS_PARAM_CFG_CTRL_CONFIG_CHANGE BIT(16)
+
+#define QSYS_PORT_MAX_SDU_RSZ 0x4
+
+#define QSYS_PARAM_CFG_REG_3_BASE_TIME_SEC_MSB(x) ((x) & GENMASK(15, 0))
+#define QSYS_PARAM_CFG_REG_3_BASE_TIME_SEC_MSB_M GENMASK(15, 0)
+#define QSYS_PARAM_CFG_REG_3_LIST_LENGTH(x) (((x) << 16) & GENMASK(31, 16))
+#define QSYS_PARAM_CFG_REG_3_LIST_LENGTH_M GENMASK(31, 16)
+#define QSYS_PARAM_CFG_REG_3_LIST_LENGTH_X(x) (((x) & GENMASK(31, 16)) >> 16)
+
+#define QSYS_GCL_CFG_REG_1_GCL_ENTRY_NUM(x) ((x) & GENMASK(5, 0))
+#define QSYS_GCL_CFG_REG_1_GCL_ENTRY_NUM_M GENMASK(5, 0)
+#define QSYS_GCL_CFG_REG_1_GATE_STATE(x) (((x) << 8) & GENMASK(15, 8))
+#define QSYS_GCL_CFG_REG_1_GATE_STATE_M GENMASK(15, 8)
+#define QSYS_GCL_CFG_REG_1_GATE_STATE_X(x) (((x) & GENMASK(15, 8)) >> 8)
+
+#define QSYS_PARAM_STATUS_REG_3_BASE_TIME_SEC_MSB(x) ((x) & GENMASK(15, 0))
+#define QSYS_PARAM_STATUS_REG_3_BASE_TIME_SEC_MSB_M GENMASK(15, 0)
+#define QSYS_PARAM_STATUS_REG_3_LIST_LENGTH(x) (((x) << 16) & GENMASK(31, 16))
+#define QSYS_PARAM_STATUS_REG_3_LIST_LENGTH_M GENMASK(31, 16)
+#define QSYS_PARAM_STATUS_REG_3_LIST_LENGTH_X(x) (((x) & GENMASK(31, 16)) >> 16)
+
+#define QSYS_PARAM_STATUS_REG_8_CFG_CHG_TIME_SEC_MSB(x) ((x) & GENMASK(15, 0))
+#define QSYS_PARAM_STATUS_REG_8_CFG_CHG_TIME_SEC_MSB_M GENMASK(15, 0)
+#define QSYS_PARAM_STATUS_REG_8_OPER_GATE_STATE(x) (((x) << 16) & GENMASK(23, 16))
+#define QSYS_PARAM_STATUS_REG_8_OPER_GATE_STATE_M GENMASK(23, 16)
+#define QSYS_PARAM_STATUS_REG_8_OPER_GATE_STATE_X(x) (((x) & GENMASK(23, 16)) >> 16)
+#define QSYS_PARAM_STATUS_REG_8_CONFIG_PENDING BIT(24)
+
+#define QSYS_GCL_STATUS_REG_1_GCL_ENTRY_NUM(x) ((x) & GENMASK(5, 0))
+#define QSYS_GCL_STATUS_REG_1_GCL_ENTRY_NUM_M GENMASK(5, 0)
+#define QSYS_GCL_STATUS_REG_1_GATE_STATE(x) (((x) << 8) & GENMASK(15, 8))
+#define QSYS_GCL_STATUS_REG_1_GATE_STATE_M GENMASK(15, 8)
+#define QSYS_GCL_STATUS_REG_1_GATE_STATE_X(x) (((x) & GENMASK(15, 8)) >> 8)
+
+#endif
diff --git a/include/soc/mscc/ocelot_sys.h b/include/soc/mscc/ocelot_sys.h
new file mode 100644
index 000000000000..16f91e172bcb
--- /dev/null
+++ b/include/soc/mscc/ocelot_sys.h
@@ -0,0 +1,144 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR MIT) */
+/*
+ * Microsemi Ocelot Switch driver
+ *
+ * Copyright (c) 2017 Microsemi Corporation
+ */
+
+#ifndef _MSCC_OCELOT_SYS_H_
+#define _MSCC_OCELOT_SYS_H_
+
+#define SYS_COUNT_RX_OCTETS_RSZ 0x4
+
+#define SYS_COUNT_TX_OCTETS_RSZ 0x4
+
+#define SYS_PORT_MODE_RSZ 0x4
+
+#define SYS_PORT_MODE_DATA_WO_TS(x) (((x) << 5) & GENMASK(6, 5))
+#define SYS_PORT_MODE_DATA_WO_TS_M GENMASK(6, 5)
+#define SYS_PORT_MODE_DATA_WO_TS_X(x) (((x) & GENMASK(6, 5)) >> 5)
+#define SYS_PORT_MODE_INCL_INJ_HDR(x) (((x) << 3) & GENMASK(4, 3))
+#define SYS_PORT_MODE_INCL_INJ_HDR_M GENMASK(4, 3)
+#define SYS_PORT_MODE_INCL_INJ_HDR_X(x) (((x) & GENMASK(4, 3)) >> 3)
+#define SYS_PORT_MODE_INCL_XTR_HDR(x) (((x) << 1) & GENMASK(2, 1))
+#define SYS_PORT_MODE_INCL_XTR_HDR_M GENMASK(2, 1)
+#define SYS_PORT_MODE_INCL_XTR_HDR_X(x) (((x) & GENMASK(2, 1)) >> 1)
+#define SYS_PORT_MODE_INJ_HDR_ERR BIT(0)
+
+#define SYS_FRONT_PORT_MODE_RSZ 0x4
+
+#define SYS_FRONT_PORT_MODE_HDX_MODE BIT(0)
+
+#define SYS_FRM_AGING_AGE_TX_ENA BIT(20)
+#define SYS_FRM_AGING_MAX_AGE(x) ((x) & GENMASK(19, 0))
+#define SYS_FRM_AGING_MAX_AGE_M GENMASK(19, 0)
+
+#define SYS_STAT_CFG_STAT_CLEAR_SHOT(x) (((x) << 10) & GENMASK(16, 10))
+#define SYS_STAT_CFG_STAT_CLEAR_SHOT_M GENMASK(16, 10)
+#define SYS_STAT_CFG_STAT_CLEAR_SHOT_X(x) (((x) & GENMASK(16, 10)) >> 10)
+#define SYS_STAT_CFG_STAT_VIEW(x) ((x) & GENMASK(9, 0))
+#define SYS_STAT_CFG_STAT_VIEW_M GENMASK(9, 0)
+
+#define SYS_SW_STATUS_RSZ 0x4
+
+#define SYS_SW_STATUS_PORT_RX_PAUSED BIT(0)
+
+#define SYS_MISC_CFG_PTP_RSRV_CLR BIT(1)
+#define SYS_MISC_CFG_PTP_DIS_NEG_RO BIT(0)
+
+#define SYS_REW_MAC_HIGH_CFG_RSZ 0x4
+
+#define SYS_REW_MAC_LOW_CFG_RSZ 0x4
+
+#define SYS_TIMESTAMP_OFFSET_ETH_TYPE_CFG(x) (((x) << 6) & GENMASK(21, 6))
+#define SYS_TIMESTAMP_OFFSET_ETH_TYPE_CFG_M GENMASK(21, 6)
+#define SYS_TIMESTAMP_OFFSET_ETH_TYPE_CFG_X(x) (((x) & GENMASK(21, 6)) >> 6)
+#define SYS_TIMESTAMP_OFFSET_TIMESTAMP_OFFSET(x) ((x) & GENMASK(5, 0))
+#define SYS_TIMESTAMP_OFFSET_TIMESTAMP_OFFSET_M GENMASK(5, 0)
+
+#define SYS_PAUSE_CFG_RSZ 0x4
+
+#define SYS_PAUSE_CFG_PAUSE_START(x) (((x) << 10) & GENMASK(18, 10))
+#define SYS_PAUSE_CFG_PAUSE_START_M GENMASK(18, 10)
+#define SYS_PAUSE_CFG_PAUSE_START_X(x) (((x) & GENMASK(18, 10)) >> 10)
+#define SYS_PAUSE_CFG_PAUSE_STOP(x) (((x) << 1) & GENMASK(9, 1))
+#define SYS_PAUSE_CFG_PAUSE_STOP_M GENMASK(9, 1)
+#define SYS_PAUSE_CFG_PAUSE_STOP_X(x) (((x) & GENMASK(9, 1)) >> 1)
+#define SYS_PAUSE_CFG_PAUSE_ENA BIT(0)
+
+#define SYS_PAUSE_TOT_CFG_PAUSE_TOT_START(x) (((x) << 9) & GENMASK(17, 9))
+#define SYS_PAUSE_TOT_CFG_PAUSE_TOT_START_M GENMASK(17, 9)
+#define SYS_PAUSE_TOT_CFG_PAUSE_TOT_START_X(x) (((x) & GENMASK(17, 9)) >> 9)
+#define SYS_PAUSE_TOT_CFG_PAUSE_TOT_STOP(x) ((x) & GENMASK(8, 0))
+#define SYS_PAUSE_TOT_CFG_PAUSE_TOT_STOP_M GENMASK(8, 0)
+
+#define SYS_ATOP_RSZ 0x4
+
+#define SYS_MAC_FC_CFG_RSZ 0x4
+
+#define SYS_MAC_FC_CFG_FC_LINK_SPEED(x) (((x) << 26) & GENMASK(27, 26))
+#define SYS_MAC_FC_CFG_FC_LINK_SPEED_M GENMASK(27, 26)
+#define SYS_MAC_FC_CFG_FC_LINK_SPEED_X(x) (((x) & GENMASK(27, 26)) >> 26)
+#define SYS_MAC_FC_CFG_FC_LATENCY_CFG(x) (((x) << 20) & GENMASK(25, 20))
+#define SYS_MAC_FC_CFG_FC_LATENCY_CFG_M GENMASK(25, 20)
+#define SYS_MAC_FC_CFG_FC_LATENCY_CFG_X(x) (((x) & GENMASK(25, 20)) >> 20)
+#define SYS_MAC_FC_CFG_ZERO_PAUSE_ENA BIT(18)
+#define SYS_MAC_FC_CFG_TX_FC_ENA BIT(17)
+#define SYS_MAC_FC_CFG_RX_FC_ENA BIT(16)
+#define SYS_MAC_FC_CFG_PAUSE_VAL_CFG(x) ((x) & GENMASK(15, 0))
+#define SYS_MAC_FC_CFG_PAUSE_VAL_CFG_M GENMASK(15, 0)
+
+#define SYS_MMGT_RELCNT(x) (((x) << 16) & GENMASK(31, 16))
+#define SYS_MMGT_RELCNT_M GENMASK(31, 16)
+#define SYS_MMGT_RELCNT_X(x) (((x) & GENMASK(31, 16)) >> 16)
+#define SYS_MMGT_FREECNT(x) ((x) & GENMASK(15, 0))
+#define SYS_MMGT_FREECNT_M GENMASK(15, 0)
+
+#define SYS_MMGT_FAST_FREEVLD(x) (((x) << 4) & GENMASK(7, 4))
+#define SYS_MMGT_FAST_FREEVLD_M GENMASK(7, 4)
+#define SYS_MMGT_FAST_FREEVLD_X(x) (((x) & GENMASK(7, 4)) >> 4)
+#define SYS_MMGT_FAST_RELVLD(x) ((x) & GENMASK(3, 0))
+#define SYS_MMGT_FAST_RELVLD_M GENMASK(3, 0)
+
+#define SYS_EVENTS_DIF_RSZ 0x4
+
+#define SYS_EVENTS_DIF_EV_DRX(x) (((x) << 6) & GENMASK(8, 6))
+#define SYS_EVENTS_DIF_EV_DRX_M GENMASK(8, 6)
+#define SYS_EVENTS_DIF_EV_DRX_X(x) (((x) & GENMASK(8, 6)) >> 6)
+#define SYS_EVENTS_DIF_EV_DTX(x) ((x) & GENMASK(5, 0))
+#define SYS_EVENTS_DIF_EV_DTX_M GENMASK(5, 0)
+
+#define SYS_EVENTS_CORE_EV_FWR BIT(2)
+#define SYS_EVENTS_CORE_EV_ANA(x) ((x) & GENMASK(1, 0))
+#define SYS_EVENTS_CORE_EV_ANA_M GENMASK(1, 0)
+
+#define SYS_CNT_GSZ 0x4
+
+#define SYS_PTP_STATUS_PTP_TXSTAMP_OAM BIT(29)
+#define SYS_PTP_STATUS_PTP_OVFL BIT(28)
+#define SYS_PTP_STATUS_PTP_MESS_VLD BIT(27)
+#define SYS_PTP_STATUS_PTP_MESS_ID(x) (((x) << 21) & GENMASK(26, 21))
+#define SYS_PTP_STATUS_PTP_MESS_ID_M GENMASK(26, 21)
+#define SYS_PTP_STATUS_PTP_MESS_ID_X(x) (((x) & GENMASK(26, 21)) >> 21)
+#define SYS_PTP_STATUS_PTP_MESS_TXPORT(x) (((x) << 16) & GENMASK(20, 16))
+#define SYS_PTP_STATUS_PTP_MESS_TXPORT_M GENMASK(20, 16)
+#define SYS_PTP_STATUS_PTP_MESS_TXPORT_X(x) (((x) & GENMASK(20, 16)) >> 16)
+#define SYS_PTP_STATUS_PTP_MESS_SEQ_ID(x) ((x) & GENMASK(15, 0))
+#define SYS_PTP_STATUS_PTP_MESS_SEQ_ID_M GENMASK(15, 0)
+
+#define SYS_PTP_TXSTAMP_PTP_TXSTAMP(x) ((x) & GENMASK(29, 0))
+#define SYS_PTP_TXSTAMP_PTP_TXSTAMP_M GENMASK(29, 0)
+#define SYS_PTP_TXSTAMP_PTP_TXSTAMP_SEC BIT(31)
+
+#define SYS_PTP_NXT_PTP_NXT BIT(0)
+
+#define SYS_PTP_CFG_PTP_STAMP_WID(x) (((x) << 2) & GENMASK(7, 2))
+#define SYS_PTP_CFG_PTP_STAMP_WID_M GENMASK(7, 2)
+#define SYS_PTP_CFG_PTP_STAMP_WID_X(x) (((x) & GENMASK(7, 2)) >> 2)
+#define SYS_PTP_CFG_PTP_CF_ROLL_MODE(x) ((x) & GENMASK(1, 0))
+#define SYS_PTP_CFG_PTP_CF_ROLL_MODE_M GENMASK(1, 0)
+
+#define SYS_RAM_INIT_RAM_INIT BIT(1)
+#define SYS_RAM_INIT_RAM_CFG_HOOK BIT(0)
+
+#endif
diff --git a/include/soc/qcom/ocmem.h b/include/soc/qcom/ocmem.h
new file mode 100644
index 000000000000..02a8bc2677b1
--- /dev/null
+++ b/include/soc/qcom/ocmem.h
@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * The On Chip Memory (OCMEM) allocator allows various clients to allocate
+ * memory from OCMEM based on performance, latency and power requirements.
+ * This is typically used by the GPU, camera/video, and audio components on
+ * some Snapdragon SoCs.
+ *
+ * Copyright (C) 2019 Brian Masney <masneyb@onstation.org>
+ * Copyright (C) 2015 Red Hat. Author: Rob Clark <robdclark@gmail.com>
+ */
+
+#include <linux/device.h>
+#include <linux/err.h>
+
+#ifndef __OCMEM_H__
+#define __OCMEM_H__
+
+enum ocmem_client {
+ /* GMEM clients */
+ OCMEM_GRAPHICS = 0x0,
+ /*
+ * TODO add more once ocmem_allocate() is clever enough to
+ * deal with multiple clients.
+ */
+ OCMEM_CLIENT_MAX,
+};
+
+struct ocmem;
+
+struct ocmem_buf {
+ unsigned long offset;
+ unsigned long addr;
+ unsigned long len;
+};
+
+#if IS_ENABLED(CONFIG_QCOM_OCMEM)
+
+struct ocmem *of_get_ocmem(struct device *dev);
+struct ocmem_buf *ocmem_allocate(struct ocmem *ocmem, enum ocmem_client client,
+ unsigned long size);
+void ocmem_free(struct ocmem *ocmem, enum ocmem_client client,
+ struct ocmem_buf *buf);
+
+#else /* IS_ENABLED(CONFIG_QCOM_OCMEM) */
+
+static inline struct ocmem *of_get_ocmem(struct device *dev)
+{
+ return ERR_PTR(-ENODEV);
+}
+
+static inline struct ocmem_buf *ocmem_allocate(struct ocmem *ocmem,
+ enum ocmem_client client,
+ unsigned long size)
+{
+ return ERR_PTR(-ENODEV);
+}
+
+static inline void ocmem_free(struct ocmem *ocmem, enum ocmem_client client,
+ struct ocmem_buf *buf)
+{
+}
+
+#endif /* IS_ENABLED(CONFIG_QCOM_OCMEM) */
+
+#endif /* __OCMEM_H__ */
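
The stubs above let a client call the OCMEM API unconditionally; with CONFIG_QCOM_OCMEM disabled the calls simply return -ENODEV. A hedged usage sketch based only on the declarations in this header (the caller, the 1 MiB size, and the omitted hardware programming are placeholders):

#include <linux/err.h>
#include <linux/sizes.h>
#include <soc/qcom/ocmem.h>

static int example_use_ocmem(struct device *dev)
{
	struct ocmem *ocmem;
	struct ocmem_buf *buf;

	ocmem = of_get_ocmem(dev);		/* look up the OCMEM instance via DT */
	if (IS_ERR(ocmem))
		return PTR_ERR(ocmem);

	buf = ocmem_allocate(ocmem, OCMEM_GRAPHICS, SZ_1M);	/* size is arbitrary */
	if (IS_ERR(buf))
		return PTR_ERR(buf);

	/* ... program buf->addr / buf->len into the client hardware ... */

	ocmem_free(ocmem, OCMEM_GRAPHICS, buf);
	return 0;
}
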
diff --git a/include/soc/sifive/sifive_l2_cache.h b/include/soc/sifive/sifive_l2_cache.h
new file mode 100644
index 000000000000..92ade10ed67e
--- /dev/null
+++ b/include/soc/sifive/sifive_l2_cache.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * SiFive L2 Cache Controller header file
+ *
+ */
+
+#ifndef __SOC_SIFIVE_L2_CACHE_H
+#define __SOC_SIFIVE_L2_CACHE_H
+
+extern int register_sifive_l2_error_notifier(struct notifier_block *nb);
+extern int unregister_sifive_l2_error_notifier(struct notifier_block *nb);
+
+#define SIFIVE_L2_ERR_TYPE_CE 0
+#define SIFIVE_L2_ERR_TYPE_UE 1
+
+#endif /* __SOC_SIFIVE_L2_CACHE_H */
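
A short sketch of how a client might hook into the error notifier declared above. The registration call and the SIFIVE_L2_ERR_TYPE_* constants come from this header; the assumption that the notifier event carries the error type, and the handler itself, are illustrative only:

#include <linux/notifier.h>
#include <linux/printk.h>
#include <soc/sifive/sifive_l2_cache.h>

/* Hypothetical consumer: log L2 ECC events reported by the controller. */
static int example_l2_err_event(struct notifier_block *nb,
				unsigned long event, void *data)
{
	if (event == SIFIVE_L2_ERR_TYPE_UE)
		pr_err("sifive_l2: uncorrectable error reported\n");
	else if (event == SIFIVE_L2_ERR_TYPE_CE)
		pr_warn("sifive_l2: correctable error reported\n");

	return NOTIFY_OK;
}

static struct notifier_block example_l2_err_nb = {
	.notifier_call = example_l2_err_event,
};

static int __init example_l2_init(void)
{
	return register_sifive_l2_error_notifier(&example_l2_err_nb);
}
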
diff --git a/include/soc/tegra/mc.h b/include/soc/tegra/mc.h
index 16e2c2fb5f6c..1238e35653d1 100644
--- a/include/soc/tegra/mc.h
+++ b/include/soc/tegra/mc.h
@@ -181,7 +181,7 @@ struct tegra_mc {
spinlock_t lock;
};
-void tegra_mc_write_emem_configuration(struct tegra_mc *mc, unsigned long rate);
+int tegra_mc_write_emem_configuration(struct tegra_mc *mc, unsigned long rate);
unsigned int tegra_mc_get_emem_device_count(struct tegra_mc *mc);
#endif /* __SOC_TEGRA_MC_H__ */
diff --git a/include/sound/ac97_codec.h b/include/sound/ac97_codec.h
index cc383991c0fe..49200ec26dc4 100644
--- a/include/sound/ac97_codec.h
+++ b/include/sound/ac97_codec.h
@@ -197,7 +197,7 @@ struct snd_ac97_bus_ops {
struct snd_ac97_bus {
/* -- lowlevel (hardware) driver specific -- */
- struct snd_ac97_bus_ops *ops;
+ const struct snd_ac97_bus_ops *ops;
void *private_data;
void (*private_free) (struct snd_ac97_bus *bus);
/* --- */
@@ -310,7 +310,8 @@ static inline int ac97_can_spdif(struct snd_ac97 * ac97)
/* functions */
/* create new AC97 bus */
-int snd_ac97_bus(struct snd_card *card, int num, struct snd_ac97_bus_ops *ops,
+int snd_ac97_bus(struct snd_card *card, int num,
+ const struct snd_ac97_bus_ops *ops,
void *private_data, struct snd_ac97_bus **rbus);
/* create mixer controls */
int snd_ac97_mixer(struct snd_ac97_bus *bus, struct snd_ac97_template *template,
diff --git a/include/sound/aess.h b/include/sound/aess.h
deleted file mode 100644
index cee0d09fadbd..000000000000
--- a/include/sound/aess.h
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * AESS IP block reset
- *
- * Copyright (C) 2012 Texas Instruments, Inc.
- * Paul Walmsley
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed "as is" WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
- * 02110-1301 USA
- */
-#ifndef __SOUND_AESS_H__
-#define __SOUND_AESS_H__
-
-#include <linux/kernel.h>
-#include <linux/io.h>
-
-/*
- * AESS_AUTO_GATING_ENABLE_OFFSET: offset in bytes of the AESS IP
- * block's AESS_AUTO_GATING_ENABLE__1 register from the IP block's
- * base address
- */
-#define AESS_AUTO_GATING_ENABLE_OFFSET 0x07c
-
-/* Register bitfields in the AESS_AUTO_GATING_ENABLE__1 register */
-#define AESS_AUTO_GATING_ENABLE_SHIFT 0
-
-/**
- * aess_enable_autogating - enable AESS internal autogating
- * @oh: struct omap_hwmod *
- *
- * Enable internal autogating on the AESS. This allows the AESS to
- * indicate that it is idle to the OMAP PRCM. Returns 0.
- */
-static inline void aess_enable_autogating(void __iomem *base)
-{
- u32 v;
-
- /* Set AESS_AUTO_GATING_ENABLE__1.ENABLE to allow idle entry */
- v = 1 << AESS_AUTO_GATING_ENABLE_SHIFT;
- writel(v, base + AESS_AUTO_GATING_ENABLE_OFFSET);
-}
-
-#endif /* __SOUND_AESS_H__ */
diff --git a/include/sound/control.h b/include/sound/control.h
index 5d7c99475684..11feeee31e35 100644
--- a/include/sound/control.h
+++ b/include/sound/control.h
@@ -22,6 +22,16 @@ typedef int (snd_kcontrol_tlv_rw_t)(struct snd_kcontrol *kcontrol,
unsigned int size,
unsigned int __user *tlv);
+/* internal flag for skipping validations */
+#ifdef CONFIG_SND_CTL_VALIDATION
+#define SNDRV_CTL_ELEM_ACCESS_SKIP_CHECK (1 << 27)
+#define snd_ctl_skip_validation(info) \
+ ((info)->access & SNDRV_CTL_ELEM_ACCESS_SKIP_CHECK)
+#else
+#define SNDRV_CTL_ELEM_ACCESS_SKIP_CHECK 0
+#define snd_ctl_skip_validation(info) true
+#endif
+
enum {
SNDRV_CTL_TLV_OP_READ = 0,
SNDRV_CTL_TLV_OP_WRITE = 1,
diff --git a/include/sound/core.h b/include/sound/core.h
index ee238f100f73..ac8b692b69b4 100644
--- a/include/sound/core.h
+++ b/include/sound/core.h
@@ -69,7 +69,7 @@ struct snd_device {
enum snd_device_state state; /* state of the device */
enum snd_device_type type; /* device type */
void *device_data; /* device structure */
- struct snd_device_ops *ops; /* operations */
+ const struct snd_device_ops *ops; /* operations */
};
#define snd_device(n) list_entry(n, struct snd_device, list)
@@ -117,8 +117,12 @@ struct snd_card {
struct device card_dev; /* cardX object for sysfs */
const struct attribute_group *dev_groups[4]; /* assigned sysfs attr */
bool registered; /* card_dev is registered? */
+ int sync_irq; /* assigned irq, used for PCM sync */
wait_queue_head_t remove_sleep;
+ size_t total_pcm_alloc_bytes; /* total amount of allocated buffers */
+ struct mutex memory_mutex; /* protection for the above */
+
#ifdef CONFIG_PM
unsigned int power_state; /* power state */
wait_queue_head_t power_sleep;
@@ -255,7 +259,7 @@ static inline void snd_card_unref(struct snd_card *card)
/* device.c */
int snd_device_new(struct snd_card *card, enum snd_device_type type,
- void *device_data, struct snd_device_ops *ops);
+ void *device_data, const struct snd_device_ops *ops);
int snd_device_register(struct snd_card *card, void *device_data);
int snd_device_register_all(struct snd_card *card);
void snd_device_disconnect(struct snd_card *card, void *device_data);
diff --git a/include/sound/dmaengine_pcm.h b/include/sound/dmaengine_pcm.h
index c679f6116580..b65220685920 100644
--- a/include/sound/dmaengine_pcm.h
+++ b/include/sound/dmaengine_pcm.h
@@ -83,6 +83,11 @@ void snd_dmaengine_pcm_set_config_from_dai_data(
const struct snd_dmaengine_dai_dma_data *dma_data,
struct dma_slave_config *config);
+int snd_dmaengine_pcm_refine_runtime_hwparams(
+ struct snd_pcm_substream *substream,
+ struct snd_dmaengine_dai_dma_data *dma_data,
+ struct snd_pcm_hardware *hw,
+ struct dma_chan *chan);
/*
* Try to request the DMA channel using compat_request_channel or
diff --git a/include/sound/hda_codec.h b/include/sound/hda_codec.h
index 9a0393cf024c..3ee8036f5436 100644
--- a/include/sound/hda_codec.h
+++ b/include/sound/hda_codec.h
@@ -51,7 +51,6 @@ struct hda_bus {
DECLARE_BITMAP(pcm_dev_bits, SNDRV_PCM_DEVICES);
/* misc op flags */
- unsigned int needs_damn_long_delay :1;
unsigned int allow_bus_reset:1; /* allow bus reset at fatal error */
/* status for codec/controller */
unsigned int shutdown :1; /* being unloaded */
@@ -254,6 +253,7 @@ struct hda_codec {
unsigned int force_pin_prefix:1; /* Add location prefix */
unsigned int link_down_at_suspend:1; /* link down at runtime suspend */
unsigned int relaxed_resume:1; /* don't resume forcibly for jack */
+ unsigned int mst_no_extra_pcms:1; /* no backup PCMs for DP-MST */
#ifdef CONFIG_PM
unsigned long power_on_acct;
diff --git a/include/sound/hda_regmap.h b/include/sound/hda_regmap.h
index 5141f8ffbb12..4c1b9bebbd60 100644
--- a/include/sound/hda_regmap.h
+++ b/include/sound/hda_regmap.h
@@ -24,6 +24,9 @@ int snd_hdac_regmap_write_raw(struct hdac_device *codec, unsigned int reg,
unsigned int val);
int snd_hdac_regmap_update_raw(struct hdac_device *codec, unsigned int reg,
unsigned int mask, unsigned int val);
+int snd_hdac_regmap_update_raw_once(struct hdac_device *codec, unsigned int reg,
+ unsigned int mask, unsigned int val);
+void snd_hdac_regmap_sync(struct hdac_device *codec);
/**
* snd_hdac_regmap_encode_verb - encode the verb to a pseudo register
diff --git a/include/sound/hdaudio.h b/include/sound/hdaudio.h
index b260c5fd2337..d4299e146d95 100644
--- a/include/sound/hdaudio.h
+++ b/include/sound/hdaudio.h
@@ -8,6 +8,7 @@
#include <linux/device.h>
#include <linux/interrupt.h>
+#include <linux/io.h>
#include <linux/pm_runtime.h>
#include <linux/timecounter.h>
#include <sound/core.h>
@@ -86,6 +87,7 @@ struct hdac_device {
/* regmap */
struct regmap *regmap;
+ struct mutex regmap_lock;
struct snd_array vendor_verbs;
bool lazy_cache:1; /* don't wake up for writes */
bool caps_overwriting:1; /* caps overwrite being in process */
@@ -317,6 +319,7 @@ struct hdac_bus {
struct hdac_rb corb;
struct hdac_rb rirb;
unsigned int last_cmd[HDA_MAX_CODECS]; /* last sent command */
+ wait_queue_head_t rirb_wq;
/* CORB/RIRB and position buffers */
struct snd_dma_buffer rb;
@@ -330,6 +333,7 @@ struct hdac_bus {
bool chip_init:1; /* h/w initialized */
/* behavior flags */
+ bool aligned_mmio:1; /* aligned MMIO access */
bool sync_write:1; /* sync after verb write */
bool use_posbuf:1; /* use position buffer */
bool snoop:1; /* enable snooping */
@@ -337,6 +341,7 @@ struct hdac_bus {
bool reverse_assign:1; /* assign devices in reverse order */
bool corbrp_self_clear:1; /* CORBRP clears itself after reset */
bool polling_mode:1;
+ bool needs_damn_long_delay:1;
int poll_count;
@@ -405,34 +410,61 @@ void snd_hdac_bus_free_stream_pages(struct hdac_bus *bus);
unsigned int snd_hdac_aligned_read(void __iomem *addr, unsigned int mask);
void snd_hdac_aligned_write(unsigned int val, void __iomem *addr,
unsigned int mask);
-#define snd_hdac_reg_writeb(v, addr) snd_hdac_aligned_write(v, addr, 0xff)
-#define snd_hdac_reg_writew(v, addr) snd_hdac_aligned_write(v, addr, 0xffff)
-#define snd_hdac_reg_readb(addr) snd_hdac_aligned_read(addr, 0xff)
-#define snd_hdac_reg_readw(addr) snd_hdac_aligned_read(addr, 0xffff)
-#else /* CONFIG_SND_HDA_ALIGNED_MMIO */
-#define snd_hdac_reg_writeb(val, addr) writeb(val, addr)
-#define snd_hdac_reg_writew(val, addr) writew(val, addr)
-#define snd_hdac_reg_readb(addr) readb(addr)
-#define snd_hdac_reg_readw(addr) readw(addr)
-#endif /* CONFIG_SND_HDA_ALIGNED_MMIO */
-#define snd_hdac_reg_writel(val, addr) writel(val, addr)
-#define snd_hdac_reg_readl(addr) readl(addr)
+#define snd_hdac_aligned_mmio(bus) (bus)->aligned_mmio
+#else
+#define snd_hdac_aligned_mmio(bus) false
+#define snd_hdac_aligned_read(addr, mask) 0
+#define snd_hdac_aligned_write(val, addr, mask) do {} while (0)
+#endif
+
+static inline void snd_hdac_reg_writeb(struct hdac_bus *bus, void __iomem *addr,
+ u8 val)
+{
+ if (snd_hdac_aligned_mmio(bus))
+ snd_hdac_aligned_write(val, addr, 0xff);
+ else
+ writeb(val, addr);
+}
+
+static inline void snd_hdac_reg_writew(struct hdac_bus *bus, void __iomem *addr,
+ u16 val)
+{
+ if (snd_hdac_aligned_mmio(bus))
+ snd_hdac_aligned_write(val, addr, 0xffff);
+ else
+ writew(val, addr);
+}
+
+static inline u8 snd_hdac_reg_readb(struct hdac_bus *bus, void __iomem *addr)
+{
+ return snd_hdac_aligned_mmio(bus) ?
+ snd_hdac_aligned_read(addr, 0xff) : readb(addr);
+}
+
+static inline u16 snd_hdac_reg_readw(struct hdac_bus *bus, void __iomem *addr)
+{
+ return snd_hdac_aligned_mmio(bus) ?
+ snd_hdac_aligned_read(addr, 0xffff) : readw(addr);
+}
+
+#define snd_hdac_reg_writel(bus, addr, val) writel(val, addr)
+#define snd_hdac_reg_readl(bus, addr) readl(addr)
/*
* macros for easy use
*/
#define _snd_hdac_chip_writeb(chip, reg, value) \
- snd_hdac_reg_writeb(value, (chip)->remap_addr + (reg))
+ snd_hdac_reg_writeb(chip, (chip)->remap_addr + (reg), value)
#define _snd_hdac_chip_readb(chip, reg) \
- snd_hdac_reg_readb((chip)->remap_addr + (reg))
+ snd_hdac_reg_readb(chip, (chip)->remap_addr + (reg))
#define _snd_hdac_chip_writew(chip, reg, value) \
- snd_hdac_reg_writew(value, (chip)->remap_addr + (reg))
+ snd_hdac_reg_writew(chip, (chip)->remap_addr + (reg), value)
#define _snd_hdac_chip_readw(chip, reg) \
- snd_hdac_reg_readw((chip)->remap_addr + (reg))
+ snd_hdac_reg_readw(chip, (chip)->remap_addr + (reg))
#define _snd_hdac_chip_writel(chip, reg, value) \
- snd_hdac_reg_writel(value, (chip)->remap_addr + (reg))
+ snd_hdac_reg_writel(chip, (chip)->remap_addr + (reg), value)
#define _snd_hdac_chip_readl(chip, reg) \
- snd_hdac_reg_readl((chip)->remap_addr + (reg))
+ snd_hdac_reg_readl(chip, (chip)->remap_addr + (reg))
/* read/write a register, pass without AZX_REG_ prefix */
#define snd_hdac_chip_writel(chip, reg, value) \
@@ -493,6 +525,7 @@ struct hdac_stream {
bool prepared:1;
bool no_period_wakeup:1;
bool locked:1;
+ bool stripe:1; /* apply stripe control */
/* timestamp */
unsigned long start_wallclk; /* start + minimum wallclk */
@@ -539,17 +572,17 @@ int snd_hdac_get_stream_stripe_ctl(struct hdac_bus *bus,
*/
/* read/write a register, pass without AZX_REG_ prefix */
#define snd_hdac_stream_writel(dev, reg, value) \
- snd_hdac_reg_writel(value, (dev)->sd_addr + AZX_REG_ ## reg)
+ snd_hdac_reg_writel((dev)->bus, (dev)->sd_addr + AZX_REG_ ## reg, value)
#define snd_hdac_stream_writew(dev, reg, value) \
- snd_hdac_reg_writew(value, (dev)->sd_addr + AZX_REG_ ## reg)
+ snd_hdac_reg_writew((dev)->bus, (dev)->sd_addr + AZX_REG_ ## reg, value)
#define snd_hdac_stream_writeb(dev, reg, value) \
- snd_hdac_reg_writeb(value, (dev)->sd_addr + AZX_REG_ ## reg)
+ snd_hdac_reg_writeb((dev)->bus, (dev)->sd_addr + AZX_REG_ ## reg, value)
#define snd_hdac_stream_readl(dev, reg) \
- snd_hdac_reg_readl((dev)->sd_addr + AZX_REG_ ## reg)
+ snd_hdac_reg_readl((dev)->bus, (dev)->sd_addr + AZX_REG_ ## reg)
#define snd_hdac_stream_readw(dev, reg) \
- snd_hdac_reg_readw((dev)->sd_addr + AZX_REG_ ## reg)
+ snd_hdac_reg_readw((dev)->bus, (dev)->sd_addr + AZX_REG_ ## reg)
#define snd_hdac_stream_readb(dev, reg) \
- snd_hdac_reg_readb((dev)->sd_addr + AZX_REG_ ## reg)
+ snd_hdac_reg_readb((dev)->bus, (dev)->sd_addr + AZX_REG_ ## reg)
/* update a register, pass without AZX_REG_ prefix */
#define snd_hdac_stream_updatel(dev, reg, mask, val) \
diff --git a/include/sound/info.h b/include/sound/info.h
index b01a22913400..7c13bf52cc81 100644
--- a/include/sound/info.h
+++ b/include/sound/info.h
@@ -64,7 +64,7 @@ struct snd_info_entry {
unsigned short content;
union {
struct snd_info_entry_text text;
- struct snd_info_entry_ops *ops;
+ const struct snd_info_entry_ops *ops;
} c;
struct snd_info_entry *parent;
struct module *module;
diff --git a/include/sound/initval.h b/include/sound/initval.h
index 3ddae2b4177e..a1ff3b4865b4 100644
--- a/include/sound/initval.h
+++ b/include/sound/initval.h
@@ -37,7 +37,7 @@
#define SNDRV_DEFAULT_PTR SNDRV_DEFAULT_STR
#ifdef SNDRV_LEGACY_FIND_FREE_IOPORT
-static long snd_legacy_find_free_ioport(long *port_table, long size)
+static long snd_legacy_find_free_ioport(const long *port_table, long size)
{
while (*port_table != -1) {
if (request_region(*port_table, size, "ALSA test")) {
@@ -58,7 +58,7 @@ static irqreturn_t snd_legacy_empty_irq_handler(int irq, void *dev_id)
return IRQ_HANDLED;
}
-static int snd_legacy_find_free_irq(int *irq_table)
+static int snd_legacy_find_free_irq(const int *irq_table)
{
while (*irq_table != -1) {
if (!request_irq(*irq_table, snd_legacy_empty_irq_handler,
@@ -74,7 +74,7 @@ static int snd_legacy_find_free_irq(int *irq_table)
#endif
#ifdef SNDRV_LEGACY_FIND_FREE_DMA
-static int snd_legacy_find_free_dma(int *dma_table)
+static int snd_legacy_find_free_dma(const int *dma_table)
{
while (*dma_table != -1) {
if (!request_dma(*dma_table, "ALSA Test DMA")) {
diff --git a/include/sound/intel-dsp-config.h b/include/sound/intel-dsp-config.h
new file mode 100644
index 000000000000..c36622bee3f8
--- /dev/null
+++ b/include/sound/intel-dsp-config.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * intel-dsp-config.h - Intel DSP config
+ *
+ * Copyright (c) 2019 Jaroslav Kysela <perex@perex.cz>
+ */
+
+#ifndef __INTEL_DSP_CONFIG_H__
+#define __INTEL_DSP_CONFIG_H__
+
+struct pci_dev;
+
+enum {
+ SND_INTEL_DSP_DRIVER_ANY = 0,
+ SND_INTEL_DSP_DRIVER_LEGACY,
+ SND_INTEL_DSP_DRIVER_SST,
+ SND_INTEL_DSP_DRIVER_SOF,
+ SND_INTEL_DSP_DRIVER_LAST = SND_INTEL_DSP_DRIVER_SOF
+};
+
+#if IS_ENABLED(CONFIG_SND_INTEL_DSP_CONFIG)
+
+int snd_intel_dsp_driver_probe(struct pci_dev *pci);
+
+#else
+
+static inline int snd_intel_dsp_driver_probe(struct pci_dev *pci)
+{
+ return SND_INTEL_DSP_DRIVER_ANY;
+}
+
+#endif
+
+#endif
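
The helper above lets a PCI audio driver ask which Intel driver (legacy HDA, SST, or SOF) should own a given device before binding. A hedged sketch of how a legacy-style probe might consult it; the surrounding probe function and its error path are invented for illustration:

#include <linux/pci.h>
#include <sound/intel-dsp-config.h>

/* Hypothetical PCI probe fragment for a legacy HDA-style driver. */
static int example_hda_probe(struct pci_dev *pci, const struct pci_device_id *id)
{
	int err;

	/* Ask the DSP config table which driver should handle this device. */
	err = snd_intel_dsp_driver_probe(pci);
	if (err != SND_INTEL_DSP_DRIVER_ANY &&
	    err != SND_INTEL_DSP_DRIVER_LEGACY) {
		dev_dbg(&pci->dev, "DSP detected, deferring to SST/SOF driver\n");
		return -ENODEV;
	}

	/* ... continue with normal controller setup ... */
	return 0;
}
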
diff --git a/include/sound/memalloc.h b/include/sound/memalloc.h
index 240622d89c0b..3b47832b1c1f 100644
--- a/include/sound/memalloc.h
+++ b/include/sound/memalloc.h
@@ -21,7 +21,6 @@ struct snd_dma_device {
struct device *dev; /* generic device */
};
-#define snd_dma_pci_data(pci) (&(pci)->dev)
#define snd_dma_continuous_data(x) ((struct device *)(__force unsigned long)(x))
@@ -44,6 +43,7 @@ struct snd_dma_device {
#else
#define SNDRV_DMA_TYPE_DEV_IRAM SNDRV_DMA_TYPE_DEV
#endif
+#define SNDRV_DMA_TYPE_VMALLOC 7 /* vmalloc'ed buffer */
/*
* info for buffer allocation
diff --git a/include/sound/pcm.h b/include/sound/pcm.h
index bbe6eb1ff5d2..f657ff08f317 100644
--- a/include/sound/pcm.h
+++ b/include/sound/pcm.h
@@ -44,6 +44,7 @@ struct snd_pcm_hardware {
size_t fifo_size; /* fifo size in bytes */
};
+struct snd_pcm_status64;
struct snd_pcm_substream;
struct snd_pcm_audio_tstamp_config; /* definitions further down */
@@ -59,9 +60,10 @@ struct snd_pcm_ops {
int (*hw_free)(struct snd_pcm_substream *substream);
int (*prepare)(struct snd_pcm_substream *substream);
int (*trigger)(struct snd_pcm_substream *substream, int cmd);
+ int (*sync_stop)(struct snd_pcm_substream *substream);
snd_pcm_uframes_t (*pointer)(struct snd_pcm_substream *substream);
int (*get_time_info)(struct snd_pcm_substream *substream,
- struct timespec *system_ts, struct timespec *audio_ts,
+ struct timespec64 *system_ts, struct timespec64 *audio_ts,
struct snd_pcm_audio_tstamp_config *audio_tstamp_config,
struct snd_pcm_audio_tstamp_report *audio_tstamp_report);
int (*fill_silence)(struct snd_pcm_substream *substream, int channel,
@@ -342,7 +344,7 @@ static inline void snd_pcm_pack_audio_tstamp_report(__u32 *data, __u32 *accuracy
struct snd_pcm_runtime {
/* -- Status -- */
struct snd_pcm_substream *trigger_master;
- struct timespec trigger_tstamp; /* trigger timestamp */
+ struct timespec64 trigger_tstamp; /* trigger timestamp */
bool trigger_tstamp_latched; /* trigger timestamp latched in low-level driver/hardware */
int overrange;
snd_pcm_uframes_t avail_max;
@@ -395,6 +397,7 @@ struct snd_pcm_runtime {
wait_queue_head_t sleep; /* poll sleep */
wait_queue_head_t tsleep; /* transfer sleep */
struct fasync_struct *fasync;
+ bool stop_operating; /* sync_stop will be called */
/* -- private section -- */
void *private_data;
@@ -414,11 +417,12 @@ struct snd_pcm_runtime {
size_t dma_bytes; /* size of DMA area */
struct snd_dma_buffer *dma_buffer_p; /* allocated buffer */
+ unsigned int buffer_changed:1; /* buffer allocation changed; set only in managed mode */
/* -- audio timestamp config -- */
struct snd_pcm_audio_tstamp_config audio_tstamp_config;
struct snd_pcm_audio_tstamp_report audio_tstamp_report;
- struct timespec driver_tstamp;
+ struct timespec64 driver_tstamp;
#if IS_ENABLED(CONFIG_SND_PCM_OSS)
/* -- OSS things -- */
@@ -475,6 +479,7 @@ struct snd_pcm_substream {
#endif /* CONFIG_SND_VERBOSE_PROCFS */
/* misc flags */
unsigned int hw_opened: 1;
+ unsigned int managed_buffer_alloc:1;
};
#define SUBSTREAM_BUSY(substream) ((substream)->ref_count > 0)
@@ -554,8 +559,8 @@ int snd_pcm_notify(struct snd_pcm_notify *notify, int nfree);
int snd_pcm_info(struct snd_pcm_substream *substream, struct snd_pcm_info *info);
int snd_pcm_info_user(struct snd_pcm_substream *substream,
struct snd_pcm_info __user *info);
-int snd_pcm_status(struct snd_pcm_substream *substream,
- struct snd_pcm_status *status);
+int snd_pcm_status64(struct snd_pcm_substream *substream,
+ struct snd_pcm_status64 *status);
int snd_pcm_start(struct snd_pcm_substream *substream);
int snd_pcm_stop(struct snd_pcm_substream *substream, snd_pcm_state_t status);
int snd_pcm_drain_done(struct snd_pcm_substream *substream);
@@ -1151,22 +1156,22 @@ static inline void snd_pcm_set_runtime_buffer(struct snd_pcm_substream *substrea
}
/**
- * snd_pcm_gettime - Fill the timespec depending on the timestamp mode
+ * snd_pcm_gettime - Fill the timespec64 depending on the timestamp mode
* @runtime: PCM runtime instance
- * @tv: timespec to fill
+ * @tv: timespec64 to fill
*/
static inline void snd_pcm_gettime(struct snd_pcm_runtime *runtime,
- struct timespec *tv)
+ struct timespec64 *tv)
{
switch (runtime->tstamp_type) {
case SNDRV_PCM_TSTAMP_TYPE_MONOTONIC:
- ktime_get_ts(tv);
+ ktime_get_ts64(tv);
break;
case SNDRV_PCM_TSTAMP_TYPE_MONOTONIC_RAW:
- getrawmonotonic(tv);
+ ktime_get_raw_ts64(tv);
break;
default:
- getnstimeofday(tv);
+ ktime_get_real_ts64(tv);
break;
}
}
@@ -1186,6 +1191,12 @@ void snd_pcm_lib_preallocate_pages_for_all(struct snd_pcm *pcm,
int snd_pcm_lib_malloc_pages(struct snd_pcm_substream *substream, size_t size);
int snd_pcm_lib_free_pages(struct snd_pcm_substream *substream);
+void snd_pcm_set_managed_buffer(struct snd_pcm_substream *substream, int type,
+ struct device *data, size_t size, size_t max);
+void snd_pcm_set_managed_buffer_all(struct snd_pcm *pcm, int type,
+ struct device *data,
+ size_t size, size_t max);
+
int _snd_pcm_lib_alloc_vmalloc_buffer(struct snd_pcm_substream *substream,
size_t size, gfp_t gfp_flags);
int snd_pcm_lib_free_vmalloc_buffer(struct snd_pcm_substream *substream);
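
A rough illustration of the managed-buffer mode declared above, assuming a PCI card whose pcm has already been created (the device pointer and sizes are placeholders). The constructor preallocates once; with managed buffers the driver's hw_params/hw_free callbacks no longer need the snd_pcm_lib_*_pages() helpers, because the core allocates and releases the buffer around those callbacks:

    /* hypothetical: preallocate 64 kB, allow growth up to 1 MB */
    snd_pcm_set_managed_buffer_all(pcm, SNDRV_DMA_TYPE_DEV,
                                   &pci->dev, 64 * 1024, 1024 * 1024);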
@@ -1236,14 +1247,6 @@ static inline int snd_pcm_lib_alloc_vmalloc_32_buffer
*/
#define snd_pcm_substream_sgbuf(substream) \
snd_pcm_get_dma_buf(substream)->private_data
-
-struct page *snd_pcm_sgbuf_ops_page(struct snd_pcm_substream *substream,
- unsigned long offset);
-#else /* !SND_DMA_SGBUF */
-/*
- * fake using a continuous buffer
- */
-#define snd_pcm_sgbuf_ops_page NULL
#endif /* SND_DMA_SGBUF */
/**
@@ -1336,8 +1339,6 @@ static inline void snd_pcm_limit_isa_dma_size(int dma, size_t *max)
(IEC958_AES1_CON_PCM_CODER<<8)|\
(IEC958_AES3_CON_FS_48000<<24))
-#define PCM_RUNTIME_CHECK(sub) snd_BUG_ON(!(sub) || !(sub)->runtime)
-
const char *snd_pcm_format_name(snd_pcm_format_t format);
/**
@@ -1422,4 +1423,55 @@ static inline u64 pcm_format_to_bits(snd_pcm_format_t pcm_format)
#define pcm_dbg(pcm, fmt, args...) \
dev_dbg((pcm)->card->dev, fmt, ##args)
+struct snd_pcm_status64 {
+ snd_pcm_state_t state; /* stream state */
+ u8 rsvd[4];
+ s64 trigger_tstamp_sec; /* time when stream was started/stopped/paused */
+ s64 trigger_tstamp_nsec;
+ s64 tstamp_sec; /* reference timestamp */
+ s64 tstamp_nsec;
+ snd_pcm_uframes_t appl_ptr; /* appl ptr */
+ snd_pcm_uframes_t hw_ptr; /* hw ptr */
+ snd_pcm_sframes_t delay; /* current delay in frames */
+ snd_pcm_uframes_t avail; /* number of frames available */
+ snd_pcm_uframes_t avail_max; /* max frames available on hw since last status */
+ snd_pcm_uframes_t overrange; /* count of ADC (capture) overrange detections from last status */
+ snd_pcm_state_t suspended_state; /* suspended stream state */
+ __u32 audio_tstamp_data; /* needed for 64-bit alignment, used for configs/report to/from userspace */
+ s64 audio_tstamp_sec; /* sample counter, wall clock, PHC or on-demand sync'ed */
+ s64 audio_tstamp_nsec;
+ s64 driver_tstamp_sec; /* useful in case reference system tstamp is reported with delay */
+ s64 driver_tstamp_nsec;
+ __u32 audio_tstamp_accuracy; /* in ns units, only valid if indicated in audio_tstamp_data */
+ unsigned char reserved[52-4*sizeof(s64)]; /* must be filled with zero */
+};
+
+#define SNDRV_PCM_IOCTL_STATUS64 _IOR('A', 0x20, struct snd_pcm_status64)
+#define SNDRV_PCM_IOCTL_STATUS_EXT64 _IOWR('A', 0x24, struct snd_pcm_status64)
+
+struct snd_pcm_status32 {
+ snd_pcm_state_t state; /* stream state */
+ s32 trigger_tstamp_sec; /* time when stream was started/stopped/paused */
+ s32 trigger_tstamp_nsec;
+ s32 tstamp_sec; /* reference timestamp */
+ s32 tstamp_nsec;
+ u32 appl_ptr; /* appl ptr */
+ u32 hw_ptr; /* hw ptr */
+ s32 delay; /* current delay in frames */
+ u32 avail; /* number of frames available */
+ u32 avail_max; /* max frames available on hw since last status */
+ u32 overrange; /* count of ADC (capture) overrange detections from last status */
+ snd_pcm_state_t suspended_state; /* suspended stream state */
+ u32 audio_tstamp_data; /* needed for 64-bit alignment, used for configs/report to/from userspace */
+ s32 audio_tstamp_sec; /* sample counter, wall clock, PHC or on-demand sync'ed */
+ s32 audio_tstamp_nsec;
+ s32 driver_tstamp_sec; /* useful in case reference system tstamp is reported with delay */
+ s32 driver_tstamp_nsec;
+ u32 audio_tstamp_accuracy; /* in ns units, only valid if indicated in audio_tstamp_data */
+ unsigned char reserved[52-4*sizeof(s32)]; /* must be filled with zero */
+};
+
+#define SNDRV_PCM_IOCTL_STATUS32 _IOR('A', 0x20, struct snd_pcm_status32)
+#define SNDRV_PCM_IOCTL_STATUS_EXT32 _IOWR('A', 0x24, struct snd_pcm_status32)
+
#endif /* __SOUND_PCM_H */
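
For in-kernel callers the conversion from snd_pcm_status() is mostly mechanical; a minimal sketch, assuming a valid substream pointer:

    struct snd_pcm_status64 status;
    int err;

    memset(&status, 0, sizeof(status));
    err = snd_pcm_status64(substream, &status); /* was snd_pcm_status() */
    if (!err)
            pr_debug("hw_ptr=%lu delay=%ld trigger=%lld.%09lld\n",
                     status.hw_ptr, status.delay,
                     status.trigger_tstamp_sec,
                     status.trigger_tstamp_nsec);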
diff --git a/include/sound/pxa2xx-lib.h b/include/sound/pxa2xx-lib.h
index 6758fc12fa84..0feaf16e6ac0 100644
--- a/include/sound/pxa2xx-lib.h
+++ b/include/sound/pxa2xx-lib.h
@@ -10,6 +10,7 @@ struct snd_pcm_substream;
struct snd_pcm_hw_params;
struct snd_soc_pcm_runtime;
struct snd_pcm;
+struct snd_soc_component;
extern int pxa2xx_pcm_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params);
@@ -23,8 +24,29 @@ extern int pxa2xx_pcm_mmap(struct snd_pcm_substream *substream,
struct vm_area_struct *vma);
extern int pxa2xx_pcm_preallocate_dma_buffer(struct snd_pcm *pcm, int stream);
extern void pxa2xx_pcm_free_dma_buffers(struct snd_pcm *pcm);
-extern int pxa2xx_soc_pcm_new(struct snd_soc_pcm_runtime *rtd);
-extern const struct snd_pcm_ops pxa2xx_pcm_ops;
+extern void pxa2xx_soc_pcm_free(struct snd_soc_component *component,
+ struct snd_pcm *pcm);
+extern int pxa2xx_soc_pcm_new(struct snd_soc_component *component,
+ struct snd_soc_pcm_runtime *rtd);
+extern int pxa2xx_soc_pcm_open(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream);
+extern int pxa2xx_soc_pcm_close(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream);
+extern int pxa2xx_soc_pcm_hw_params(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params);
+extern int pxa2xx_soc_pcm_hw_free(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream);
+extern int pxa2xx_soc_pcm_prepare(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream);
+extern int pxa2xx_soc_pcm_trigger(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream, int cmd);
+extern snd_pcm_uframes_t
+pxa2xx_soc_pcm_pointer(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream);
+extern int pxa2xx_soc_pcm_mmap(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream,
+ struct vm_area_struct *vma);
/* AC97 */
diff --git a/include/sound/rawmidi.h b/include/sound/rawmidi.h
index 40ab20439fee..a36b7227a15a 100644
--- a/include/sound/rawmidi.h
+++ b/include/sound/rawmidi.h
@@ -77,9 +77,9 @@ struct snd_rawmidi_substream {
struct list_head list; /* list of all substream for given stream */
int stream; /* direction */
int number; /* substream number */
- unsigned int opened: 1, /* open flag */
- append: 1, /* append flag (merge more streams) */
- active_sensing: 1; /* send active sensing when close */
+ bool opened; /* open flag */
+ bool append; /* append flag (merge more streams) */
+ bool active_sensing; /* send active sensing when close */
int use_count; /* use counter (for output) */
size_t bytes;
struct snd_rawmidi *rmidi;
diff --git a/include/sound/rt5682.h b/include/sound/rt5682.h
index bf2ee75aabb1..bc2c31734df1 100644
--- a/include/sound/rt5682.h
+++ b/include/sound/rt5682.h
@@ -31,6 +31,7 @@ struct rt5682_platform_data {
enum rt5682_dmic1_data_pin dmic1_data_pin;
enum rt5682_dmic1_clk_pin dmic1_clk_pin;
enum rt5682_jd_src jd_src;
+ unsigned int btndet_delay;
};
#endif
diff --git a/include/sound/seq_midi_emul.h b/include/sound/seq_midi_emul.h
index d6b74059b4b1..88799d1e1f53 100644
--- a/include/sound/seq_midi_emul.h
+++ b/include/sound/seq_midi_emul.h
@@ -174,7 +174,8 @@ enum {
};
/* Prototypes for midi_process.c */
-void snd_midi_process_event(struct snd_midi_op *ops, struct snd_seq_event *ev,
+void snd_midi_process_event(const struct snd_midi_op *ops,
+ struct snd_seq_event *ev,
struct snd_midi_channel_set *chanset);
void snd_midi_channel_set_clear(struct snd_midi_channel_set *chset);
struct snd_midi_channel_set *snd_midi_channel_alloc_set(int n);
diff --git a/include/sound/simple_card_utils.h b/include/sound/simple_card_utils.h
index 31f76b6abf71..bbdd1542d6f1 100644
--- a/include/sound/simple_card_utils.h
+++ b/include/sound/simple_card_utils.h
@@ -8,6 +8,7 @@
#ifndef __SIMPLE_CARD_UTILS_H
#define __SIMPLE_CARD_UTILS_H
+#include <linux/clk.h>
#include <sound/soc.h>
#define asoc_simple_init_hp(card, sjack, prefix) \
diff --git a/include/sound/soc-acpi-intel-match.h b/include/sound/soc-acpi-intel-match.h
index 6c9929abd90b..ab6f75a86611 100644
--- a/include/sound/soc-acpi-intel-match.h
+++ b/include/sound/soc-acpi-intel-match.h
@@ -24,9 +24,18 @@ extern struct snd_soc_acpi_mach snd_soc_acpi_intel_kbl_machines[];
extern struct snd_soc_acpi_mach snd_soc_acpi_intel_bxt_machines[];
extern struct snd_soc_acpi_mach snd_soc_acpi_intel_glk_machines[];
extern struct snd_soc_acpi_mach snd_soc_acpi_intel_cnl_machines[];
+extern struct snd_soc_acpi_mach snd_soc_acpi_intel_cfl_machines[];
+extern struct snd_soc_acpi_mach snd_soc_acpi_intel_cml_machines[];
extern struct snd_soc_acpi_mach snd_soc_acpi_intel_icl_machines[];
extern struct snd_soc_acpi_mach snd_soc_acpi_intel_tgl_machines[];
extern struct snd_soc_acpi_mach snd_soc_acpi_intel_ehl_machines[];
+extern struct snd_soc_acpi_mach snd_soc_acpi_intel_jsl_machines[];
+
+extern struct snd_soc_acpi_mach snd_soc_acpi_intel_cnl_sdw_machines[];
+extern struct snd_soc_acpi_mach snd_soc_acpi_intel_cfl_sdw_machines[];
+extern struct snd_soc_acpi_mach snd_soc_acpi_intel_cml_sdw_machines[];
+extern struct snd_soc_acpi_mach snd_soc_acpi_intel_icl_sdw_machines[];
+extern struct snd_soc_acpi_mach snd_soc_acpi_intel_tgl_sdw_machines[];
/*
* generic table used for HDA codec-based platforms, possibly with
diff --git a/include/sound/soc-acpi.h b/include/sound/soc-acpi.h
index 35b38e41e5b2..a217a87cae86 100644
--- a/include/sound/soc-acpi.h
+++ b/include/sound/soc-acpi.h
@@ -60,12 +60,33 @@ static inline struct snd_soc_acpi_mach *snd_soc_acpi_codec_list(void *arg)
* @acpi_ipc_irq_index: used for BYT-CR detection
* @platform: string used for HDaudio codec support
* @codec_mask: used for HDAudio support
+ * @common_hdmi_codec_drv: use common HDAudio HDMI codec driver
+ * @link_mask: links enabled on the board
+ * @links: array of link _ADR descriptors, null terminated
*/
struct snd_soc_acpi_mach_params {
u32 acpi_ipc_irq_index;
const char *platform;
u32 codec_mask;
u32 dmic_num;
+ bool common_hdmi_codec_drv;
+ u32 link_mask;
+ const struct snd_soc_acpi_link_adr *links;
+};
+
+/**
+ * snd_soc_acpi_link_adr: ACPI-based list of _ADR, with a variable
+ * number of devices per link
+ *
+ * @mask: one bit set indicates the link this list applies to
+ * @num_adr: ARRAY_SIZE of adr
+ * @adr: array of _ADR (represented as u64).
+ */
+
+struct snd_soc_acpi_link_adr {
+ const u32 mask;
+ const u32 num_adr;
+ const u64 *adr;
};
/**
@@ -75,6 +96,8 @@ struct snd_soc_acpi_mach_params {
* all firmware/topology related fields.
*
* @id: ACPI ID (usually the codec's) used to find a matching machine driver.
+ * @link_mask: describes required board layout, e.g. for SoundWire.
+ * @links: array of link _ADR descriptors, null terminated.
* @drv_name: machine driver name
* @fw_filename: firmware file name. Used when SOF is not enabled.
* @board: board name
@@ -90,6 +113,8 @@ struct snd_soc_acpi_mach_params {
/* Descriptor for SST ASoC machine driver */
struct snd_soc_acpi_mach {
const u8 id[ACPI_ID_LEN];
+ const u32 link_mask;
+ const struct snd_soc_acpi_link_adr *links;
const char *drv_name;
const char *fw_filename;
const char *board;
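
To make the new link descriptors concrete, here is a hedged, made-up machine-table fragment; the _ADR values, driver name and link layout are purely illustrative, while the field names follow the snd_soc_acpi_link_adr and snd_soc_acpi_mach additions above:

    /* illustrative only: one codec on link 0, two devices on link 1 */
    static const u64 example_link0_adr[] = {
            0x000010025D071100ULL,
    };
    static const u64 example_link1_adr[] = {
            0x000110025D131601ULL,
            0x000110025D131602ULL,
    };

    static const struct snd_soc_acpi_link_adr example_links[] = {
            {
                    .mask = BIT(0),
                    .num_adr = ARRAY_SIZE(example_link0_adr),
                    .adr = example_link0_adr,
            },
            {
                    .mask = BIT(1),
                    .num_adr = ARRAY_SIZE(example_link1_adr),
                    .adr = example_link1_adr,
            },
            {}      /* null terminator, as the comment above requires */
    };

    struct snd_soc_acpi_mach example_sdw_machines[] = {
            {
                    .link_mask = BIT(0) | BIT(1),   /* board uses links 0 and 1 */
                    .links = example_links,
                    .drv_name = "example_sdw_card", /* hypothetical */
            },
            {}
    };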
diff --git a/include/sound/soc-component.h b/include/sound/soc-component.h
index 5d80b2eef525..154d02fbbfed 100644
--- a/include/sound/soc-component.h
+++ b/include/sound/soc-component.h
@@ -3,10 +3,6 @@
* soc-component.h
*
* Copyright (c) 2019 Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
*/
#ifndef __SOC_COMPONENT_H
#define __SOC_COMPONENT_H
@@ -51,8 +47,10 @@ struct snd_soc_component_driver {
unsigned int reg, unsigned int val);
/* pcm creation and destruction */
- int (*pcm_new)(struct snd_soc_pcm_runtime *rtd);
- void (*pcm_free)(struct snd_pcm *pcm);
+ int (*pcm_construct)(struct snd_soc_component *component,
+ struct snd_soc_pcm_runtime *rtd);
+ void (*pcm_destruct)(struct snd_soc_component *component,
+ struct snd_pcm *pcm);
/* component wide operations */
int (*set_sysclk)(struct snd_soc_component *component,
@@ -74,7 +72,42 @@ struct snd_soc_component_driver {
int (*set_bias_level)(struct snd_soc_component *component,
enum snd_soc_bias_level level);
- const struct snd_pcm_ops *ops;
+ int (*open)(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream);
+ int (*close)(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream);
+ int (*ioctl)(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream,
+ unsigned int cmd, void *arg);
+ int (*hw_params)(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params);
+ int (*hw_free)(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream);
+ int (*prepare)(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream);
+ int (*trigger)(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream, int cmd);
+ int (*sync_stop)(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream);
+ snd_pcm_uframes_t (*pointer)(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream);
+ int (*get_time_info)(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream, struct timespec64 *system_ts,
+ struct timespec64 *audio_ts,
+ struct snd_pcm_audio_tstamp_config *audio_tstamp_config,
+ struct snd_pcm_audio_tstamp_report *audio_tstamp_report);
+ int (*copy_user)(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream, int channel,
+ unsigned long pos, void __user *buf,
+ unsigned long bytes);
+ struct page *(*page)(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream,
+ unsigned long offset);
+ int (*mmap)(struct snd_soc_component *component,
+ struct snd_pcm_substream *substream,
+ struct vm_area_struct *vma);
+
const struct snd_compr_ops *compr_ops;
/* probe ordering - for components with runtime dependencies */
@@ -374,6 +407,7 @@ int snd_soc_component_of_xlate_dai_name(struct snd_soc_component *component,
int snd_soc_pcm_component_pointer(struct snd_pcm_substream *substream);
int snd_soc_pcm_component_ioctl(struct snd_pcm_substream *substream,
unsigned int cmd, void *arg);
+int snd_soc_pcm_component_sync_stop(struct snd_pcm_substream *substream);
int snd_soc_pcm_component_copy_user(struct snd_pcm_substream *substream,
int channel, unsigned long pos,
void __user *buf, unsigned long bytes);
@@ -381,7 +415,7 @@ struct page *snd_soc_pcm_component_page(struct snd_pcm_substream *substream,
unsigned long offset);
int snd_soc_pcm_component_mmap(struct snd_pcm_substream *substream,
struct vm_area_struct *vma);
-int snd_soc_pcm_component_new(struct snd_pcm *pcm);
-void snd_soc_pcm_component_free(struct snd_pcm *pcm);
+int snd_soc_pcm_component_new(struct snd_soc_pcm_runtime *rtd);
+void snd_soc_pcm_component_free(struct snd_soc_pcm_runtime *rtd);
#endif /* __SOC_COMPONENT_H */
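
A minimal sketch of a component driver adopting the reworked interface (names are hypothetical; only the callback signatures come from the header above). The single snd_pcm_ops pointer is replaced by per-component callbacks that take the component as their first argument, and pcm_new/pcm_free become pcm_construct/pcm_destruct:

    static int example_open(struct snd_soc_component *component,
                            struct snd_pcm_substream *substream)
    {
            /* per-substream setup, e.g. constraints or hw definition */
            return 0;
    }

    static snd_pcm_uframes_t
    example_pointer(struct snd_soc_component *component,
                    struct snd_pcm_substream *substream)
    {
            return 0;       /* report the current DMA position here */
    }

    static int example_pcm_construct(struct snd_soc_component *component,
                                     struct snd_soc_pcm_runtime *rtd)
    {
            /* buffer preallocation for rtd->pcm goes here */
            return 0;
    }

    static const struct snd_soc_component_driver example_component = {
            .name           = "example-platform",   /* hypothetical */
            .open           = example_open,
            .pointer        = example_pointer,
            .pcm_construct  = example_pcm_construct,
    };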
diff --git a/include/sound/soc-dai.h b/include/sound/soc-dai.h
index 939c73db6a03..eaaeb00e9e84 100644
--- a/include/sound/soc-dai.h
+++ b/include/sound/soc-dai.h
@@ -286,8 +286,6 @@ struct snd_soc_dai_driver {
/* DAI driver callbacks */
int (*probe)(struct snd_soc_dai *dai);
int (*remove)(struct snd_soc_dai *dai);
- int (*suspend)(struct snd_soc_dai *dai);
- int (*resume)(struct snd_soc_dai *dai);
/* compress dai */
int (*compress_new)(struct snd_soc_pcm_runtime *rtd, int num);
/* Optional Callback used at pcm creation*/
@@ -304,7 +302,6 @@ struct snd_soc_dai_driver {
unsigned int symmetric_rates:1;
unsigned int symmetric_channels:1;
unsigned int symmetric_samplebits:1;
- unsigned int bus_control:1; /* DAI is also used for the control bus */
/* probe ordering - for components with runtime dependencies */
int probe_order;
diff --git a/include/sound/soc-dapm.h b/include/sound/soc-dapm.h
index 6e8a31225383..1b6afbc1a4ed 100644
--- a/include/sound/soc-dapm.h
+++ b/include/sound/soc-dapm.h
@@ -434,6 +434,7 @@ void snd_soc_dapm_reset_cache(struct snd_soc_dapm_context *dapm);
/* dapm events */
void snd_soc_dapm_stream_event(struct snd_soc_pcm_runtime *rtd, int stream,
int event);
+void snd_soc_dapm_stream_stop(struct snd_soc_pcm_runtime *rtd, int stream);
void snd_soc_dapm_shutdown(struct snd_soc_card *card);
/* external DAPM widget events */
diff --git a/include/sound/soc-dpcm.h b/include/sound/soc-dpcm.h
index e55aeb00ce2d..b654ebfc8766 100644
--- a/include/sound/soc-dpcm.h
+++ b/include/sound/soc-dpcm.h
@@ -103,15 +103,15 @@ struct snd_soc_dpcm_runtime {
int trigger_pending; /* trigger cmd + 1 if pending, 0 if not */
};
-#define for_each_dpcm_fe(be, stream, dpcm) \
- list_for_each_entry(dpcm, &(be)->dpcm[stream].fe_clients, list_fe)
-
-#define for_each_dpcm_be(fe, stream, dpcm) \
- list_for_each_entry(dpcm, &(fe)->dpcm[stream].be_clients, list_be)
-#define for_each_dpcm_be_safe(fe, stream, dpcm, _dpcm) \
- list_for_each_entry_safe(dpcm, _dpcm, &(fe)->dpcm[stream].be_clients, list_be)
-#define for_each_dpcm_be_rollback(fe, stream, dpcm) \
- list_for_each_entry_continue_reverse(dpcm, &(fe)->dpcm[stream].be_clients, list_be)
+#define for_each_dpcm_fe(be, stream, _dpcm) \
+ list_for_each_entry(_dpcm, &(be)->dpcm[stream].fe_clients, list_fe)
+
+#define for_each_dpcm_be(fe, stream, _dpcm) \
+ list_for_each_entry(_dpcm, &(fe)->dpcm[stream].be_clients, list_be)
+#define for_each_dpcm_be_safe(fe, stream, _dpcm, __dpcm) \
+ list_for_each_entry_safe(_dpcm, __dpcm, &(fe)->dpcm[stream].be_clients, list_be)
+#define for_each_dpcm_be_rollback(fe, stream, _dpcm) \
+ list_for_each_entry_continue_reverse(_dpcm, &(fe)->dpcm[stream].be_clients, list_be)
/* can this BE stop and free */
int snd_soc_dpcm_can_be_free_stop(struct snd_soc_pcm_runtime *fe,
diff --git a/include/sound/soc.h b/include/sound/soc.h
index f264c6509f00..8a2266676b2d 100644
--- a/include/sound/soc.h
+++ b/include/sound/soc.h
@@ -299,6 +299,12 @@
.put = snd_soc_bytes_put, .private_value = \
((unsigned long)&(struct soc_bytes) \
{.base = xbase, .num_regs = xregs }) }
+#define SND_SOC_BYTES_E(xname, xbase, xregs, xhandler_get, xhandler_put) \
+{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
+ .info = snd_soc_bytes_info, .get = xhandler_get, \
+ .put = xhandler_put, .private_value = \
+ ((unsigned long)&(struct soc_bytes) \
+ {.base = xbase, .num_regs = xregs }) }
#define SND_SOC_BYTES_MASK(xname, xbase, xregs, xmask) \
{ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
@@ -458,10 +464,8 @@ static inline int snd_soc_new_compress(struct snd_soc_pcm_runtime *rtd, int num)
void snd_soc_disconnect_sync(struct device *dev);
-struct snd_pcm_substream *snd_soc_get_dai_substream(struct snd_soc_card *card,
- const char *dai_link, int stream);
struct snd_soc_pcm_runtime *snd_soc_get_pcm_runtime(struct snd_soc_card *card,
- const char *dai_link);
+ struct snd_soc_dai_link *dai_link);
bool snd_soc_runtime_ignore_pmdown_time(struct snd_soc_pcm_runtime *rtd);
void snd_soc_runtime_activate(struct snd_soc_pcm_runtime *rtd, int stream);
@@ -732,17 +736,9 @@ struct snd_soc_compr_ops {
int (*trigger)(struct snd_compr_stream *);
};
-struct snd_soc_rtdcom_list {
- struct snd_soc_component *component;
- struct list_head list; /* rtd::component_list */
-};
struct snd_soc_component*
snd_soc_rtdcom_lookup(struct snd_soc_pcm_runtime *rtd,
const char *driver_name);
-#define for_each_rtdcom(rtd, rtdcom) \
- list_for_each_entry(rtdcom, &(rtd)->component_list, list)
-#define for_each_rtdcom_safe(rtd, rtdcom1, rtdcom2) \
- list_for_each_entry_safe(rtdcom1, rtdcom2, &(rtd)->component_list, list)
struct snd_soc_dai_link_component {
const char *name;
@@ -844,8 +840,9 @@ struct snd_soc_dai_link {
/* Do not create a PCM for this DAI link (Backend link) */
unsigned int ignore:1;
- struct list_head list; /* DAI link list of the soc card */
+#ifdef CONFIG_SND_SOC_TOPOLOGY
struct snd_soc_dobj dobj; /* For topology */
+#endif
};
#define for_each_link_codecs(link, i, codec) \
for ((i) = 0; \
@@ -942,6 +939,7 @@ struct snd_soc_dai_link {
#define COMP_CODEC(_name, _dai) { .name = _name, .dai_name = _dai, }
#define COMP_PLATFORM(_name) { .name = _name }
#define COMP_AUX(_name) { .name = _name }
+#define COMP_CODEC_CONF(_name) { .name = _name }
#define COMP_DUMMY() { .name = "snd-soc-dummy", .dai_name = "snd-soc-dummy-dai", }
extern struct snd_soc_dai_link_component null_dailink_component[0];
@@ -952,8 +950,7 @@ struct snd_soc_codec_conf {
* specify device either by device name, or by
* DT/OF node, but not both.
*/
- const char *dev_name;
- struct device_node *of_node;
+ struct snd_soc_dai_link_component dlc;
/*
* optional map of kcontrol, widget and path name prefixes that are
@@ -978,7 +975,10 @@ struct snd_soc_card {
const char *name;
const char *long_name;
const char *driver_name;
+ const char *components;
+#ifdef CONFIG_DMI
char dmi_longname[80];
+#endif /* CONFIG_DMI */
char topology_shortname[32];
struct device *dev;
@@ -1026,7 +1026,6 @@ struct snd_soc_card {
/* CPU <--> Codec DAI links */
struct snd_soc_dai_link *dai_link; /* predefined links only */
int num_links; /* predefined links only */
- struct list_head dai_link_list; /* all links */
struct list_head rtd_list;
int num_rtd;
@@ -1096,11 +1095,6 @@ struct snd_soc_card {
((i) < (card)->num_aux_devs) && ((aux) = &(card)->aux_dev[i]); \
(i)++)
-#define for_each_card_links(card, link) \
- list_for_each_entry(link, &(card)->dai_link_list, list)
-#define for_each_card_links_safe(card, link, _link) \
- list_for_each_entry_safe(link, _link, &(card)->dai_link_list, list)
-
#define for_each_card_rtds(card, rtd) \
list_for_each_entry(rtd, &(card)->rtd_list, list)
#define for_each_card_rtds_safe(card, rtd, _rtd) \
@@ -1139,26 +1133,33 @@ struct snd_soc_pcm_runtime {
unsigned int num_codecs;
struct delayed_work delayed_work;
+ void (*close_delayed_work_func)(struct snd_soc_pcm_runtime *rtd);
#ifdef CONFIG_DEBUG_FS
struct dentry *debugfs_dpcm_root;
#endif
unsigned int num; /* 0-based and monotonic increasing */
struct list_head list; /* rtd list of the soc card */
- struct list_head component_list; /* list of connected components */
/* bit field */
- unsigned int dev_registered:1;
unsigned int pop_wait:1;
unsigned int fe_compr:1; /* for Dynamic PCM */
+
+ int num_components;
+ struct snd_soc_component *components[0]; /* CPU/Codec/Platform */
};
+#define for_each_rtd_components(rtd, i, component) \
+ for ((i) = 0; \
+ ((i) < rtd->num_components) && ((component) = rtd->components[i]);\
+ (i)++)
#define for_each_rtd_codec_dai(rtd, i, dai)\
for ((i) = 0; \
((i) < rtd->num_codecs) && ((dai) = rtd->codec_dais[i]); \
(i)++)
#define for_each_rtd_codec_dai_rollback(rtd, i, dai) \
- for (; ((--i) >= 0) && ((dai) = rtd->codec_dais[i]);)
+ for (; (--(i) >= 0) && ((dai) = rtd->codec_dais[i]);)
+void snd_soc_close_delayed_work(struct snd_soc_pcm_runtime *rtd);
/* mixer control */
struct soc_mixer_control {
@@ -1168,7 +1169,9 @@ struct soc_mixer_control {
unsigned int sign_bit;
unsigned int invert:1;
unsigned int autodisable:1;
+#ifdef CONFIG_SND_SOC_TOPOLOGY
struct snd_soc_dobj dobj;
+#endif
};
struct soc_bytes {
@@ -1179,8 +1182,9 @@ struct soc_bytes {
struct soc_bytes_ext {
int max;
+#ifdef CONFIG_SND_SOC_TOPOLOGY
struct snd_soc_dobj dobj;
-
+#endif
/* used for TLV byte control */
int (*get)(struct snd_kcontrol *kcontrol, unsigned int __user *bytes,
unsigned int size);
@@ -1204,7 +1208,9 @@ struct soc_enum {
const char * const *texts;
const unsigned int *values;
unsigned int autodisable:1;
+#ifdef CONFIG_SND_SOC_TOPOLOGY
struct snd_soc_dobj dobj;
+#endif
};
/* device driver data */
@@ -1317,16 +1323,15 @@ int snd_soc_of_get_dai_link_codecs(struct device *dev,
struct snd_soc_dai_link *dai_link);
void snd_soc_of_put_dai_link_codecs(struct snd_soc_dai_link *dai_link);
-int snd_soc_add_dai_link(struct snd_soc_card *card,
- struct snd_soc_dai_link *dai_link);
-void snd_soc_remove_dai_link(struct snd_soc_card *card,
- struct snd_soc_dai_link *dai_link);
-struct snd_soc_dai_link *snd_soc_find_dai_link(struct snd_soc_card *card,
- int id, const char *name,
- const char *stream_name);
-
-int snd_soc_register_dai(struct snd_soc_component *component,
- struct snd_soc_dai_driver *dai_drv);
+int snd_soc_add_pcm_runtime(struct snd_soc_card *card,
+ struct snd_soc_dai_link *dai_link);
+void snd_soc_remove_pcm_runtime(struct snd_soc_card *card,
+ struct snd_soc_pcm_runtime *rtd);
+
+struct snd_soc_dai *snd_soc_register_dai(struct snd_soc_component *component,
+ struct snd_soc_dai_driver *dai_drv,
+ bool legacy_dai_naming);
+void snd_soc_unregister_dai(struct snd_soc_dai *dai);
struct snd_soc_dai *snd_soc_find_dai(
const struct snd_soc_dai_link_component *dlc);
diff --git a/include/sound/sof.h b/include/sound/sof.h
index 4640566b54fe..a0cbca021230 100644
--- a/include/sound/sof.h
+++ b/include/sound/sof.h
@@ -22,7 +22,6 @@ struct snd_sof_dsp_ops;
*/
struct snd_sof_pdata {
const struct firmware *fw;
- const char *drv_name;
const char *name;
const char *platform;
@@ -61,6 +60,9 @@ struct sof_dev_desc {
/* list of machines using this configuration */
struct snd_soc_acpi_mach *machines;
+ /* alternate list of machines using this configuration */
+ struct snd_soc_acpi_mach *alt_machines;
+
/* Platform resource indexes in BAR / ACPI resources. */
/* Must set to -1 if not used - add new items to end */
int resindex_lpe_base;
@@ -81,20 +83,18 @@ struct sof_dev_desc {
const void *chip_info;
/* defaults for no codec mode */
- const char *nocodec_fw_filename;
const char *nocodec_tplg_filename;
/* defaults paths for firmware and topology files */
const char *default_fw_path;
const char *default_tplg_path;
+ /* default firmware name */
+ const char *default_fw_filename;
+
const struct snd_sof_dsp_ops *ops;
- const struct sof_arch_ops *arch_ops;
};
int sof_nocodec_setup(struct device *dev,
- struct snd_sof_pdata *sof_pdata,
- struct snd_soc_acpi_mach *mach,
- const struct sof_dev_desc *desc,
const struct snd_sof_dsp_ops *ops);
#endif
diff --git a/include/sound/sof/channel_map.h b/include/sound/sof/channel_map.h
new file mode 100644
index 000000000000..21044eb5f377
--- /dev/null
+++ b/include/sound/sof/channel_map.h
@@ -0,0 +1,61 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
+/*
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * Copyright(c) 2019 Intel Corporation. All rights reserved.
+ */
+
+#ifndef __IPC_CHANNEL_MAP_H__
+#define __IPC_CHANNEL_MAP_H__
+
+#include <uapi/sound/sof/header.h>
+#include <sound/sof/header.h>
+
+/**
+ * \brief Channel map, specifies transformation of one-to-many or many-to-one.
+ *
+ * In case of one-to-many specifies how the output channels are computed out of
+ * a single source channel,
+ * in case of many-to-one specifies how a single target channel is computed
+ * from a multichannel input stream.
+ *
+ * Channel index specifies position of the channel in the stream on the 'one'
+ * side.
+ *
+ * Ext ID is the identifier of external part of the transformation. Depending
+ * on the context, it may be pipeline ID, dai ID, ...
+ *
+ * Channel mask describes which channels are taken into account on the "many"
+ * side. Bit[i] set to 1 means that i-th channel is used for computation
+ * (either as source or as a target).
+ *
+ * Channel mask is followed by array of coefficients in Q2.30 format,
+ * one per each channel set in the mask (left to right, LS bit set in the
+ * mask corresponds to ch_coeffs[0]).
+ */
+struct sof_ipc_channel_map {
+ uint32_t ch_index;
+ uint32_t ext_id;
+ uint32_t ch_mask;
+ uint32_t reserved;
+ int32_t ch_coeffs[0];
+} __packed;
+
+/**
+ * \brief Complete map for each channel of a multichannel stream.
+ *
+ * num_ch_map Specifies number of items in the ch_map.
+ * More than one transformation per a single channel is allowed (in case
+ * multiple external entities are transformed).
+ * A channel may be skipped in the transformation list, then it is filled
+ * with 0's by the transformation function.
+ */
+struct sof_ipc_stream_map {
+ struct sof_ipc_cmd_hdr hdr;
+ uint32_t num_ch_map;
+ uint32_t reserved[3];
+ struct sof_ipc_channel_map ch_map[0];
+} __packed;
+
+#endif /* __IPC_CHANNEL_MAP_H__ */
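
A hedged kernel-side sketch of filling one many-to-one entry, mixing source channels 0 and 1 into a single target channel with 0.5 gain each; the ids are illustrative, and 0.5 in Q2.30 fixed point is 1 << 29:

    #include <linux/bits.h>
    #include <linux/overflow.h>
    #include <linux/slab.h>
    #include <sound/sof/channel_map.h>

    static struct sof_ipc_channel_map *example_build_downmix_map(void)
    {
            struct sof_ipc_channel_map *map;

            /* two coefficients: one per bit set in ch_mask below */
            map = kzalloc(struct_size(map, ch_coeffs, 2), GFP_KERNEL);
            if (!map)
                    return NULL;

            map->ch_index = 0;              /* target channel, the "one" side */
            map->ext_id = 0;                /* e.g. a pipeline or DAI id */
            map->ch_mask = BIT(0) | BIT(1); /* source channels, the "many" side */
            map->ch_coeffs[0] = 1 << 29;    /* 0.5 in Q2.30 for channel 0 */
            map->ch_coeffs[1] = 1 << 29;    /* 0.5 in Q2.30 for channel 1 */

            return map;
    }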
diff --git a/include/sound/sof/dai-imx.h b/include/sound/sof/dai-imx.h
new file mode 100644
index 000000000000..ff9088dcc6f2
--- /dev/null
+++ b/include/sound/sof/dai-imx.h
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
+/*
+ * Copyright 2019 NXP
+ *
+ * Author: Daniel Baluta <daniel.baluta@nxp.com>
+ */
+
+#ifndef __INCLUDE_SOUND_SOF_DAI_IMX_H__
+#define __INCLUDE_SOUND_SOF_DAI_IMX_H__
+
+#include <sound/sof/header.h>
+
+/* ESAI Configuration Request - SOF_IPC_DAI_ESAI_CONFIG */
+struct sof_ipc_dai_esai_params {
+ struct sof_ipc_hdr hdr;
+
+ /* MCLK */
+ uint16_t reserved1;
+ uint16_t mclk_id;
+ uint32_t mclk_direction;
+
+ uint32_t mclk_rate; /* MCLK frequency in Hz */
+ uint32_t fsync_rate; /* FSYNC frequency in Hz */
+ uint32_t bclk_rate; /* BCLK frequency in Hz */
+
+ /* TDM */
+ uint32_t tdm_slots;
+ uint32_t rx_slots;
+ uint32_t tx_slots;
+ uint16_t tdm_slot_width;
+ uint16_t reserved2; /* alignment */
+} __packed;
+
+/* SAI Configuration Request - SOF_IPC_DAI_SAI_CONFIG */
+struct sof_ipc_dai_sai_params {
+ struct sof_ipc_hdr hdr;
+
+ /* MCLK */
+ uint16_t reserved1;
+ uint16_t mclk_id;
+ uint32_t mclk_direction;
+
+ uint32_t mclk_rate; /* MCLK frequency in Hz */
+ uint32_t fsync_rate; /* FSYNC frequency in Hz */
+ uint32_t bclk_rate; /* BCLK frequency in Hz */
+
+ /* TDM */
+ uint32_t tdm_slots;
+ uint32_t rx_slots;
+ uint32_t tx_slots;
+ uint16_t tdm_slot_width;
+ uint16_t reserved2; /* alignment */
+} __packed;
+#endif
diff --git a/include/sound/sof/dai.h b/include/sound/sof/dai.h
index 0f1235022146..2565edd336f1 100644
--- a/include/sound/sof/dai.h
+++ b/include/sound/sof/dai.h
@@ -11,6 +11,7 @@
#include <sound/sof/header.h>
#include <sound/sof/dai-intel.h>
+#include <sound/sof/dai-imx.h>
/*
* DAI Configuration.
@@ -73,6 +74,8 @@ struct sof_ipc_dai_config {
struct sof_ipc_dai_dmic_params dmic;
struct sof_ipc_dai_hda_params hda;
struct sof_ipc_dai_alh_params alh;
+ struct sof_ipc_dai_esai_params esai;
+ struct sof_ipc_dai_sai_params sai;
};
} __packed;
diff --git a/include/sound/sof/header.h b/include/sound/sof/header.h
index 10f00c08dbb7..bf3edd9c08b4 100644
--- a/include/sound/sof/header.h
+++ b/include/sound/sof/header.h
@@ -9,6 +9,7 @@
#ifndef __INCLUDE_SOUND_SOF_HEADER_H__
#define __INCLUDE_SOUND_SOF_HEADER_H__
+#include <linux/types.h>
#include <uapi/sound/sof/abi.h>
/** \addtogroup sof_uapi uAPI
@@ -74,6 +75,7 @@
#define SOF_IPC_PM_CLK_GET SOF_CMD_TYPE(0x005)
#define SOF_IPC_PM_CLK_REQ SOF_CMD_TYPE(0x006)
#define SOF_IPC_PM_CORE_ENABLE SOF_CMD_TYPE(0x007)
+#define SOF_IPC_PM_GATE SOF_CMD_TYPE(0x008)
/* component runtime config - multiple different types */
#define SOF_IPC_COMP_SET_VALUE SOF_CMD_TYPE(0x001)
diff --git a/include/sound/sof/info.h b/include/sound/sof/info.h
index a9156b4a062c..1c560144996c 100644
--- a/include/sound/sof/info.h
+++ b/include/sound/sof/info.h
@@ -30,6 +30,7 @@
enum sof_ipc_ext_data {
SOF_IPC_EXT_DMA_BUFFER = 0,
SOF_IPC_EXT_WINDOW,
+ SOF_IPC_EXT_CC_INFO,
};
/* FW version - SOF_IPC_GLB_VERSION */
@@ -115,4 +116,18 @@ struct sof_ipc_window {
struct sof_ipc_window_elem window[];
} __packed;
+struct sof_ipc_cc_version {
+ struct sof_ipc_ext_data_hdr ext_hdr;
+ uint32_t major;
+ uint32_t minor;
+ uint32_t micro;
+
+ /* reserved for future use */
+ uint32_t reserved[4];
+
+ char name[16]; /* null terminated compiler name */
+ char optim[4]; /* null terminated compiler -O flag value */
+ char desc[]; /* null terminated compiler description */
+} __packed;
+
#endif
diff --git a/include/sound/sof/pm.h b/include/sound/sof/pm.h
index 003879401d63..3cf2e0f39d94 100644
--- a/include/sound/sof/pm.h
+++ b/include/sound/sof/pm.h
@@ -45,4 +45,12 @@ struct sof_ipc_pm_core_config {
uint32_t enable_mask;
} __packed;
+struct sof_ipc_pm_gate {
+ struct sof_ipc_cmd_hdr hdr;
+ uint32_t flags; /* platform specific */
+
+ /* reserved for future use */
+ uint32_t reserved[5];
+} __packed;
+
#endif
diff --git a/include/sound/sof/stream.h b/include/sound/sof/stream.h
index 0b71b381b952..7facefb541b3 100644
--- a/include/sound/sof/stream.h
+++ b/include/sound/sof/stream.h
@@ -83,10 +83,10 @@ struct sof_ipc_stream_params {
uint16_t sample_valid_bytes;
uint16_t sample_container_bytes;
- /* for notifying host period has completed - 0 means no period IRQ */
uint32_t host_period_bytes;
+ uint16_t no_stream_position; /**< 1 means don't send stream position */
- uint32_t reserved[2];
+ uint16_t reserved[3];
uint16_t chmap[SOF_IPC_MAX_CHANNELS]; /**< channel map - SOF_CHMAP_ */
} __packed;
diff --git a/include/sound/sof/topology.h b/include/sound/sof/topology.h
index c47b36240920..8e76178fedf0 100644
--- a/include/sound/sof/topology.h
+++ b/include/sound/sof/topology.h
@@ -36,6 +36,7 @@ enum sof_comp_type {
SOF_COMP_KPB, /* A key phrase buffer component */
SOF_COMP_SELECTOR, /**< channel selector component */
SOF_COMP_DEMUX,
+ SOF_COMP_ASRC, /**< Asynchronous sample rate converter */
/* keep FILEREAD/FILEWRITE as the last ones */
SOF_COMP_FILEREAD = 10000, /**< host test based file IO */
SOF_COMP_FILEWRITE = 10001, /**< host test based file IO */
@@ -147,6 +148,32 @@ struct sof_ipc_comp_src {
uint32_t rate_mask; /**< SOF_RATE_ supported rates */
} __packed;
+/* generic ASRC component */
+struct sof_ipc_comp_asrc {
+ struct sof_ipc_comp comp;
+ struct sof_ipc_comp_config config;
+ /* either source or sink rate must be non zero */
+ uint32_t source_rate; /**< Define fixed source rate or */
+ /**< use 0 to indicate need to get */
+ /**< the rate from stream */
+ uint32_t sink_rate; /**< Define fixed sink rate or */
+ /**< use 0 to indicate need to get */
+ /**< the rate from stream */
+ uint32_t asynchronous_mode; /**< synchronous 0, asynchronous 1 */
+ /**< When 1 the ASRC tracks and */
+ /**< compensates for drift. */
+ uint32_t operation_mode; /**< push 0, pull 1, In push mode the */
+ /**< ASRC consumes a defined number */
+ /**< of frames at input, with varying */
+ /**< number of frames at output. */
+ /**< In pull mode the ASRC outputs */
+ /**< a defined number of frames while */
+ /**< number of input frames varies. */
+
+ /* reserved for future use */
+ uint32_t reserved[4];
+} __attribute__((packed));
+
/* generic MUX component */
struct sof_ipc_comp_mux {
struct sof_ipc_comp comp;
diff --git a/include/sound/timer.h b/include/sound/timer.h
index 199c36295a0d..23e885d31525 100644
--- a/include/sound/timer.h
+++ b/include/sound/timer.h
@@ -89,7 +89,7 @@ struct snd_timer_instance {
unsigned long ticks, unsigned long resolution);
void (*ccallback) (struct snd_timer_instance * timeri,
int event,
- struct timespec * tstamp,
+ struct timespec64 * tstamp,
unsigned long resolution);
void (*disconnect)(struct snd_timer_instance *timeri);
void *callback_data;
@@ -113,13 +113,15 @@ struct snd_timer_instance {
*/
int snd_timer_new(struct snd_card *card, char *id, struct snd_timer_id *tid, struct snd_timer **rtimer);
-void snd_timer_notify(struct snd_timer *timer, int event, struct timespec *tstamp);
+void snd_timer_notify(struct snd_timer *timer, int event, struct timespec64 *tstamp);
int snd_timer_global_new(char *id, int device, struct snd_timer **rtimer);
int snd_timer_global_free(struct snd_timer *timer);
int snd_timer_global_register(struct snd_timer *timer);
-int snd_timer_open(struct snd_timer_instance **ti, char *owner, struct snd_timer_id *tid, unsigned int slave_id);
-int snd_timer_close(struct snd_timer_instance *timeri);
+struct snd_timer_instance *snd_timer_instance_new(const char *owner);
+void snd_timer_instance_free(struct snd_timer_instance *timeri);
+int snd_timer_open(struct snd_timer_instance *timeri, struct snd_timer_id *tid, unsigned int slave_id);
+void snd_timer_close(struct snd_timer_instance *timeri);
unsigned long snd_timer_resolution(struct snd_timer_instance *timeri);
int snd_timer_start(struct snd_timer_instance *timeri, unsigned int ticks);
int snd_timer_stop(struct snd_timer_instance *timeri);
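
The open/close flow above is reshaped: the instance is now allocated and freed by the caller, and snd_timer_close() no longer returns a value or releases the instance. A minimal sketch, assuming a struct snd_timer_id tid has been filled in elsewhere and using a hypothetical owner string:

    struct snd_timer_instance *timeri;
    int err;

    timeri = snd_timer_instance_new("example-owner");
    if (!timeri)
            return -ENOMEM;

    err = snd_timer_open(timeri, &tid, 0);
    if (err < 0) {
            snd_timer_instance_free(timeri);
            return err;
    }

    /* ... snd_timer_start()/snd_timer_stop() as before ... */

    snd_timer_close(timeri);            /* now returns void */
    snd_timer_instance_free(timeri);    /* caller owns the instance lifetime */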
diff --git a/include/sound/vx_core.h b/include/sound/vx_core.h
index 84569ddf85e1..1ddd3036bdfc 100644
--- a/include/sound/vx_core.h
+++ b/include/sound/vx_core.h
@@ -147,8 +147,8 @@ struct vx_core {
/* ports are defined externally */
/* low-level functions */
- struct snd_vx_hardware *hw;
- struct snd_vx_ops *ops;
+ const struct snd_vx_hardware *hw;
+ const struct snd_vx_ops *ops;
struct mutex lock;
@@ -193,8 +193,9 @@ struct vx_core {
/*
* constructor
*/
-struct vx_core *snd_vx_create(struct snd_card *card, struct snd_vx_hardware *hw,
- struct snd_vx_ops *ops, int extra_size);
+struct vx_core *snd_vx_create(struct snd_card *card,
+ const struct snd_vx_hardware *hw,
+ const struct snd_vx_ops *ops, int extra_size);
int snd_vx_setup_firmware(struct vx_core *chip);
int snd_vx_load_boot_image(struct vx_core *chip, const struct firmware *dsp);
int snd_vx_dsp_boot(struct vx_core *chip, const struct firmware *dsp);
diff --git a/include/sound/wm8904.h b/include/sound/wm8904.h
index 14074405f501..88ac1870510e 100644
--- a/include/sound/wm8904.h
+++ b/include/sound/wm8904.h
@@ -120,7 +120,7 @@
* DRC configurations are specified with a label and a set of register
* values to write (the enable bits will be ignored). At runtime an
* enumerated control will be presented for each DRC block allowing
- * the user to choose the configration to use.
+ * the user to choose the configuration to use.
*
* Configurations may be generated by hand or by using the DRC control
* panel provided by the WISCE - see http://www.wolfsonmicro.com/wisce/
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index 7c9716fe731e..1728e883b7b2 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -876,7 +876,6 @@ struct se_portal_group {
/* Spinlock for adding/removing sessions */
spinlock_t session_lock;
struct mutex tpg_lun_mutex;
- struct list_head se_tpg_node;
/* linked list for initiator ACL list */
struct list_head acl_node_list;
struct hlist_head tpg_lun_hlist;
diff --git a/include/trace/bpf_probe.h b/include/trace/bpf_probe.h
index d6e556c0a085..b04c29270973 100644
--- a/include/trace/bpf_probe.h
+++ b/include/trace/bpf_probe.h
@@ -74,11 +74,12 @@ static inline void bpf_test_probe_##call(void) \
{ \
check_trace_callback_type_##call(__bpf_trace_##template); \
} \
+typedef void (*btf_trace_##call)(void *__data, proto); \
static struct bpf_raw_event_map __used \
__attribute__((section("__bpf_raw_tp_map"))) \
__bpf_trace_tp_map_##call = { \
.tp = &__tracepoint_##call, \
- .bpf_func = (void *)__bpf_trace_##template, \
+ .bpf_func = (void *)(btf_trace_##call)__bpf_trace_##template, \
.num_args = COUNT_ARGS(args), \
.writable_size = size, \
};
diff --git a/include/trace/events/afs.h b/include/trace/events/afs.h
index d5ec4fac82ae..c612cabbc378 100644
--- a/include/trace/events/afs.h
+++ b/include/trace/events/afs.h
@@ -233,7 +233,7 @@ enum afs_cb_break_reason {
EM(afs_call_trace_get, "GET ") \
EM(afs_call_trace_put, "PUT ") \
EM(afs_call_trace_wake, "WAKE ") \
- E_(afs_call_trace_work, "WORK ")
+ E_(afs_call_trace_work, "QUEUE")
#define afs_server_traces \
EM(afs_server_trace_alloc, "ALLOC ") \
@@ -915,9 +915,9 @@ TRACE_EVENT(afs_call_state,
TRACE_EVENT(afs_lookup,
TP_PROTO(struct afs_vnode *dvnode, const struct qstr *name,
- struct afs_vnode *vnode),
+ struct afs_fid *fid),
- TP_ARGS(dvnode, name, vnode),
+ TP_ARGS(dvnode, name, fid),
TP_STRUCT__entry(
__field_struct(struct afs_fid, dfid )
@@ -928,13 +928,7 @@ TRACE_EVENT(afs_lookup,
TP_fast_assign(
int __len = min_t(int, name->len, 23);
__entry->dfid = dvnode->fid;
- if (vnode) {
- __entry->fid = vnode->fid;
- } else {
- __entry->fid.vid = 0;
- __entry->fid.vnode = 0;
- __entry->fid.unique = 0;
- }
+ __entry->fid = *fid;
memcpy(__entry->name, name->name, __len);
__entry->name[__len] = 0;
),
diff --git a/include/trace/events/bcache.h b/include/trace/events/bcache.h
index e4526f85c19d..0bddea663b3b 100644
--- a/include/trace/events/bcache.h
+++ b/include/trace/events/bcache.h
@@ -275,7 +275,8 @@ TRACE_EVENT(bcache_btree_write,
__entry->keys = b->keys.set[b->keys.nsets].data->keys;
),
- TP_printk("bucket %zu", __entry->bucket)
+ TP_printk("bucket %zu written block %u + %u",
+ __entry->bucket, __entry->block, __entry->keys)
);
DEFINE_EVENT(btree_node, bcache_btree_node_alloc,
diff --git a/include/trace/events/bridge.h b/include/trace/events/bridge.h
index 8ea966448b58..6b200059c2c5 100644
--- a/include/trace/events/bridge.h
+++ b/include/trace/events/bridge.h
@@ -95,16 +95,16 @@ TRACE_EVENT(fdb_delete,
TRACE_EVENT(br_fdb_update,
TP_PROTO(struct net_bridge *br, struct net_bridge_port *source,
- const unsigned char *addr, u16 vid, bool added_by_user),
+ const unsigned char *addr, u16 vid, unsigned long flags),
- TP_ARGS(br, source, addr, vid, added_by_user),
+ TP_ARGS(br, source, addr, vid, flags),
TP_STRUCT__entry(
__string(br_dev, br->dev->name)
__string(dev, source->dev->name)
__array(unsigned char, addr, ETH_ALEN)
__field(u16, vid)
- __field(bool, added_by_user)
+ __field(unsigned long, flags)
),
TP_fast_assign(
@@ -112,14 +112,14 @@ TRACE_EVENT(br_fdb_update,
__assign_str(dev, source->dev->name);
memcpy(__entry->addr, addr, ETH_ALEN);
__entry->vid = vid;
- __entry->added_by_user = added_by_user;
+ __entry->flags = flags;
),
- TP_printk("br_dev %s source %s addr %02x:%02x:%02x:%02x:%02x:%02x vid %u added_by_user %d",
+ TP_printk("br_dev %s source %s addr %02x:%02x:%02x:%02x:%02x:%02x vid %u flags 0x%lx",
__get_str(br_dev), __get_str(dev), __entry->addr[0],
__entry->addr[1], __entry->addr[2], __entry->addr[3],
__entry->addr[4], __entry->addr[5], __entry->vid,
- __entry->added_by_user)
+ __entry->flags)
);
diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h
index 75ae1899452b..bcbc763b8814 100644
--- a/include/trace/events/btrfs.h
+++ b/include/trace/events/btrfs.h
@@ -19,7 +19,7 @@ struct btrfs_delayed_ref_node;
struct btrfs_delayed_tree_ref;
struct btrfs_delayed_data_ref;
struct btrfs_delayed_ref_head;
-struct btrfs_block_group_cache;
+struct btrfs_block_group;
struct btrfs_free_cluster;
struct map_lookup;
struct extent_buffer;
@@ -81,13 +81,14 @@ TRACE_DEFINE_ENUM(COMMIT_TRANS);
#define show_extent_io_tree_owner(owner) \
__print_symbolic(owner, \
- { IO_TREE_FS_INFO_FREED_EXTENTS0, "FREED_EXTENTS0" }, \
- { IO_TREE_FS_INFO_FREED_EXTENTS1, "FREED_EXTENTS1" }, \
+ { IO_TREE_FS_PINNED_EXTENTS, "PINNED_EXTENTS" }, \
+ { IO_TREE_FS_EXCLUDED_EXTENTS, "EXCLUDED_EXTENTS" }, \
{ IO_TREE_INODE_IO, "INODE_IO" }, \
{ IO_TREE_INODE_IO_FAILURE, "INODE_IO_FAILURE" }, \
{ IO_TREE_RELOC_BLOCKS, "RELOC_BLOCKS" }, \
{ IO_TREE_TRANS_DIRTY_PAGES, "TRANS_DIRTY_PAGES" }, \
{ IO_TREE_ROOT_DIRTY_LOG_PAGES, "ROOT_DIRTY_LOG_PAGES" }, \
+ { IO_TREE_INODE_FILE_EXTENT, "INODE_FILE_EXTENT" }, \
{ IO_TREE_SELFTEST, "SELFTEST" })
#define BTRFS_GROUP_FLAGS \
@@ -170,7 +171,7 @@ DECLARE_EVENT_CLASS(btrfs__inode,
TP_STRUCT__entry_btrfs(
__field( u64, ino )
- __field( blkcnt_t, blocks )
+ __field( u64, blocks )
__field( u64, disk_i_size )
__field( u64, generation )
__field( u64, last_trans )
@@ -194,7 +195,7 @@ DECLARE_EVENT_CLASS(btrfs__inode,
show_root_type(__entry->root_objectid),
__entry->generation,
__entry->ino,
- (unsigned long long)__entry->blocks,
+ __entry->blocks,
__entry->disk_i_size,
__entry->last_trans,
__entry->logged_trans)
@@ -292,7 +293,7 @@ TRACE_EVENT_CONDITION(btrfs_get_extent,
TRACE_EVENT(btrfs_handle_em_exist,
- TP_PROTO(struct btrfs_fs_info *fs_info,
+ TP_PROTO(const struct btrfs_fs_info *fs_info,
const struct extent_map *existing, const struct extent_map *map,
u64 start, u64 len),
@@ -330,8 +331,8 @@ TRACE_EVENT(btrfs_handle_em_exist,
/* file extent item */
DECLARE_EVENT_CLASS(btrfs__file_extent_item_regular,
- TP_PROTO(struct btrfs_inode *bi, struct extent_buffer *l,
- struct btrfs_file_extent_item *fi, u64 start),
+ TP_PROTO(const struct btrfs_inode *bi, const struct extent_buffer *l,
+ const struct btrfs_file_extent_item *fi, u64 start),
TP_ARGS(bi, l, fi, start),
@@ -385,8 +386,8 @@ DECLARE_EVENT_CLASS(btrfs__file_extent_item_regular,
DECLARE_EVENT_CLASS(
btrfs__file_extent_item_inline,
- TP_PROTO(struct btrfs_inode *bi, struct extent_buffer *l,
- struct btrfs_file_extent_item *fi, int slot, u64 start),
+ TP_PROTO(const struct btrfs_inode *bi, const struct extent_buffer *l,
+ const struct btrfs_file_extent_item *fi, int slot, u64 start),
TP_ARGS(bi, l, fi, slot, start),
@@ -426,8 +427,8 @@ DECLARE_EVENT_CLASS(
DEFINE_EVENT(
btrfs__file_extent_item_regular, btrfs_get_extent_show_fi_regular,
- TP_PROTO(struct btrfs_inode *bi, struct extent_buffer *l,
- struct btrfs_file_extent_item *fi, u64 start),
+ TP_PROTO(const struct btrfs_inode *bi, const struct extent_buffer *l,
+ const struct btrfs_file_extent_item *fi, u64 start),
TP_ARGS(bi, l, fi, start)
);
@@ -435,8 +436,8 @@ DEFINE_EVENT(
DEFINE_EVENT(
btrfs__file_extent_item_regular, btrfs_truncate_show_fi_regular,
- TP_PROTO(struct btrfs_inode *bi, struct extent_buffer *l,
- struct btrfs_file_extent_item *fi, u64 start),
+ TP_PROTO(const struct btrfs_inode *bi, const struct extent_buffer *l,
+ const struct btrfs_file_extent_item *fi, u64 start),
TP_ARGS(bi, l, fi, start)
);
@@ -444,8 +445,8 @@ DEFINE_EVENT(
DEFINE_EVENT(
btrfs__file_extent_item_inline, btrfs_get_extent_show_fi_inline,
- TP_PROTO(struct btrfs_inode *bi, struct extent_buffer *l,
- struct btrfs_file_extent_item *fi, int slot, u64 start),
+ TP_PROTO(const struct btrfs_inode *bi, const struct extent_buffer *l,
+ const struct btrfs_file_extent_item *fi, int slot, u64 start),
TP_ARGS(bi, l, fi, slot, start)
);
@@ -453,8 +454,8 @@ DEFINE_EVENT(
DEFINE_EVENT(
btrfs__file_extent_item_inline, btrfs_truncate_show_fi_inline,
- TP_PROTO(struct btrfs_inode *bi, struct extent_buffer *l,
- struct btrfs_file_extent_item *fi, int slot, u64 start),
+ TP_PROTO(const struct btrfs_inode *bi, const struct extent_buffer *l,
+ const struct btrfs_file_extent_item *fi, int slot, u64 start),
TP_ARGS(bi, l, fi, slot, start)
);
@@ -468,7 +469,6 @@ DEFINE_EVENT(
{ (1 << BTRFS_ORDERED_PREALLOC), "PREALLOC" }, \
{ (1 << BTRFS_ORDERED_DIRECT), "DIRECT" }, \
{ (1 << BTRFS_ORDERED_IOERR), "IOERR" }, \
- { (1 << BTRFS_ORDERED_UPDATED_ISIZE), "UPDATED_ISIZE" }, \
{ (1 << BTRFS_ORDERED_TRUNCATED), "TRUNCATED" })
@@ -496,9 +496,9 @@ DECLARE_EVENT_CLASS(btrfs__ordered_extent,
TP_fast_assign_btrfs(btrfs_sb(inode->i_sb),
__entry->ino = btrfs_ino(BTRFS_I(inode));
__entry->file_offset = ordered->file_offset;
- __entry->start = ordered->start;
- __entry->len = ordered->len;
- __entry->disk_len = ordered->disk_len;
+ __entry->start = ordered->disk_bytenr;
+ __entry->len = ordered->num_bytes;
+ __entry->disk_len = ordered->disk_num_bytes;
__entry->bytes_left = ordered->bytes_left;
__entry->flags = ordered->flags;
__entry->compress_type = ordered->compress_type;
@@ -574,7 +574,7 @@ DECLARE_EVENT_CLASS(btrfs__writepage,
__field( char, for_kupdate )
__field( char, for_reclaim )
__field( char, range_cyclic )
- __field( pgoff_t, writeback_index )
+ __field( unsigned long, writeback_index )
__field( u64, root_objectid )
),
@@ -603,7 +603,7 @@ DECLARE_EVENT_CLASS(btrfs__writepage,
__entry->range_start, __entry->range_end,
__entry->for_kupdate,
__entry->for_reclaim, __entry->range_cyclic,
- (unsigned long)__entry->writeback_index)
+ __entry->writeback_index)
);
DEFINE_EVENT(btrfs__writepage, __extent_writepage,
@@ -622,7 +622,7 @@ TRACE_EVENT(btrfs_writepage_end_io_hook,
TP_STRUCT__entry_btrfs(
__field( u64, ino )
- __field( pgoff_t, index )
+ __field( unsigned long, index )
__field( u64, start )
__field( u64, end )
__field( int, uptodate )
@@ -642,7 +642,7 @@ TRACE_EVENT(btrfs_writepage_end_io_hook,
TP_printk_btrfs("root=%llu(%s) ino=%llu page_index=%lu start=%llu "
"end=%llu uptodate=%d",
show_root_type(__entry->root_objectid),
- __entry->ino, (unsigned long)__entry->index,
+ __entry->ino, __entry->index,
__entry->start,
__entry->end, __entry->uptodate)
);
@@ -699,7 +699,7 @@ TRACE_EVENT(btrfs_sync_fs,
TRACE_EVENT(btrfs_add_block_group,
TP_PROTO(const struct btrfs_fs_info *fs_info,
- const struct btrfs_block_group_cache *block_group, int create),
+ const struct btrfs_block_group *block_group, int create),
TP_ARGS(fs_info, block_group, create),
@@ -713,11 +713,10 @@ TRACE_EVENT(btrfs_add_block_group,
),
TP_fast_assign_btrfs(fs_info,
- __entry->offset = block_group->key.objectid;
- __entry->size = block_group->key.offset;
+ __entry->offset = block_group->start;
+ __entry->size = block_group->length;
__entry->flags = block_group->flags;
- __entry->bytes_used =
- btrfs_block_group_used(&block_group->item);
+ __entry->bytes_used = block_group->used;
__entry->bytes_super = block_group->bytes_super;
__entry->create = create;
),
@@ -1018,7 +1017,7 @@ TRACE_EVENT(btrfs_cow_block,
TRACE_EVENT(btrfs_space_reservation,
- TP_PROTO(const struct btrfs_fs_info *fs_info, char *type, u64 val,
+ TP_PROTO(const struct btrfs_fs_info *fs_info, const char *type, u64 val,
u64 bytes, int reserve),
TP_ARGS(fs_info, type, val, bytes, reserve),
@@ -1051,7 +1050,7 @@ TRACE_EVENT(btrfs_space_reservation,
TRACE_EVENT(btrfs_trigger_flush,
TP_PROTO(const struct btrfs_fs_info *fs_info, u64 flags, u64 bytes,
- int flush, char *reason),
+ int flush, const char *reason),
TP_ARGS(fs_info, flags, bytes, flush, reason),
@@ -1185,7 +1184,7 @@ TRACE_EVENT(find_free_extent,
DECLARE_EVENT_CLASS(btrfs__reserve_extent,
- TP_PROTO(const struct btrfs_block_group_cache *block_group, u64 start,
+ TP_PROTO(const struct btrfs_block_group *block_group, u64 start,
u64 len),
TP_ARGS(block_group, start, len),
@@ -1198,7 +1197,7 @@ DECLARE_EVENT_CLASS(btrfs__reserve_extent,
),
TP_fast_assign_btrfs(block_group->fs_info,
- __entry->bg_objectid = block_group->key.objectid;
+ __entry->bg_objectid = block_group->start;
__entry->flags = block_group->flags;
__entry->start = start;
__entry->len = len;
@@ -1215,7 +1214,7 @@ DECLARE_EVENT_CLASS(btrfs__reserve_extent,
DEFINE_EVENT(btrfs__reserve_extent, btrfs_reserve_extent,
- TP_PROTO(const struct btrfs_block_group_cache *block_group, u64 start,
+ TP_PROTO(const struct btrfs_block_group *block_group, u64 start,
u64 len),
TP_ARGS(block_group, start, len)
@@ -1223,7 +1222,7 @@ DEFINE_EVENT(btrfs__reserve_extent, btrfs_reserve_extent,
DEFINE_EVENT(btrfs__reserve_extent, btrfs_reserve_extent_cluster,
- TP_PROTO(const struct btrfs_block_group_cache *block_group, u64 start,
+ TP_PROTO(const struct btrfs_block_group *block_group, u64 start,
u64 len),
TP_ARGS(block_group, start, len)
@@ -1231,7 +1230,7 @@ DEFINE_EVENT(btrfs__reserve_extent, btrfs_reserve_extent_cluster,
TRACE_EVENT(btrfs_find_cluster,
- TP_PROTO(const struct btrfs_block_group_cache *block_group, u64 start,
+ TP_PROTO(const struct btrfs_block_group *block_group, u64 start,
u64 bytes, u64 empty_size, u64 min_bytes),
TP_ARGS(block_group, start, bytes, empty_size, min_bytes),
@@ -1246,7 +1245,7 @@ TRACE_EVENT(btrfs_find_cluster,
),
TP_fast_assign_btrfs(block_group->fs_info,
- __entry->bg_objectid = block_group->key.objectid;
+ __entry->bg_objectid = block_group->start;
__entry->flags = block_group->flags;
__entry->start = start;
__entry->bytes = bytes;
@@ -1264,7 +1263,7 @@ TRACE_EVENT(btrfs_find_cluster,
TRACE_EVENT(btrfs_failed_cluster_setup,
- TP_PROTO(const struct btrfs_block_group_cache *block_group),
+ TP_PROTO(const struct btrfs_block_group *block_group),
TP_ARGS(block_group),
@@ -1273,7 +1272,7 @@ TRACE_EVENT(btrfs_failed_cluster_setup,
),
TP_fast_assign_btrfs(block_group->fs_info,
- __entry->bg_objectid = block_group->key.objectid;
+ __entry->bg_objectid = block_group->start;
),
TP_printk_btrfs("block_group=%llu", __entry->bg_objectid)
@@ -1281,7 +1280,7 @@ TRACE_EVENT(btrfs_failed_cluster_setup,
TRACE_EVENT(btrfs_setup_cluster,
- TP_PROTO(const struct btrfs_block_group_cache *block_group,
+ TP_PROTO(const struct btrfs_block_group *block_group,
const struct btrfs_free_cluster *cluster,
u64 size, int bitmap),
@@ -1297,7 +1296,7 @@ TRACE_EVENT(btrfs_setup_cluster,
),
TP_fast_assign_btrfs(block_group->fs_info,
- __entry->bg_objectid = block_group->key.objectid;
+ __entry->bg_objectid = block_group->start;
__entry->flags = block_group->flags;
__entry->start = cluster->window_start;
__entry->max_size = cluster->max_size;
@@ -1325,17 +1324,17 @@ TRACE_EVENT(alloc_extent_state,
TP_STRUCT__entry(
__field(const struct extent_state *, state)
__field(gfp_t, mask)
- __field(unsigned long, ip)
+ __field(const void*, ip)
),
TP_fast_assign(
__entry->state = state,
__entry->mask = mask,
- __entry->ip = IP
+ __entry->ip = (const void *)IP
),
TP_printk("state=%p mask=%s caller=%pS", __entry->state,
- show_gfp_flags(__entry->mask), (const void *)__entry->ip)
+ show_gfp_flags(__entry->mask), __entry->ip)
);
TRACE_EVENT(free_extent_state,
@@ -1346,16 +1345,15 @@ TRACE_EVENT(free_extent_state,
TP_STRUCT__entry(
__field(const struct extent_state *, state)
- __field(unsigned long, ip)
+ __field(const void*, ip)
),
TP_fast_assign(
__entry->state = state,
- __entry->ip = IP
+ __entry->ip = (const void *)IP
),
- TP_printk("state=%p caller=%pS", __entry->state,
- (const void *)__entry->ip)
+ TP_printk("state=%p caller=%pS", __entry->state, __entry->ip)
);
DECLARE_EVENT_CLASS(btrfs__work,
@@ -1389,9 +1387,9 @@ DECLARE_EVENT_CLASS(btrfs__work,
);
/*
- * For situiations when the work is freed, we pass fs_info and a tag that that
- * matches address of the work structure so it can be paired with the
- * scheduling event.
+ * For situations when the work is freed, we pass fs_info and a tag that matches
+ * the address of the work structure so it can be paired with the scheduling
+ * event. DO NOT add anything here that dereferences wtag.
*/
DECLARE_EVENT_CLASS(btrfs__work__done,
@@ -1567,8 +1565,7 @@ DECLARE_EVENT_CLASS(btrfs_qgroup_extent,
),
TP_printk_btrfs("bytenr=%llu num_bytes=%llu",
- (unsigned long long)__entry->bytenr,
- (unsigned long long)__entry->num_bytes)
+ __entry->bytenr, __entry->num_bytes)
);
DEFINE_EVENT(btrfs_qgroup_extent, btrfs_qgroup_account_extents,
@@ -1644,7 +1641,7 @@ TRACE_EVENT(btrfs_qgroup_account_extent,
TRACE_EVENT(qgroup_update_counters,
TP_PROTO(const struct btrfs_fs_info *fs_info,
- struct btrfs_qgroup *qgroup,
+ const struct btrfs_qgroup *qgroup,
u64 cur_old_count, u64 cur_new_count),
TP_ARGS(fs_info, qgroup, cur_old_count, cur_new_count),
@@ -1825,7 +1822,7 @@ DEFINE_EVENT(btrfs__prelim_ref, btrfs_prelim_ref_insert,
);
TRACE_EVENT(btrfs_inode_mod_outstanding_extents,
- TP_PROTO(struct btrfs_root *root, u64 ino, int mod),
+ TP_PROTO(const struct btrfs_root *root, u64 ino, int mod),
TP_ARGS(root, ino, mod),
@@ -1847,7 +1844,7 @@ TRACE_EVENT(btrfs_inode_mod_outstanding_extents,
);
DECLARE_EVENT_CLASS(btrfs__block_group,
- TP_PROTO(const struct btrfs_block_group_cache *bg_cache),
+ TP_PROTO(const struct btrfs_block_group *bg_cache),
TP_ARGS(bg_cache),
@@ -1859,9 +1856,9 @@ DECLARE_EVENT_CLASS(btrfs__block_group,
),
TP_fast_assign_btrfs(bg_cache->fs_info,
- __entry->bytenr = bg_cache->key.objectid,
- __entry->len = bg_cache->key.offset,
- __entry->used = btrfs_block_group_used(&bg_cache->item);
+ __entry->bytenr = bg_cache->start,
+ __entry->len = bg_cache->length,
+ __entry->used = bg_cache->used;
__entry->flags = bg_cache->flags;
),
@@ -1871,19 +1868,19 @@ DECLARE_EVENT_CLASS(btrfs__block_group,
);
DEFINE_EVENT(btrfs__block_group, btrfs_remove_block_group,
- TP_PROTO(const struct btrfs_block_group_cache *bg_cache),
+ TP_PROTO(const struct btrfs_block_group *bg_cache),
TP_ARGS(bg_cache)
);
DEFINE_EVENT(btrfs__block_group, btrfs_add_unused_block_group,
- TP_PROTO(const struct btrfs_block_group_cache *bg_cache),
+ TP_PROTO(const struct btrfs_block_group *bg_cache),
TP_ARGS(bg_cache)
);
DEFINE_EVENT(btrfs__block_group, btrfs_skip_unused_block_group,
- TP_PROTO(const struct btrfs_block_group_cache *bg_cache),
+ TP_PROTO(const struct btrfs_block_group *bg_cache),
TP_ARGS(bg_cache)
);
@@ -1906,7 +1903,7 @@ TRACE_EVENT(btrfs_set_extent_bit,
TP_fast_assign_btrfs(tree->fs_info,
__entry->owner = tree->owner;
if (tree->private_data) {
- struct inode *inode = tree->private_data;
+ const struct inode *inode = tree->private_data;
__entry->ino = btrfs_ino(BTRFS_I(inode));
__entry->rootid =
@@ -1945,7 +1942,7 @@ TRACE_EVENT(btrfs_clear_extent_bit,
TP_fast_assign_btrfs(tree->fs_info,
__entry->owner = tree->owner;
if (tree->private_data) {
- struct inode *inode = tree->private_data;
+ const struct inode *inode = tree->private_data;
__entry->ino = btrfs_ino(BTRFS_I(inode));
__entry->rootid =
@@ -1985,7 +1982,7 @@ TRACE_EVENT(btrfs_convert_extent_bit,
TP_fast_assign_btrfs(tree->fs_info,
__entry->owner = tree->owner;
if (tree->private_data) {
- struct inode *inode = tree->private_data;
+ const struct inode *inode = tree->private_data;
__entry->ino = btrfs_ino(BTRFS_I(inode));
__entry->rootid =
@@ -2094,8 +2091,8 @@ DEFINE_BTRFS_LOCK_EVENT(btrfs_tree_read_lock_atomic);
DECLARE_EVENT_CLASS(btrfs__space_info_update,
- TP_PROTO(struct btrfs_fs_info *fs_info,
- struct btrfs_space_info *sinfo, u64 old, s64 diff),
+ TP_PROTO(const struct btrfs_fs_info *fs_info,
+ const struct btrfs_space_info *sinfo, u64 old, s64 diff),
TP_ARGS(fs_info, sinfo, old, diff),
@@ -2117,16 +2114,16 @@ DECLARE_EVENT_CLASS(btrfs__space_info_update,
DEFINE_EVENT(btrfs__space_info_update, update_bytes_may_use,
- TP_PROTO(struct btrfs_fs_info *fs_info,
- struct btrfs_space_info *sinfo, u64 old, s64 diff),
+ TP_PROTO(const struct btrfs_fs_info *fs_info,
+ const struct btrfs_space_info *sinfo, u64 old, s64 diff),
TP_ARGS(fs_info, sinfo, old, diff)
);
DEFINE_EVENT(btrfs__space_info_update, update_bytes_pinned,
- TP_PROTO(struct btrfs_fs_info *fs_info,
- struct btrfs_space_info *sinfo, u64 old, s64 diff),
+ TP_PROTO(const struct btrfs_fs_info *fs_info,
+ const struct btrfs_space_info *sinfo, u64 old, s64 diff),
TP_ARGS(fs_info, sinfo, old, diff)
);
diff --git a/include/trace/events/cgroup.h b/include/trace/events/cgroup.h
index a566cc521476..7f42a3de59e6 100644
--- a/include/trace/events/cgroup.h
+++ b/include/trace/events/cgroup.h
@@ -66,7 +66,7 @@ DECLARE_EVENT_CLASS(cgroup,
TP_fast_assign(
__entry->root = cgrp->root->hierarchy_id;
- __entry->id = cgrp->id;
+ __entry->id = cgroup_id(cgrp);
__entry->level = cgrp->level;
__assign_str(path, path);
),
@@ -135,7 +135,7 @@ DECLARE_EVENT_CLASS(cgroup_migrate,
TP_fast_assign(
__entry->dst_root = dst_cgrp->root->hierarchy_id;
- __entry->dst_id = dst_cgrp->id;
+ __entry->dst_id = cgroup_id(dst_cgrp);
__entry->dst_level = dst_cgrp->level;
__assign_str(dst_path, path);
__entry->pid = task->pid;
@@ -179,7 +179,7 @@ DECLARE_EVENT_CLASS(cgroup_event,
TP_fast_assign(
__entry->root = cgrp->root->hierarchy_id;
- __entry->id = cgrp->id;
+ __entry->id = cgroup_id(cgrp);
__entry->level = cgrp->level;
__assign_str(path, path);
__entry->val = val;
diff --git a/include/trace/events/ext4.h b/include/trace/events/ext4.h
index d68e9e536814..19c87661eeec 100644
--- a/include/trace/events/ext4.h
+++ b/include/trace/events/ext4.h
@@ -48,6 +48,16 @@ struct partial_cluster;
{ EXT4_GET_BLOCKS_KEEP_SIZE, "KEEP_SIZE" }, \
{ EXT4_GET_BLOCKS_ZERO, "ZERO" })
+/*
+ * __print_flags() requires that all enum values be wrapped in the
+ * TRACE_DEFINE_ENUM macro so that the enum value can be encoded in the ftrace
+ * ring buffer.
+ */
+TRACE_DEFINE_ENUM(BH_New);
+TRACE_DEFINE_ENUM(BH_Mapped);
+TRACE_DEFINE_ENUM(BH_Unwritten);
+TRACE_DEFINE_ENUM(BH_Boundary);
+
#define show_mflags(flags) __print_flags(flags, "", \
{ EXT4_MAP_NEW, "N" }, \
{ EXT4_MAP_MAPPED, "M" }, \
@@ -62,11 +72,18 @@ struct partial_cluster;
{ EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER,"1ST_CLUSTER" },\
{ EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER, "LAST_CLUSTER" })
+TRACE_DEFINE_ENUM(ES_WRITTEN_B);
+TRACE_DEFINE_ENUM(ES_UNWRITTEN_B);
+TRACE_DEFINE_ENUM(ES_DELAYED_B);
+TRACE_DEFINE_ENUM(ES_HOLE_B);
+TRACE_DEFINE_ENUM(ES_REFERENCED_B);
+
#define show_extent_status(status) __print_flags(status, "", \
{ EXTENT_STATUS_WRITTEN, "W" }, \
{ EXTENT_STATUS_UNWRITTEN, "U" }, \
{ EXTENT_STATUS_DELAYED, "D" }, \
- { EXTENT_STATUS_HOLE, "H" })
+ { EXTENT_STATUS_HOLE, "H" }, \
+ { EXTENT_STATUS_REFERENCED, "R" })
#define show_falloc_mode(mode) __print_flags(mode, "|", \
{ FALLOC_FL_KEEP_SIZE, "KEEP_SIZE"}, \
@@ -1746,15 +1763,16 @@ TRACE_EVENT(ext4_load_inode,
TRACE_EVENT(ext4_journal_start,
TP_PROTO(struct super_block *sb, int blocks, int rsv_blocks,
- unsigned long IP),
+ int revoke_creds, unsigned long IP),
- TP_ARGS(sb, blocks, rsv_blocks, IP),
+ TP_ARGS(sb, blocks, rsv_blocks, revoke_creds, IP),
TP_STRUCT__entry(
__field( dev_t, dev )
__field(unsigned long, ip )
__field( int, blocks )
__field( int, rsv_blocks )
+ __field( int, revoke_creds )
),
TP_fast_assign(
@@ -1762,11 +1780,13 @@ TRACE_EVENT(ext4_journal_start,
__entry->ip = IP;
__entry->blocks = blocks;
__entry->rsv_blocks = rsv_blocks;
+ __entry->revoke_creds = revoke_creds;
),
- TP_printk("dev %d,%d blocks, %d rsv_blocks, %d caller %pS",
- MAJOR(__entry->dev), MINOR(__entry->dev),
- __entry->blocks, __entry->rsv_blocks, (void *)__entry->ip)
+ TP_printk("dev %d,%d blocks %d, rsv_blocks %d, revoke_creds %d, "
+ "caller %pS", MAJOR(__entry->dev), MINOR(__entry->dev),
+ __entry->blocks, __entry->rsv_blocks, __entry->revoke_creds,
+ (void *)__entry->ip)
);
TRACE_EVENT(ext4_journal_start_reserved,
@@ -2262,7 +2282,7 @@ DECLARE_EVENT_CLASS(ext4__es_extent,
__entry->ino = inode->i_ino;
__entry->lblk = es->es_lblk;
__entry->len = es->es_len;
- __entry->pblk = ext4_es_pblock(es);
+ __entry->pblk = ext4_es_show_pblock(es);
__entry->status = ext4_es_status(es);
),
@@ -2351,7 +2371,7 @@ TRACE_EVENT(ext4_es_find_extent_range_exit,
__entry->ino = inode->i_ino;
__entry->lblk = es->es_lblk;
__entry->len = es->es_len;
- __entry->pblk = ext4_es_pblock(es);
+ __entry->pblk = ext4_es_show_pblock(es);
__entry->status = ext4_es_status(es);
),
@@ -2405,7 +2425,7 @@ TRACE_EVENT(ext4_es_lookup_extent_exit,
__entry->ino = inode->i_ino;
__entry->lblk = es->es_lblk;
__entry->len = es->es_len;
- __entry->pblk = ext4_es_pblock(es);
+ __entry->pblk = ext4_es_show_pblock(es);
__entry->status = ext4_es_status(es);
__entry->found = found;
),
@@ -2573,7 +2593,7 @@ TRACE_EVENT(ext4_es_insert_delayed_block,
__entry->ino = inode->i_ino;
__entry->lblk = es->es_lblk;
__entry->len = es->es_len;
- __entry->pblk = ext4_es_pblock(es);
+ __entry->pblk = ext4_es_show_pblock(es);
__entry->status = ext4_es_status(es);
__entry->allocated = allocated;
),
diff --git a/include/trace/events/f2fs.h b/include/trace/events/f2fs.h
index 1796ff99c3e9..67a97838c2a0 100644
--- a/include/trace/events/f2fs.h
+++ b/include/trace/events/f2fs.h
@@ -49,6 +49,7 @@ TRACE_DEFINE_ENUM(CP_SYNC);
TRACE_DEFINE_ENUM(CP_RECOVERY);
TRACE_DEFINE_ENUM(CP_DISCARD);
TRACE_DEFINE_ENUM(CP_TRIMMED);
+TRACE_DEFINE_ENUM(CP_PAUSE);
#define show_block_type(type) \
__print_symbolic(type, \
@@ -124,13 +125,14 @@ TRACE_DEFINE_ENUM(CP_TRIMMED);
{ CP_SYNC, "Sync" }, \
{ CP_RECOVERY, "Recovery" }, \
{ CP_DISCARD, "Discard" }, \
- { CP_UMOUNT, "Umount" }, \
+ { CP_PAUSE, "Pause" }, \
{ CP_TRIMMED, "Trimmed" })
#define show_fsync_cpreason(type) \
__print_symbolic(type, \
{ CP_NO_NEEDED, "no needed" }, \
{ CP_NON_REGULAR, "non regular" }, \
+		{ CP_COMPRESSED,	"compressed" },			\
{ CP_HARDLINK, "hardlink" }, \
{ CP_SB_NEED_CP, "sb needs cp" }, \
{ CP_WRONG_PINO, "wrong pino" }, \
@@ -148,6 +150,11 @@ TRACE_DEFINE_ENUM(CP_TRIMMED);
{ F2FS_GOING_DOWN_METAFLUSH, "meta flush" }, \
{ F2FS_GOING_DOWN_NEED_FSCK, "need fsck" })
+#define show_compress_algorithm(type) \
+ __print_symbolic(type, \
+ { COMPRESS_LZO, "LZO" }, \
+ { COMPRESS_LZ4, "LZ4" })
+
struct f2fs_sb_info;
struct f2fs_io_info;
struct extent_info;
@@ -1710,6 +1717,100 @@ TRACE_EVENT(f2fs_shutdown,
__entry->ret)
);
+DECLARE_EVENT_CLASS(f2fs_zip_start,
+
+ TP_PROTO(struct inode *inode, pgoff_t cluster_idx,
+ unsigned int cluster_size, unsigned char algtype),
+
+ TP_ARGS(inode, cluster_idx, cluster_size, algtype),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(ino_t, ino)
+ __field(pgoff_t, idx)
+ __field(unsigned int, size)
+ __field(unsigned int, algtype)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->ino = inode->i_ino;
+ __entry->idx = cluster_idx;
+ __entry->size = cluster_size;
+ __entry->algtype = algtype;
+ ),
+
+ TP_printk("dev = (%d,%d), ino = %lu, cluster_idx:%lu, "
+ "cluster_size = %u, algorithm = %s",
+ show_dev_ino(__entry),
+ __entry->idx,
+ __entry->size,
+ show_compress_algorithm(__entry->algtype))
+);
+
+DECLARE_EVENT_CLASS(f2fs_zip_end,
+
+ TP_PROTO(struct inode *inode, pgoff_t cluster_idx,
+ unsigned int compressed_size, int ret),
+
+ TP_ARGS(inode, cluster_idx, compressed_size, ret),
+
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(ino_t, ino)
+ __field(pgoff_t, idx)
+ __field(unsigned int, size)
+ __field(unsigned int, ret)
+ ),
+
+ TP_fast_assign(
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->ino = inode->i_ino;
+ __entry->idx = cluster_idx;
+ __entry->size = compressed_size;
+ __entry->ret = ret;
+ ),
+
+ TP_printk("dev = (%d,%d), ino = %lu, cluster_idx:%lu, "
+ "compressed_size = %u, ret = %d",
+ show_dev_ino(__entry),
+ __entry->idx,
+ __entry->size,
+ __entry->ret)
+);
+
+DEFINE_EVENT(f2fs_zip_start, f2fs_compress_pages_start,
+
+ TP_PROTO(struct inode *inode, pgoff_t cluster_idx,
+ unsigned int cluster_size, unsigned char algtype),
+
+ TP_ARGS(inode, cluster_idx, cluster_size, algtype)
+);
+
+DEFINE_EVENT(f2fs_zip_start, f2fs_decompress_pages_start,
+
+ TP_PROTO(struct inode *inode, pgoff_t cluster_idx,
+ unsigned int cluster_size, unsigned char algtype),
+
+ TP_ARGS(inode, cluster_idx, cluster_size, algtype)
+);
+
+DEFINE_EVENT(f2fs_zip_end, f2fs_compress_pages_end,
+
+ TP_PROTO(struct inode *inode, pgoff_t cluster_idx,
+ unsigned int compressed_size, int ret),
+
+ TP_ARGS(inode, cluster_idx, compressed_size, ret)
+);
+
+DEFINE_EVENT(f2fs_zip_end, f2fs_decompress_pages_end,
+
+ TP_PROTO(struct inode *inode, pgoff_t cluster_idx,
+ unsigned int compressed_size, int ret),
+
+ TP_ARGS(inode, cluster_idx, compressed_size, ret)
+);
+
#endif /* _TRACE_F2FS_H */
/* This part must be outside protection */
diff --git a/include/trace/events/filemap.h b/include/trace/events/filemap.h
index ee05db7ee8d2..796053e162d2 100644
--- a/include/trace/events/filemap.h
+++ b/include/trace/events/filemap.h
@@ -85,7 +85,7 @@ TRACE_EVENT(file_check_and_advance_wb_err,
TP_ARGS(file, old),
TP_STRUCT__entry(
- __field(struct file *, file);
+ __field(struct file *, file)
__field(unsigned long, i_ino)
__field(dev_t, s_dev)
__field(errseq_t, old)
diff --git a/include/trace/events/fsi.h b/include/trace/events/fsi.h
index 92e5e89e52ed..9832cb8e0eb0 100644
--- a/include/trace/events/fsi.h
+++ b/include/trace/events/fsi.h
@@ -26,7 +26,7 @@ TRACE_EVENT(fsi_master_read,
__entry->addr = addr;
__entry->size = size;
),
- TP_printk("fsi%d:%02d:%02d %08x[%zd]",
+ TP_printk("fsi%d:%02d:%02d %08x[%zu]",
__entry->master_idx,
__entry->link,
__entry->id,
@@ -56,7 +56,7 @@ TRACE_EVENT(fsi_master_write,
__entry->data = 0;
memcpy(&__entry->data, data, size);
),
- TP_printk("fsi%d:%02d:%02d %08x[%zd] <= {%*ph}",
+ TP_printk("fsi%d:%02d:%02d %08x[%zu] <= {%*ph}",
__entry->master_idx,
__entry->link,
__entry->id,
@@ -93,7 +93,7 @@ TRACE_EVENT(fsi_master_rw_result,
if (__entry->write || !__entry->ret)
memcpy(&__entry->data, data, size);
),
- TP_printk("fsi%d:%02d:%02d %08x[%zd] %s {%*ph} ret %d",
+ TP_printk("fsi%d:%02d:%02d %08x[%zu] %s {%*ph} ret %d",
__entry->master_idx,
__entry->link,
__entry->id,
diff --git a/include/trace/events/fsi_master_aspeed.h b/include/trace/events/fsi_master_aspeed.h
new file mode 100644
index 000000000000..a355ceacc33f
--- /dev/null
+++ b/include/trace/events/fsi_master_aspeed.h
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM fsi_master_aspeed
+
+#if !defined(_TRACE_FSI_MASTER_ASPEED_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_FSI_MASTER_ASPEED_H
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(fsi_master_aspeed_opb_read,
+ TP_PROTO(uint32_t addr, size_t size, uint32_t result, uint32_t status, uint32_t irq_status),
+ TP_ARGS(addr, size, result, status, irq_status),
+ TP_STRUCT__entry(
+ __field(uint32_t, addr)
+ __field(size_t, size)
+ __field(uint32_t, result)
+ __field(uint32_t, status)
+ __field(uint32_t, irq_status)
+ ),
+ TP_fast_assign(
+ __entry->addr = addr;
+ __entry->size = size;
+ __entry->result = result;
+ __entry->status = status;
+ __entry->irq_status = irq_status;
+ ),
+ TP_printk("addr %08x size %zu: result %08x sts: %08x irq_sts: %08x",
+ __entry->addr, __entry->size, __entry->result,
+ __entry->status, __entry->irq_status
+ )
+);
+
+TRACE_EVENT(fsi_master_aspeed_opb_write,
+ TP_PROTO(uint32_t addr, uint32_t val, size_t size, uint32_t status, uint32_t irq_status),
+ TP_ARGS(addr, val, size, status, irq_status),
+ TP_STRUCT__entry(
+ __field(uint32_t, addr)
+ __field(uint32_t, val)
+ __field(size_t, size)
+ __field(uint32_t, status)
+ __field(uint32_t, irq_status)
+ ),
+ TP_fast_assign(
+ __entry->addr = addr;
+ __entry->val = val;
+ __entry->size = size;
+ __entry->status = status;
+ __entry->irq_status = irq_status;
+ ),
+ TP_printk("addr %08x val %08x size %zu status: %08x irq_sts: %08x",
+ __entry->addr, __entry->val, __entry->size,
+ __entry->status, __entry->irq_status
+ )
+ );
+
+TRACE_EVENT(fsi_master_aspeed_opb_error,
+ TP_PROTO(uint32_t mresp0, uint32_t mstap0, uint32_t mesrb0),
+ TP_ARGS(mresp0, mstap0, mesrb0),
+ TP_STRUCT__entry(
+ __field(uint32_t, mresp0)
+ __field(uint32_t, mstap0)
+ __field(uint32_t, mesrb0)
+ ),
+ TP_fast_assign(
+ __entry->mresp0 = mresp0;
+ __entry->mstap0 = mstap0;
+ __entry->mesrb0 = mesrb0;
+ ),
+ TP_printk("mresp0 %08x mstap0 %08x mesrb0 %08x",
+ __entry->mresp0, __entry->mstap0, __entry->mesrb0
+ )
+ );
+
+#endif
+
+#include <trace/define_trace.h>
diff --git a/include/trace/events/huge_memory.h b/include/trace/events/huge_memory.h
index dd4db334bd63..d82a0f4e824d 100644
--- a/include/trace/events/huge_memory.h
+++ b/include/trace/events/huge_memory.h
@@ -31,7 +31,8 @@
EM( SCAN_ALLOC_HUGE_PAGE_FAIL, "alloc_huge_page_failed") \
EM( SCAN_CGROUP_CHARGE_FAIL, "ccgroup_charge_failed") \
EM( SCAN_EXCEED_SWAP_PTE, "exceed_swap_pte") \
- EMe(SCAN_TRUNCATED, "truncated") \
+ EM( SCAN_TRUNCATED, "truncated") \
+ EMe(SCAN_PAGE_HAS_PRIVATE, "page_has_private") \
#undef EM
#undef EMe
diff --git a/include/trace/events/intel_iommu.h b/include/trace/events/intel_iommu.h
index 54e61d456cdf..112bd06487bf 100644
--- a/include/trace/events/intel_iommu.h
+++ b/include/trace/events/intel_iommu.h
@@ -49,12 +49,6 @@ DEFINE_EVENT(dma_map, map_single,
TP_ARGS(dev, dev_addr, phys_addr, size)
);
-DEFINE_EVENT(dma_map, map_sg,
- TP_PROTO(struct device *dev, dma_addr_t dev_addr, phys_addr_t phys_addr,
- size_t size),
- TP_ARGS(dev, dev_addr, phys_addr, size)
-);
-
DEFINE_EVENT(dma_map, bounce_map_single,
TP_PROTO(struct device *dev, dma_addr_t dev_addr, phys_addr_t phys_addr,
size_t size),
@@ -99,6 +93,48 @@ DEFINE_EVENT(dma_unmap, bounce_unmap_single,
TP_ARGS(dev, dev_addr, size)
);
+DECLARE_EVENT_CLASS(dma_map_sg,
+ TP_PROTO(struct device *dev, int index, int total,
+ struct scatterlist *sg),
+
+ TP_ARGS(dev, index, total, sg),
+
+ TP_STRUCT__entry(
+ __string(dev_name, dev_name(dev))
+ __field(dma_addr_t, dev_addr)
+ __field(phys_addr_t, phys_addr)
+ __field(size_t, size)
+ __field(int, index)
+ __field(int, total)
+ ),
+
+ TP_fast_assign(
+ __assign_str(dev_name, dev_name(dev));
+ __entry->dev_addr = sg->dma_address;
+ __entry->phys_addr = sg_phys(sg);
+ __entry->size = sg->dma_length;
+ __entry->index = index;
+ __entry->total = total;
+ ),
+
+ TP_printk("dev=%s [%d/%d] dev_addr=0x%llx phys_addr=0x%llx size=%zu",
+ __get_str(dev_name), __entry->index, __entry->total,
+ (unsigned long long)__entry->dev_addr,
+ (unsigned long long)__entry->phys_addr,
+ __entry->size)
+);
+
+DEFINE_EVENT(dma_map_sg, map_sg,
+ TP_PROTO(struct device *dev, int index, int total,
+ struct scatterlist *sg),
+ TP_ARGS(dev, index, total, sg)
+);
+
+DEFINE_EVENT(dma_map_sg, bounce_map_sg,
+ TP_PROTO(struct device *dev, int index, int total,
+ struct scatterlist *sg),
+ TP_ARGS(dev, index, total, sg)
+);
#endif /* _TRACE_INTEL_IOMMU_H */
/* This part must be outside protection */
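Editor's note: the dma_map_sg event class above records one event per scatterlist entry, carrying an index/total pair so the whole mapping can be reconstructed from the trace. A minimal sketch of how a mapping loop might emit it is below; the variable names (sglist, nelems, i) and the 1-based index are illustrative assumptions, not taken from this patch.

	/* Sketch: emit one map_sg event per scatterlist element. */
	struct scatterlist *sg;
	int i;

	for_each_sg(sglist, sg, nelems, i) {
		/* ... program the IOMMU for this element ... */
		trace_map_sg(dev, i + 1, nelems, sg);	/* shows up as "[i/total]" */
	}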
diff --git a/include/trace/events/io_uring.h b/include/trace/events/io_uring.h
new file mode 100644
index 000000000000..9f0d3b7d56b0
--- /dev/null
+++ b/include/trace/events/io_uring.h
@@ -0,0 +1,466 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM io_uring
+
+#if !defined(_TRACE_IO_URING_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_IO_URING_H
+
+#include <linux/tracepoint.h>
+
+struct io_wq_work;
+
+/**
+ * io_uring_create - called after a new io_uring context was prepared
+ *
+ * @fd: corresponding file descriptor
+ * @ctx: pointer to a ring context structure
+ * @sq_entries: actual SQ size
+ * @cq_entries: actual CQ size
+ * @flags: SQ ring flags, provided to io_uring_setup(2)
+ *
+ * Allows tracing of io_uring creation, providing a pointer to the context that
+ * can be used later to correlate events.
+ */
+TRACE_EVENT(io_uring_create,
+
+ TP_PROTO(int fd, void *ctx, u32 sq_entries, u32 cq_entries, u32 flags),
+
+ TP_ARGS(fd, ctx, sq_entries, cq_entries, flags),
+
+ TP_STRUCT__entry (
+ __field( int, fd )
+ __field( void *, ctx )
+ __field( u32, sq_entries )
+ __field( u32, cq_entries )
+ __field( u32, flags )
+ ),
+
+ TP_fast_assign(
+ __entry->fd = fd;
+ __entry->ctx = ctx;
+ __entry->sq_entries = sq_entries;
+ __entry->cq_entries = cq_entries;
+ __entry->flags = flags;
+ ),
+
+ TP_printk("ring %p, fd %d sq size %d, cq size %d, flags %d",
+ __entry->ctx, __entry->fd, __entry->sq_entries,
+ __entry->cq_entries, __entry->flags)
+);
+
+/**
+ * io_uring_register - called after a buffer/file/eventfd was successfully
+ * registered for a ring
+ *
+ * @ctx: pointer to a ring context structure
+ * @opcode: describes which operation to perform
+ * @nr_user_files: number of registered files
+ * @nr_user_bufs: number of registered buffers
+ * @eventfd:		whether an eventfd was registered or not
+ * @ret: return code
+ *
+ * Allows tracing of fixed files/buffers/eventfds, which can be registered to
+ * avoid the overhead of taking references to them for every operation. This
+ * event, together with io_uring_file_get, can provide a full picture of how
+ * much overhead can be reduced by using fixed resources.
+ */
+TRACE_EVENT(io_uring_register,
+
+ TP_PROTO(void *ctx, unsigned opcode, unsigned nr_files,
+ unsigned nr_bufs, bool eventfd, long ret),
+
+ TP_ARGS(ctx, opcode, nr_files, nr_bufs, eventfd, ret),
+
+ TP_STRUCT__entry (
+ __field( void *, ctx )
+ __field( unsigned, opcode )
+ __field( unsigned, nr_files )
+ __field( unsigned, nr_bufs )
+ __field( bool, eventfd )
+ __field( long, ret )
+ ),
+
+ TP_fast_assign(
+ __entry->ctx = ctx;
+ __entry->opcode = opcode;
+ __entry->nr_files = nr_files;
+ __entry->nr_bufs = nr_bufs;
+ __entry->eventfd = eventfd;
+ __entry->ret = ret;
+ ),
+
+ TP_printk("ring %p, opcode %d, nr_user_files %d, nr_user_bufs %d, "
+ "eventfd %d, ret %ld",
+ __entry->ctx, __entry->opcode, __entry->nr_files,
+ __entry->nr_bufs, __entry->eventfd, __entry->ret)
+);
+
+/**
+ * io_uring_file_get - called before getting references to an SQE file
+ *
+ * @ctx: pointer to a ring context structure
+ * @fd: SQE file descriptor
+ *
+ * Allows tracing of how often an SQE file reference is obtained, which can
+ * help determine whether it makes sense to use fixed files, or verify that
+ * fixed files are used correctly.
+ */
+TRACE_EVENT(io_uring_file_get,
+
+ TP_PROTO(void *ctx, int fd),
+
+ TP_ARGS(ctx, fd),
+
+ TP_STRUCT__entry (
+ __field( void *, ctx )
+ __field( int, fd )
+ ),
+
+ TP_fast_assign(
+ __entry->ctx = ctx;
+ __entry->fd = fd;
+ ),
+
+ TP_printk("ring %p, fd %d", __entry->ctx, __entry->fd)
+);
+
+/**
+ * io_uring_queue_async_work - called before submitting a new async work
+ *
+ * @ctx: pointer to a ring context structure
+ * @rw:		type of workqueue, hashed or normal
+ * @req: pointer to a submitted request
+ * @work: pointer to a submitted io_wq_work
+ *
+ * Allows tracing of asynchronous work submission.
+ */
+TRACE_EVENT(io_uring_queue_async_work,
+
+	TP_PROTO(void *ctx, int rw, void *req, struct io_wq_work *work,
+ unsigned int flags),
+
+ TP_ARGS(ctx, rw, req, work, flags),
+
+ TP_STRUCT__entry (
+ __field( void *, ctx )
+ __field( int, rw )
+ __field( void *, req )
+ __field( struct io_wq_work *, work )
+ __field( unsigned int, flags )
+ ),
+
+ TP_fast_assign(
+ __entry->ctx = ctx;
+ __entry->rw = rw;
+ __entry->req = req;
+ __entry->work = work;
+ __entry->flags = flags;
+ ),
+
+ TP_printk("ring %p, request %p, flags %d, %s queue, work %p",
+ __entry->ctx, __entry->req, __entry->flags,
+ __entry->rw ? "hashed" : "normal", __entry->work)
+);
+
+/**
+ * io_uring_defer - called when an io_uring request is deferred
+ *
+ * @ctx: pointer to a ring context structure
+ * @req: pointer to a deferred request
+ * @user_data: user data associated with the request
+ *
+ * Allows tracking of deferred requests, to get insight into which requests are
+ * not started immediately.
+ */
+TRACE_EVENT(io_uring_defer,
+
+ TP_PROTO(void *ctx, void *req, unsigned long long user_data),
+
+ TP_ARGS(ctx, req, user_data),
+
+ TP_STRUCT__entry (
+ __field( void *, ctx )
+ __field( void *, req )
+ __field( unsigned long long, data )
+ ),
+
+ TP_fast_assign(
+ __entry->ctx = ctx;
+ __entry->req = req;
+ __entry->data = user_data;
+ ),
+
+ TP_printk("ring %p, request %p user_data %llu", __entry->ctx,
+ __entry->req, __entry->data)
+);
+
+/**
+ * io_uring_link - called before the io_uring request added into link_list of
+ * another request
+ *
+ * @ctx: pointer to a ring context structure
+ * @req: pointer to a linked request
+ * @target_req: pointer to a previous request, that would contain @req
+ *
+ * Allows tracking of linked requests, to understand dependencies between
+ * requests and how they influence the execution flow.
+ */
+TRACE_EVENT(io_uring_link,
+
+ TP_PROTO(void *ctx, void *req, void *target_req),
+
+ TP_ARGS(ctx, req, target_req),
+
+ TP_STRUCT__entry (
+ __field( void *, ctx )
+ __field( void *, req )
+ __field( void *, target_req )
+ ),
+
+ TP_fast_assign(
+ __entry->ctx = ctx;
+ __entry->req = req;
+ __entry->target_req = target_req;
+ ),
+
+ TP_printk("ring %p, request %p linked after %p",
+ __entry->ctx, __entry->req, __entry->target_req)
+);
+
+/**
+ * io_uring_cqring_wait - called before start waiting for an available CQE
+ *
+ * @ctx: pointer to a ring context structure
+ * @min_events: minimal number of events to wait for
+ *
+ * Allows tracking of waits for a CQE, so that we can e.g. troubleshoot
+ * situations where an application waits for an event that never comes.
+ */
+TRACE_EVENT(io_uring_cqring_wait,
+
+ TP_PROTO(void *ctx, int min_events),
+
+ TP_ARGS(ctx, min_events),
+
+ TP_STRUCT__entry (
+ __field( void *, ctx )
+ __field( int, min_events )
+ ),
+
+ TP_fast_assign(
+ __entry->ctx = ctx;
+ __entry->min_events = min_events;
+ ),
+
+ TP_printk("ring %p, min_events %d", __entry->ctx, __entry->min_events)
+);
+
+/**
+ * io_uring_fail_link - called before failing a linked request
+ *
+ * @req: request, which links were cancelled
+ * @link: cancelled link
+ *
+ * Allows tracking of linked request cancellation, to see not only that some
+ * work was cancelled, but also which request was the cause.
+ */
+TRACE_EVENT(io_uring_fail_link,
+
+ TP_PROTO(void *req, void *link),
+
+ TP_ARGS(req, link),
+
+ TP_STRUCT__entry (
+ __field( void *, req )
+ __field( void *, link )
+ ),
+
+ TP_fast_assign(
+ __entry->req = req;
+ __entry->link = link;
+ ),
+
+ TP_printk("request %p, link %p", __entry->req, __entry->link)
+);
+
+/**
+ * io_uring_complete - called when completing an SQE
+ *
+ * @ctx: pointer to a ring context structure
+ * @user_data: user data associated with the request
+ * @res: result of the request
+ *
+ */
+TRACE_EVENT(io_uring_complete,
+
+ TP_PROTO(void *ctx, u64 user_data, long res),
+
+ TP_ARGS(ctx, user_data, res),
+
+ TP_STRUCT__entry (
+ __field( void *, ctx )
+ __field( u64, user_data )
+ __field( long, res )
+ ),
+
+ TP_fast_assign(
+ __entry->ctx = ctx;
+ __entry->user_data = user_data;
+ __entry->res = res;
+ ),
+
+ TP_printk("ring %p, user_data 0x%llx, result %ld",
+ __entry->ctx, (unsigned long long)__entry->user_data,
+ __entry->res)
+);
+
+
+/**
+ * io_uring_submit_sqe - called before submitting one SQE
+ *
+ * @ctx: pointer to a ring context structure
+ * @opcode: opcode of request
+ * @user_data: user data associated with the request
+ * @force_nonblock:	whether the context is blocking or not
+ * @sq_thread: true if sq_thread has submitted this SQE
+ *
+ * Allows tracking of SQE submission, to understand its source: the SQ thread
+ * or an io_uring_enter call.
+ */
+TRACE_EVENT(io_uring_submit_sqe,
+
+ TP_PROTO(void *ctx, u8 opcode, u64 user_data, bool force_nonblock,
+ bool sq_thread),
+
+ TP_ARGS(ctx, opcode, user_data, force_nonblock, sq_thread),
+
+ TP_STRUCT__entry (
+ __field( void *, ctx )
+ __field( u8, opcode )
+ __field( u64, user_data )
+ __field( bool, force_nonblock )
+ __field( bool, sq_thread )
+ ),
+
+ TP_fast_assign(
+ __entry->ctx = ctx;
+ __entry->opcode = opcode;
+ __entry->user_data = user_data;
+ __entry->force_nonblock = force_nonblock;
+ __entry->sq_thread = sq_thread;
+ ),
+
+ TP_printk("ring %p, op %d, data 0x%llx, non block %d, sq_thread %d",
+ __entry->ctx, __entry->opcode,
+ (unsigned long long) __entry->user_data,
+ __entry->force_nonblock, __entry->sq_thread)
+);
+
+TRACE_EVENT(io_uring_poll_arm,
+
+ TP_PROTO(void *ctx, u8 opcode, u64 user_data, int mask, int events),
+
+ TP_ARGS(ctx, opcode, user_data, mask, events),
+
+ TP_STRUCT__entry (
+ __field( void *, ctx )
+ __field( u8, opcode )
+ __field( u64, user_data )
+ __field( int, mask )
+ __field( int, events )
+ ),
+
+ TP_fast_assign(
+ __entry->ctx = ctx;
+ __entry->opcode = opcode;
+ __entry->user_data = user_data;
+ __entry->mask = mask;
+ __entry->events = events;
+ ),
+
+ TP_printk("ring %p, op %d, data 0x%llx, mask 0x%x, events 0x%x",
+ __entry->ctx, __entry->opcode,
+ (unsigned long long) __entry->user_data,
+ __entry->mask, __entry->events)
+);
+
+TRACE_EVENT(io_uring_poll_wake,
+
+ TP_PROTO(void *ctx, u8 opcode, u64 user_data, int mask),
+
+ TP_ARGS(ctx, opcode, user_data, mask),
+
+ TP_STRUCT__entry (
+ __field( void *, ctx )
+ __field( u8, opcode )
+ __field( u64, user_data )
+ __field( int, mask )
+ ),
+
+ TP_fast_assign(
+ __entry->ctx = ctx;
+ __entry->opcode = opcode;
+ __entry->user_data = user_data;
+ __entry->mask = mask;
+ ),
+
+ TP_printk("ring %p, op %d, data 0x%llx, mask 0x%x",
+ __entry->ctx, __entry->opcode,
+ (unsigned long long) __entry->user_data,
+ __entry->mask)
+);
+
+TRACE_EVENT(io_uring_task_add,
+
+ TP_PROTO(void *ctx, u8 opcode, u64 user_data, int mask),
+
+ TP_ARGS(ctx, opcode, user_data, mask),
+
+ TP_STRUCT__entry (
+ __field( void *, ctx )
+ __field( u8, opcode )
+ __field( u64, user_data )
+ __field( int, mask )
+ ),
+
+ TP_fast_assign(
+ __entry->ctx = ctx;
+ __entry->opcode = opcode;
+ __entry->user_data = user_data;
+ __entry->mask = mask;
+ ),
+
+ TP_printk("ring %p, op %d, data 0x%llx, mask %x",
+ __entry->ctx, __entry->opcode,
+ (unsigned long long) __entry->user_data,
+ __entry->mask)
+);
+
+TRACE_EVENT(io_uring_task_run,
+
+ TP_PROTO(void *ctx, u8 opcode, u64 user_data),
+
+ TP_ARGS(ctx, opcode, user_data),
+
+ TP_STRUCT__entry (
+ __field( void *, ctx )
+ __field( u8, opcode )
+ __field( u64, user_data )
+ ),
+
+ TP_fast_assign(
+ __entry->ctx = ctx;
+ __entry->opcode = opcode;
+ __entry->user_data = user_data;
+ ),
+
+ TP_printk("ring %p, op %d, data 0x%llx",
+ __entry->ctx, __entry->opcode,
+ (unsigned long long) __entry->user_data)
+);
+
+#endif /* _TRACE_IO_URING_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
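Editor's note on how headers like the new io_uring.h are consumed: exactly one compilation unit defines CREATE_TRACE_POINTS before including the header, which instantiates the tracepoints, and the rest of the code calls the generated trace_*() helpers with the TP_PROTO arguments. A minimal sketch follows; the call sites shown are illustrative, not lifted from fs/io_uring.c.

/* In one .c file only: instantiate the tracepoints defined above. */
#define CREATE_TRACE_POINTS
#include <trace/events/io_uring.h>

/* Elsewhere, fire the events with the arguments from TP_PROTO (sketch). */
trace_io_uring_create(fd, ctx, sq_entries, cq_entries, flags);
trace_io_uring_file_get(ctx, fd);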
diff --git a/include/trace/events/jbd2.h b/include/trace/events/jbd2.h
index 2310b259329f..d16a32867f3a 100644
--- a/include/trace/events/jbd2.h
+++ b/include/trace/events/jbd2.h
@@ -133,7 +133,7 @@ TRACE_EVENT(jbd2_submit_inode_data,
(unsigned long) __entry->ino)
);
-TRACE_EVENT(jbd2_handle_start,
+DECLARE_EVENT_CLASS(jbd2_handle_start_class,
TP_PROTO(dev_t dev, unsigned long tid, unsigned int type,
unsigned int line_no, int requested_blocks),
@@ -161,6 +161,20 @@ TRACE_EVENT(jbd2_handle_start,
__entry->type, __entry->line_no, __entry->requested_blocks)
);
+DEFINE_EVENT(jbd2_handle_start_class, jbd2_handle_start,
+ TP_PROTO(dev_t dev, unsigned long tid, unsigned int type,
+ unsigned int line_no, int requested_blocks),
+
+ TP_ARGS(dev, tid, type, line_no, requested_blocks)
+);
+
+DEFINE_EVENT(jbd2_handle_start_class, jbd2_handle_restart,
+ TP_PROTO(dev_t dev, unsigned long tid, unsigned int type,
+ unsigned int line_no, int requested_blocks),
+
+ TP_ARGS(dev, tid, type, line_no, requested_blocks)
+);
+
TRACE_EVENT(jbd2_handle_extend,
TP_PROTO(dev_t dev, unsigned long tid, unsigned int type,
unsigned int line_no, int buffer_credits,
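Editor's note: the jbd2 change above is the standard TRACE_EVENT-to-event-class conversion. jbd2_handle_start keeps its existing format and call sites, and jbd2_handle_restart is added as a second event sharing the same layout. Both generated helpers take the class's TP_PROTO arguments; a sketch with illustrative variable names:

	/* Identical argument lists, one shared event layout (sketch only). */
	trace_jbd2_handle_start(dev, tid, type, line_no, nblocks);
	trace_jbd2_handle_restart(dev, tid, type, line_no, nblocks);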
diff --git a/include/trace/events/kmem.h b/include/trace/events/kmem.h
index 69e8bb8963db..f65b1f6db22d 100644
--- a/include/trace/events/kmem.h
+++ b/include/trace/events/kmem.h
@@ -88,8 +88,8 @@ DECLARE_EVENT_CLASS(kmem_alloc_node,
__entry->node = node;
),
- TP_printk("call_site=%lx ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s node=%d",
- __entry->call_site,
+ TP_printk("call_site=%pS ptr=%p bytes_req=%zu bytes_alloc=%zu gfp_flags=%s node=%d",
+ (void *)__entry->call_site,
__entry->ptr,
__entry->bytes_req,
__entry->bytes_alloc,
@@ -316,6 +316,53 @@ TRACE_EVENT(mm_page_alloc_extfrag,
__entry->change_ownership)
);
+/*
+ * Required for uniquely and securely identifying an mm in the rss_stat
+ * tracepoint.
+ */
+#ifndef __PTR_TO_HASHVAL
+static unsigned int __maybe_unused mm_ptr_to_hash(const void *ptr)
+{
+ int ret;
+ unsigned long hashval;
+
+ ret = ptr_to_hashval(ptr, &hashval);
+ if (ret)
+ return 0;
+
+ /* The hashed value is only 32-bit */
+ return (unsigned int)hashval;
+}
+#define __PTR_TO_HASHVAL
+#endif
+
+TRACE_EVENT(rss_stat,
+
+ TP_PROTO(struct mm_struct *mm,
+ int member,
+ long count),
+
+ TP_ARGS(mm, member, count),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, mm_id)
+ __field(unsigned int, curr)
+ __field(int, member)
+ __field(long, size)
+ ),
+
+ TP_fast_assign(
+ __entry->mm_id = mm_ptr_to_hash(mm);
+ __entry->curr = !!(current->mm == mm);
+ __entry->member = member;
+ __entry->size = (count << PAGE_SHIFT);
+ ),
+
+ TP_printk("mm_id=%u curr=%d member=%d size=%ldB",
+ __entry->mm_id,
+ __entry->curr,
+ __entry->member,
+ __entry->size)
+ );
#endif /* _TRACE_KMEM_H */
/* This part must be outside protection */
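Editor's note: the rss_stat tracepoint is meant to be fired from the mm counter accounting path, with the mm pointer reported only as a salted hash. A hedged sketch of a call site is below; the helper name and surrounding logic are assumptions, only trace_rss_stat() and get_mm_counter() come from the kernel.

/* Sketch: report an RSS counter change for an mm (helper name is illustrative). */
static void report_rss_counter(struct mm_struct *mm, int member)
{
	long count = get_mm_counter(mm, member);

	trace_rss_stat(mm, member, count);	/* event prints count << PAGE_SHIFT as bytes */
}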
diff --git a/include/trace/events/page_pool.h b/include/trace/events/page_pool.h
index 47b5ee880aa9..ad0aa7f31675 100644
--- a/include/trace/events/page_pool.h
+++ b/include/trace/events/page_pool.h
@@ -8,9 +8,10 @@
#include <linux/types.h>
#include <linux/tracepoint.h>
+#include <trace/events/mmflags.h>
#include <net/page_pool.h>
-TRACE_EVENT(page_pool_inflight,
+TRACE_EVENT(page_pool_release,
TP_PROTO(const struct page_pool *pool,
s32 inflight, u32 hold, u32 release),
@@ -22,6 +23,7 @@ TRACE_EVENT(page_pool_inflight,
__field(s32, inflight)
__field(u32, hold)
__field(u32, release)
+ __field(u64, cnt)
),
TP_fast_assign(
@@ -29,10 +31,12 @@ TRACE_EVENT(page_pool_inflight,
__entry->inflight = inflight;
__entry->hold = hold;
__entry->release = release;
+ __entry->cnt = pool->destroy_cnt;
),
- TP_printk("page_pool=%p inflight=%d hold=%u release=%u",
- __entry->pool, __entry->inflight, __entry->hold, __entry->release)
+ TP_printk("page_pool=%p inflight=%d hold=%u release=%u cnt=%llu",
+ __entry->pool, __entry->inflight, __entry->hold,
+ __entry->release, __entry->cnt)
);
TRACE_EVENT(page_pool_state_release,
@@ -46,16 +50,18 @@ TRACE_EVENT(page_pool_state_release,
__field(const struct page_pool *, pool)
__field(const struct page *, page)
__field(u32, release)
+ __field(unsigned long, pfn)
),
TP_fast_assign(
__entry->pool = pool;
__entry->page = page;
__entry->release = release;
+ __entry->pfn = page_to_pfn(page);
),
- TP_printk("page_pool=%p page=%p release=%u",
- __entry->pool, __entry->page, __entry->release)
+ TP_printk("page_pool=%p page=%p pfn=%lu release=%u",
+ __entry->pool, __entry->page, __entry->pfn, __entry->release)
);
TRACE_EVENT(page_pool_state_hold,
@@ -69,16 +75,40 @@ TRACE_EVENT(page_pool_state_hold,
__field(const struct page_pool *, pool)
__field(const struct page *, page)
__field(u32, hold)
+ __field(unsigned long, pfn)
),
TP_fast_assign(
__entry->pool = pool;
__entry->page = page;
__entry->hold = hold;
+ __entry->pfn = page_to_pfn(page);
),
- TP_printk("page_pool=%p page=%p hold=%u",
- __entry->pool, __entry->page, __entry->hold)
+ TP_printk("page_pool=%p page=%p pfn=%lu hold=%u",
+ __entry->pool, __entry->page, __entry->pfn, __entry->hold)
+);
+
+TRACE_EVENT(page_pool_update_nid,
+
+ TP_PROTO(const struct page_pool *pool, int new_nid),
+
+ TP_ARGS(pool, new_nid),
+
+ TP_STRUCT__entry(
+ __field(const struct page_pool *, pool)
+ __field(int, pool_nid)
+ __field(int, new_nid)
+ ),
+
+ TP_fast_assign(
+ __entry->pool = pool;
+ __entry->pool_nid = pool->p.nid;
+ __entry->new_nid = new_nid;
+ ),
+
+ TP_printk("page_pool=%p pool_nid=%d new_nid=%d",
+ __entry->pool, __entry->pool_nid, __entry->new_nid)
);
#endif /* _TRACE_PAGE_POOL_H */
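Editor's note: page_pool_update_nid records a NUMA node change for a pool alongside its current pool->p.nid. A plausible emitting site is sketched below; whether this matches the actual helper in net/core/page_pool.c is an assumption.

/* Sketch: trace a NUMA node update before switching the pool over. */
void page_pool_update_nid(struct page_pool *pool, int new_nid)
{
	trace_page_pool_update_nid(pool, new_nid);
	pool->p.nid = new_nid;
}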
diff --git a/include/trace/events/power.h b/include/trace/events/power.h
index 7457e238e1b7..af5018aa9517 100644
--- a/include/trace/events/power.h
+++ b/include/trace/events/power.h
@@ -359,75 +359,50 @@ DEFINE_EVENT(power_domain, power_domain_target,
);
/*
- * The pm qos events are used for pm qos update
+ * CPU latency QoS events used for global CPU latency QoS list updates
*/
-DECLARE_EVENT_CLASS(pm_qos_request,
+DECLARE_EVENT_CLASS(cpu_latency_qos_request,
- TP_PROTO(int pm_qos_class, s32 value),
+ TP_PROTO(s32 value),
- TP_ARGS(pm_qos_class, value),
+ TP_ARGS(value),
TP_STRUCT__entry(
- __field( int, pm_qos_class )
__field( s32, value )
),
TP_fast_assign(
- __entry->pm_qos_class = pm_qos_class;
__entry->value = value;
),
- TP_printk("pm_qos_class=%s value=%d",
- __print_symbolic(__entry->pm_qos_class,
- { PM_QOS_CPU_DMA_LATENCY, "CPU_DMA_LATENCY" }),
+ TP_printk("CPU_DMA_LATENCY value=%d",
__entry->value)
);
-DEFINE_EVENT(pm_qos_request, pm_qos_add_request,
+DEFINE_EVENT(cpu_latency_qos_request, pm_qos_add_request,
- TP_PROTO(int pm_qos_class, s32 value),
+ TP_PROTO(s32 value),
- TP_ARGS(pm_qos_class, value)
+ TP_ARGS(value)
);
-DEFINE_EVENT(pm_qos_request, pm_qos_update_request,
+DEFINE_EVENT(cpu_latency_qos_request, pm_qos_update_request,
- TP_PROTO(int pm_qos_class, s32 value),
+ TP_PROTO(s32 value),
- TP_ARGS(pm_qos_class, value)
+ TP_ARGS(value)
);
-DEFINE_EVENT(pm_qos_request, pm_qos_remove_request,
+DEFINE_EVENT(cpu_latency_qos_request, pm_qos_remove_request,
- TP_PROTO(int pm_qos_class, s32 value),
+ TP_PROTO(s32 value),
- TP_ARGS(pm_qos_class, value)
-);
-
-TRACE_EVENT(pm_qos_update_request_timeout,
-
- TP_PROTO(int pm_qos_class, s32 value, unsigned long timeout_us),
-
- TP_ARGS(pm_qos_class, value, timeout_us),
-
- TP_STRUCT__entry(
- __field( int, pm_qos_class )
- __field( s32, value )
- __field( unsigned long, timeout_us )
- ),
-
- TP_fast_assign(
- __entry->pm_qos_class = pm_qos_class;
- __entry->value = value;
- __entry->timeout_us = timeout_us;
- ),
-
- TP_printk("pm_qos_class=%s value=%d, timeout_us=%ld",
- __print_symbolic(__entry->pm_qos_class,
- { PM_QOS_CPU_DMA_LATENCY, "CPU_DMA_LATENCY" }),
- __entry->value, __entry->timeout_us)
+ TP_ARGS(value)
);
+/*
+ * General PM QoS events used for updates of PM QoS request lists
+ */
DECLARE_EVENT_CLASS(pm_qos_update,
TP_PROTO(enum pm_qos_req_action action, int prev_value, int curr_value),
diff --git a/include/trace/events/preemptirq.h b/include/trace/events/preemptirq.h
index 95fba0471e5b..3f249e150c0c 100644
--- a/include/trace/events/preemptirq.h
+++ b/include/trace/events/preemptirq.h
@@ -18,13 +18,13 @@ DECLARE_EVENT_CLASS(preemptirq_template,
TP_ARGS(ip, parent_ip),
TP_STRUCT__entry(
- __field(u32, caller_offs)
- __field(u32, parent_offs)
+ __field(s32, caller_offs)
+ __field(s32, parent_offs)
),
TP_fast_assign(
- __entry->caller_offs = (u32)(ip - (unsigned long)_stext);
- __entry->parent_offs = (u32)(parent_ip - (unsigned long)_stext);
+ __entry->caller_offs = (s32)(ip - (unsigned long)_stext);
+ __entry->parent_offs = (s32)(parent_ip - (unsigned long)_stext);
),
TP_printk("caller=%pS parent=%pS",
diff --git a/include/trace/events/pwm.h b/include/trace/events/pwm.h
new file mode 100644
index 000000000000..cf243de41cc8
--- /dev/null
+++ b/include/trace/events/pwm.h
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM pwm
+
+#if !defined(_TRACE_PWM_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_PWM_H
+
+#include <linux/pwm.h>
+#include <linux/tracepoint.h>
+
+DECLARE_EVENT_CLASS(pwm,
+
+ TP_PROTO(struct pwm_device *pwm, const struct pwm_state *state),
+
+ TP_ARGS(pwm, state),
+
+ TP_STRUCT__entry(
+ __field(struct pwm_device *, pwm)
+ __field(u64, period)
+ __field(u64, duty_cycle)
+ __field(enum pwm_polarity, polarity)
+ __field(bool, enabled)
+ ),
+
+ TP_fast_assign(
+ __entry->pwm = pwm;
+ __entry->period = state->period;
+ __entry->duty_cycle = state->duty_cycle;
+ __entry->polarity = state->polarity;
+ __entry->enabled = state->enabled;
+ ),
+
+ TP_printk("%p: period=%llu duty_cycle=%llu polarity=%d enabled=%d",
+ __entry->pwm, __entry->period, __entry->duty_cycle,
+ __entry->polarity, __entry->enabled)
+
+);
+
+DEFINE_EVENT(pwm, pwm_apply,
+
+ TP_PROTO(struct pwm_device *pwm, const struct pwm_state *state),
+
+ TP_ARGS(pwm, state)
+
+);
+
+DEFINE_EVENT(pwm, pwm_get,
+
+ TP_PROTO(struct pwm_device *pwm, const struct pwm_state *state),
+
+ TP_ARGS(pwm, state)
+
+);
+
+#endif /* _TRACE_PWM_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
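Editor's note: the pwm_apply and pwm_get events both take the device and a pwm_state, so they can bracket state changes in the PWM core. A hedged sketch of an emitting site follows; the surrounding function body is illustrative, only the trace_pwm_apply() call shape follows the TP_PROTO above.

/* Sketch: record the state handed to the hardware driver on success. */
int pwm_apply_state(struct pwm_device *pwm, const struct pwm_state *state)
{
	int err = pwm->chip->ops->apply(pwm->chip, pwm, state);

	if (!err)
		trace_pwm_apply(pwm, state);
	return err;
}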
diff --git a/include/trace/events/rcu.h b/include/trace/events/rcu.h
index 694bd040cf51..f9a7811148e2 100644
--- a/include/trace/events/rcu.h
+++ b/include/trace/events/rcu.h
@@ -41,7 +41,7 @@ TRACE_EVENT(rcu_utilization,
TP_printk("%s", __entry->s)
);
-#if defined(CONFIG_TREE_RCU) || defined(CONFIG_PREEMPT_RCU)
+#if defined(CONFIG_TREE_RCU)
/*
* Tracepoint for grace-period events. Takes a string identifying the
@@ -93,16 +93,16 @@ TRACE_EVENT_RCU(rcu_grace_period,
* the data from the rcu_node structure, other than rcuname, which comes
* from the rcu_state structure, and event, which is one of the following:
*
- * "Startleaf": Request a grace period based on leaf-node data.
+ * "Cleanup": Clean up rcu_node structure after previous GP.
+ * "CleanupMore": Clean up, and another GP is needed.
+ * "EndWait": Complete wait.
+ * "NoGPkthread": The RCU grace-period kthread has not yet started.
* "Prestarted": Someone beat us to the request
* "Startedleaf": Leaf node marked for future GP.
* "Startedleafroot": All nodes from leaf to root marked for future GP.
* "Startedroot": Requested a nocb grace period based on root-node data.
- * "NoGPkthread": The RCU grace-period kthread has not yet started.
+ * "Startleaf": Request a grace period based on leaf-node data.
* "StartWait": Start waiting for the requested grace period.
- * "EndWait": Complete wait.
- * "Cleanup": Clean up rcu_node structure after previous GP.
- * "CleanupMore": Clean up, and another GP is needed.
*/
TRACE_EVENT_RCU(rcu_future_grace_period,
@@ -258,20 +258,27 @@ TRACE_EVENT_RCU(rcu_exp_funnel_lock,
* the number of the offloaded CPU are extracted. The third and final
* argument is a string as follows:
*
- * "WakeEmpty": Wake rcuo kthread, first CB to empty list.
- * "WakeEmptyIsDeferred": Wake rcuo kthread later, first CB to empty list.
- * "WakeOvf": Wake rcuo kthread, CB list is huge.
- * "WakeOvfIsDeferred": Wake rcuo kthread later, CB list is huge.
- * "WakeNot": Don't wake rcuo kthread.
- * "WakeNotPoll": Don't wake rcuo kthread because it is polling.
- * "DeferredWake": Carried out the "IsDeferred" wakeup.
- * "Poll": Start of new polling cycle for rcu_nocb_poll.
- * "Sleep": Sleep waiting for GP for !rcu_nocb_poll.
- * "CBSleep": Sleep waiting for CBs for !rcu_nocb_poll.
- * "WokeEmpty": rcuo kthread woke to find empty list.
- * "WokeNonEmpty": rcuo kthread woke to find non-empty list.
- * "WaitQueue": Enqueue partially done, timed wait for it to complete.
- * "WokeQueue": Partial enqueue now complete.
+ * "AlreadyAwake": The to-be-awakened rcuo kthread is already awake.
+ * "Bypass": rcuo GP kthread sees non-empty ->nocb_bypass.
+ * "CBSleep": rcuo CB kthread sleeping waiting for CBs.
+ * "Check": rcuo GP kthread checking specified CPU for work.
+ * "DeferredWake": Timer expired or polled check, time to wake.
+ * "DoWake": The to-be-awakened rcuo kthread needs to be awakened.
+ * "EndSleep": Done waiting for GP for !rcu_nocb_poll.
+ * "FirstBQ": New CB to empty ->nocb_bypass (->cblist maybe non-empty).
+ * "FirstBQnoWake": FirstBQ plus rcuo kthread need not be awakened.
+ * "FirstBQwake": FirstBQ plus rcuo kthread must be awakened.
+ * "FirstQ": New CB to empty ->cblist (->nocb_bypass maybe non-empty).
+ * "NeedWaitGP": rcuo GP kthread must wait on a grace period.
+ * "Poll": Start of new polling cycle for rcu_nocb_poll.
+ * "Sleep": Sleep waiting for GP for !rcu_nocb_poll.
+ * "Timer": Deferred-wake timer expired.
+ * "WakeEmptyIsDeferred": Wake rcuo kthread later, first CB to empty list.
+ * "WakeEmpty": Wake rcuo kthread, first CB to empty list.
+ * "WakeNot": Don't wake rcuo kthread.
+ * "WakeNotPoll": Don't wake rcuo kthread because it is polling.
+ * "WakeOvfIsDeferred": Wake rcuo kthread later, CB list is huge.
+ * "WokeEmpty": rcuo CB kthread woke to find empty list.
*/
TRACE_EVENT_RCU(rcu_nocb_wake,
@@ -425,7 +432,7 @@ TRACE_EVENT_RCU(rcu_fqs,
__entry->cpu, __entry->qsevent)
);
-#endif /* #if defined(CONFIG_TREE_RCU) || defined(CONFIG_PREEMPT_RCU) */
+#endif /* #if defined(CONFIG_TREE_RCU) */
/*
* Tracepoint for dyntick-idle entry/exit events. These take a string
@@ -442,7 +449,7 @@ TRACE_EVENT_RCU(rcu_fqs,
*/
TRACE_EVENT_RCU(rcu_dyntick,
- TP_PROTO(const char *polarity, long oldnesting, long newnesting, atomic_t dynticks),
+ TP_PROTO(const char *polarity, long oldnesting, long newnesting, int dynticks),
TP_ARGS(polarity, oldnesting, newnesting, dynticks),
@@ -457,7 +464,7 @@ TRACE_EVENT_RCU(rcu_dyntick,
__entry->polarity = polarity;
__entry->oldnesting = oldnesting;
__entry->newnesting = newnesting;
- __entry->dynticks = atomic_read(&dynticks);
+ __entry->dynticks = dynticks;
),
TP_printk("%s %lx %lx %#3x", __entry->polarity,
@@ -474,16 +481,14 @@ TRACE_EVENT_RCU(rcu_dyntick,
*/
TRACE_EVENT_RCU(rcu_callback,
- TP_PROTO(const char *rcuname, struct rcu_head *rhp, long qlen_lazy,
- long qlen),
+ TP_PROTO(const char *rcuname, struct rcu_head *rhp, long qlen),
- TP_ARGS(rcuname, rhp, qlen_lazy, qlen),
+ TP_ARGS(rcuname, rhp, qlen),
TP_STRUCT__entry(
__field(const char *, rcuname)
__field(void *, rhp)
__field(void *, func)
- __field(long, qlen_lazy)
__field(long, qlen)
),
@@ -491,13 +496,12 @@ TRACE_EVENT_RCU(rcu_callback,
__entry->rcuname = rcuname;
__entry->rhp = rhp;
__entry->func = rhp->func;
- __entry->qlen_lazy = qlen_lazy;
__entry->qlen = qlen;
),
- TP_printk("%s rhp=%p func=%ps %ld/%ld",
+ TP_printk("%s rhp=%p func=%ps %ld",
__entry->rcuname, __entry->rhp, __entry->func,
- __entry->qlen_lazy, __entry->qlen)
+ __entry->qlen)
);
/*
@@ -511,15 +515,14 @@ TRACE_EVENT_RCU(rcu_callback,
TRACE_EVENT_RCU(rcu_kfree_callback,
TP_PROTO(const char *rcuname, struct rcu_head *rhp, unsigned long offset,
- long qlen_lazy, long qlen),
+ long qlen),
- TP_ARGS(rcuname, rhp, offset, qlen_lazy, qlen),
+ TP_ARGS(rcuname, rhp, offset, qlen),
TP_STRUCT__entry(
__field(const char *, rcuname)
__field(void *, rhp)
__field(unsigned long, offset)
- __field(long, qlen_lazy)
__field(long, qlen)
),
@@ -527,13 +530,12 @@ TRACE_EVENT_RCU(rcu_kfree_callback,
__entry->rcuname = rcuname;
__entry->rhp = rhp;
__entry->offset = offset;
- __entry->qlen_lazy = qlen_lazy;
__entry->qlen = qlen;
),
- TP_printk("%s rhp=%p func=%ld %ld/%ld",
+ TP_printk("%s rhp=%p func=%ld %ld",
__entry->rcuname, __entry->rhp, __entry->offset,
- __entry->qlen_lazy, __entry->qlen)
+ __entry->qlen)
);
/*
@@ -545,27 +547,24 @@ TRACE_EVENT_RCU(rcu_kfree_callback,
*/
TRACE_EVENT_RCU(rcu_batch_start,
- TP_PROTO(const char *rcuname, long qlen_lazy, long qlen, long blimit),
+ TP_PROTO(const char *rcuname, long qlen, long blimit),
- TP_ARGS(rcuname, qlen_lazy, qlen, blimit),
+ TP_ARGS(rcuname, qlen, blimit),
TP_STRUCT__entry(
__field(const char *, rcuname)
- __field(long, qlen_lazy)
__field(long, qlen)
__field(long, blimit)
),
TP_fast_assign(
__entry->rcuname = rcuname;
- __entry->qlen_lazy = qlen_lazy;
__entry->qlen = qlen;
__entry->blimit = blimit;
),
- TP_printk("%s CBs=%ld/%ld bl=%ld",
- __entry->rcuname, __entry->qlen_lazy, __entry->qlen,
- __entry->blimit)
+ TP_printk("%s CBs=%ld bl=%ld",
+ __entry->rcuname, __entry->qlen, __entry->blimit)
);
/*
@@ -625,6 +624,34 @@ TRACE_EVENT_RCU(rcu_invoke_kfree_callback,
);
/*
+ * Tracepoint for the invocation of a single RCU callback of the special
+ * kfree_bulk() form. The first argument is the RCU flavor, the second
+ * argument is a number of elements in array to free, the third is an
+ * address of the array holding nr_records entries.
+ */
+TRACE_EVENT_RCU(rcu_invoke_kfree_bulk_callback,
+
+ TP_PROTO(const char *rcuname, unsigned long nr_records, void **p),
+
+ TP_ARGS(rcuname, nr_records, p),
+
+ TP_STRUCT__entry(
+ __field(const char *, rcuname)
+ __field(unsigned long, nr_records)
+ __field(void **, p)
+ ),
+
+ TP_fast_assign(
+ __entry->rcuname = rcuname;
+ __entry->nr_records = nr_records;
+ __entry->p = p;
+ ),
+
+ TP_printk("%s bulk=0x%p nr_records=%lu",
+ __entry->rcuname, __entry->p, __entry->nr_records)
+);
+
+/*
* Tracepoint for exiting rcu_do_batch after RCU callbacks have been
* invoked. The first argument is the name of the RCU flavor,
* the second argument is number of callbacks actually invoked,
@@ -713,8 +740,7 @@ TRACE_EVENT_RCU(rcu_torture_read,
* "Begin": rcu_barrier() started.
* "EarlyExit": rcu_barrier() piggybacked, thus early exit.
* "Inc1": rcu_barrier() piggyback check counter incremented.
- * "OfflineNoCB": rcu_barrier() found callback on never-online CPU
- * "OnlineNoCB": rcu_barrier() found online no-CBs CPU.
+ * "OfflineNoCBQ": rcu_barrier() found offline no-CBs CPU with callbacks.
* "OnlineQ": rcu_barrier() found online CPU with callbacks.
* "OnlineNQ": rcu_barrier() found online CPU, no callbacks.
* "IRQ": An rcu_barrier_callback() callback posted on remote CPU.
diff --git a/include/trace/events/rdma_core.h b/include/trace/events/rdma_core.h
new file mode 100644
index 000000000000..17642aa54437
--- /dev/null
+++ b/include/trace/events/rdma_core.h
@@ -0,0 +1,394 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Trace point definitions for core RDMA functions.
+ *
+ * Author: Chuck Lever <chuck.lever@oracle.com>
+ *
+ * Copyright (c) 2019, Oracle and/or its affiliates. All rights reserved.
+ */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM rdma_core
+
+#if !defined(_TRACE_RDMA_CORE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_RDMA_CORE_H
+
+#include <linux/tracepoint.h>
+#include <rdma/ib_verbs.h>
+
+/*
+ * enum ib_poll_context, from include/rdma/ib_verbs.h
+ */
+#define IB_POLL_CTX_LIST \
+ ib_poll_ctx(DIRECT) \
+ ib_poll_ctx(SOFTIRQ) \
+ ib_poll_ctx(WORKQUEUE) \
+ ib_poll_ctx_end(UNBOUND_WORKQUEUE)
+
+#undef ib_poll_ctx
+#undef ib_poll_ctx_end
+
+#define ib_poll_ctx(x) TRACE_DEFINE_ENUM(IB_POLL_##x);
+#define ib_poll_ctx_end(x) TRACE_DEFINE_ENUM(IB_POLL_##x);
+
+IB_POLL_CTX_LIST
+
+#undef ib_poll_ctx
+#undef ib_poll_ctx_end
+
+#define ib_poll_ctx(x) { IB_POLL_##x, #x },
+#define ib_poll_ctx_end(x) { IB_POLL_##x, #x }
+
+#define rdma_show_ib_poll_ctx(x) \
+ __print_symbolic(x, IB_POLL_CTX_LIST)
+
+/**
+ ** Completion Queue events
+ **/
+
+TRACE_EVENT(cq_schedule,
+ TP_PROTO(
+ struct ib_cq *cq
+ ),
+
+ TP_ARGS(cq),
+
+ TP_STRUCT__entry(
+ __field(u32, cq_id)
+ ),
+
+ TP_fast_assign(
+ cq->timestamp = ktime_get();
+ cq->interrupt = true;
+
+ __entry->cq_id = cq->res.id;
+ ),
+
+ TP_printk("cq.id=%u", __entry->cq_id)
+);
+
+TRACE_EVENT(cq_reschedule,
+ TP_PROTO(
+ struct ib_cq *cq
+ ),
+
+ TP_ARGS(cq),
+
+ TP_STRUCT__entry(
+ __field(u32, cq_id)
+ ),
+
+ TP_fast_assign(
+ cq->timestamp = ktime_get();
+ cq->interrupt = false;
+
+ __entry->cq_id = cq->res.id;
+ ),
+
+ TP_printk("cq.id=%u", __entry->cq_id)
+);
+
+TRACE_EVENT(cq_process,
+ TP_PROTO(
+ const struct ib_cq *cq
+ ),
+
+ TP_ARGS(cq),
+
+ TP_STRUCT__entry(
+ __field(u32, cq_id)
+ __field(bool, interrupt)
+ __field(s64, latency)
+ ),
+
+ TP_fast_assign(
+ ktime_t latency = ktime_sub(ktime_get(), cq->timestamp);
+
+ __entry->cq_id = cq->res.id;
+ __entry->latency = ktime_to_us(latency);
+ __entry->interrupt = cq->interrupt;
+ ),
+
+ TP_printk("cq.id=%u wake-up took %lld [us] from %s",
+ __entry->cq_id, __entry->latency,
+ __entry->interrupt ? "interrupt" : "reschedule"
+ )
+);
+
+TRACE_EVENT(cq_poll,
+ TP_PROTO(
+ const struct ib_cq *cq,
+ int requested,
+ int rc
+ ),
+
+ TP_ARGS(cq, requested, rc),
+
+ TP_STRUCT__entry(
+ __field(u32, cq_id)
+ __field(int, requested)
+ __field(int, rc)
+ ),
+
+ TP_fast_assign(
+ __entry->cq_id = cq->res.id;
+ __entry->requested = requested;
+ __entry->rc = rc;
+ ),
+
+ TP_printk("cq.id=%u requested %d, returned %d",
+ __entry->cq_id, __entry->requested, __entry->rc
+ )
+);
+
+TRACE_EVENT(cq_drain_complete,
+ TP_PROTO(
+ const struct ib_cq *cq
+ ),
+
+ TP_ARGS(cq),
+
+ TP_STRUCT__entry(
+ __field(u32, cq_id)
+ ),
+
+ TP_fast_assign(
+ __entry->cq_id = cq->res.id;
+ ),
+
+ TP_printk("cq.id=%u",
+ __entry->cq_id
+ )
+);
+
+
+TRACE_EVENT(cq_modify,
+ TP_PROTO(
+ const struct ib_cq *cq,
+ u16 comps,
+ u16 usec
+ ),
+
+ TP_ARGS(cq, comps, usec),
+
+ TP_STRUCT__entry(
+ __field(u32, cq_id)
+ __field(unsigned int, comps)
+ __field(unsigned int, usec)
+ ),
+
+ TP_fast_assign(
+ __entry->cq_id = cq->res.id;
+ __entry->comps = comps;
+ __entry->usec = usec;
+ ),
+
+ TP_printk("cq.id=%u comps=%u usec=%u",
+ __entry->cq_id, __entry->comps, __entry->usec
+ )
+);
+
+TRACE_EVENT(cq_alloc,
+ TP_PROTO(
+ const struct ib_cq *cq,
+ int nr_cqe,
+ int comp_vector,
+ enum ib_poll_context poll_ctx
+ ),
+
+ TP_ARGS(cq, nr_cqe, comp_vector, poll_ctx),
+
+ TP_STRUCT__entry(
+ __field(u32, cq_id)
+ __field(int, nr_cqe)
+ __field(int, comp_vector)
+ __field(unsigned long, poll_ctx)
+ ),
+
+ TP_fast_assign(
+ __entry->cq_id = cq->res.id;
+ __entry->nr_cqe = nr_cqe;
+ __entry->comp_vector = comp_vector;
+ __entry->poll_ctx = poll_ctx;
+ ),
+
+ TP_printk("cq.id=%u nr_cqe=%d comp_vector=%d poll_ctx=%s",
+ __entry->cq_id, __entry->nr_cqe, __entry->comp_vector,
+ rdma_show_ib_poll_ctx(__entry->poll_ctx)
+ )
+);
+
+TRACE_EVENT(cq_alloc_error,
+ TP_PROTO(
+ int nr_cqe,
+ int comp_vector,
+ enum ib_poll_context poll_ctx,
+ int rc
+ ),
+
+ TP_ARGS(nr_cqe, comp_vector, poll_ctx, rc),
+
+ TP_STRUCT__entry(
+ __field(int, rc)
+ __field(int, nr_cqe)
+ __field(int, comp_vector)
+ __field(unsigned long, poll_ctx)
+ ),
+
+ TP_fast_assign(
+ __entry->rc = rc;
+ __entry->nr_cqe = nr_cqe;
+ __entry->comp_vector = comp_vector;
+ __entry->poll_ctx = poll_ctx;
+ ),
+
+ TP_printk("nr_cqe=%d comp_vector=%d poll_ctx=%s rc=%d",
+ __entry->nr_cqe, __entry->comp_vector,
+ rdma_show_ib_poll_ctx(__entry->poll_ctx), __entry->rc
+ )
+);
+
+TRACE_EVENT(cq_free,
+ TP_PROTO(
+ const struct ib_cq *cq
+ ),
+
+ TP_ARGS(cq),
+
+ TP_STRUCT__entry(
+ __field(u32, cq_id)
+ ),
+
+ TP_fast_assign(
+ __entry->cq_id = cq->res.id;
+ ),
+
+ TP_printk("cq.id=%u", __entry->cq_id)
+);
+
+/**
+ ** Memory Region events
+ **/
+
+/*
+ * enum ib_mr_type, from include/rdma/ib_verbs.h
+ */
+#define IB_MR_TYPE_LIST \
+ ib_mr_type_item(MEM_REG) \
+ ib_mr_type_item(SG_GAPS) \
+ ib_mr_type_item(DM) \
+ ib_mr_type_item(USER) \
+ ib_mr_type_item(DMA) \
+ ib_mr_type_end(INTEGRITY)
+
+#undef ib_mr_type_item
+#undef ib_mr_type_end
+
+#define ib_mr_type_item(x) TRACE_DEFINE_ENUM(IB_MR_TYPE_##x);
+#define ib_mr_type_end(x) TRACE_DEFINE_ENUM(IB_MR_TYPE_##x);
+
+IB_MR_TYPE_LIST
+
+#undef ib_mr_type_item
+#undef ib_mr_type_end
+
+#define ib_mr_type_item(x) { IB_MR_TYPE_##x, #x },
+#define ib_mr_type_end(x) { IB_MR_TYPE_##x, #x }
+
+#define rdma_show_ib_mr_type(x) \
+ __print_symbolic(x, IB_MR_TYPE_LIST)
+
+TRACE_EVENT(mr_alloc,
+ TP_PROTO(
+ const struct ib_pd *pd,
+ enum ib_mr_type mr_type,
+ u32 max_num_sg,
+ const struct ib_mr *mr
+ ),
+
+ TP_ARGS(pd, mr_type, max_num_sg, mr),
+
+ TP_STRUCT__entry(
+ __field(u32, pd_id)
+ __field(u32, mr_id)
+ __field(u32, max_num_sg)
+ __field(int, rc)
+ __field(unsigned long, mr_type)
+ ),
+
+ TP_fast_assign(
+ __entry->pd_id = pd->res.id;
+ if (IS_ERR(mr)) {
+ __entry->mr_id = 0;
+ __entry->rc = PTR_ERR(mr);
+ } else {
+ __entry->mr_id = mr->res.id;
+ __entry->rc = 0;
+ }
+ __entry->max_num_sg = max_num_sg;
+ __entry->mr_type = mr_type;
+ ),
+
+ TP_printk("pd.id=%u mr.id=%u type=%s max_num_sg=%u rc=%d",
+ __entry->pd_id, __entry->mr_id,
+ rdma_show_ib_mr_type(__entry->mr_type),
+ __entry->max_num_sg, __entry->rc)
+);
+
+TRACE_EVENT(mr_integ_alloc,
+ TP_PROTO(
+ const struct ib_pd *pd,
+ u32 max_num_data_sg,
+ u32 max_num_meta_sg,
+ const struct ib_mr *mr
+ ),
+
+ TP_ARGS(pd, max_num_data_sg, max_num_meta_sg, mr),
+
+ TP_STRUCT__entry(
+ __field(u32, pd_id)
+ __field(u32, mr_id)
+ __field(u32, max_num_data_sg)
+ __field(u32, max_num_meta_sg)
+ __field(int, rc)
+ ),
+
+ TP_fast_assign(
+ __entry->pd_id = pd->res.id;
+ if (IS_ERR(mr)) {
+ __entry->mr_id = 0;
+ __entry->rc = PTR_ERR(mr);
+ } else {
+ __entry->mr_id = mr->res.id;
+ __entry->rc = 0;
+ }
+ __entry->max_num_data_sg = max_num_data_sg;
+ __entry->max_num_meta_sg = max_num_meta_sg;
+ ),
+
+ TP_printk("pd.id=%u mr.id=%u max_num_data_sg=%u max_num_meta_sg=%u rc=%d",
+ __entry->pd_id, __entry->mr_id, __entry->max_num_data_sg,
+ __entry->max_num_meta_sg, __entry->rc)
+);
+
+TRACE_EVENT(mr_dereg,
+ TP_PROTO(
+ const struct ib_mr *mr
+ ),
+
+ TP_ARGS(mr),
+
+ TP_STRUCT__entry(
+ __field(u32, id)
+ ),
+
+ TP_fast_assign(
+ __entry->id = mr->res.id;
+ ),
+
+ TP_printk("mr.id=%u", __entry->id)
+);
+
+#endif /* _TRACE_RDMA_CORE_H */
+
+#include <trace/define_trace.h>
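(Editorial sketch, not part of the patch.) The new header relies on the usual two-pass expansion of IB_MR_TYPE_LIST: the first pass turns each entry into TRACE_DEFINE_ENUM() so the enum values resolve in the tracefs format files, the second builds the { value, "name" } pairs that __print_symbolic() consumes at print time. A minimal sketch of how the generated helpers might be fired from the core verbs code follows; the call site and helper name are assumptions, only the tracepoint signatures come from the header above.

	/* Sketch only: exactly one .c file defines CREATE_TRACE_POINTS before
	 * including the header so the tracepoint bodies are emitted once. */
	#define CREATE_TRACE_POINTS
	#include <trace/events/rdma_core.h>

	/* Hypothetical helper: record an MR allocation attempt. The tracepoint
	 * accepts an ERR_PTR() result; TP_fast_assign() stores either
	 * mr->res.id or PTR_ERR(mr) in the rc field. */
	static void example_trace_mr_result(struct ib_pd *pd, enum ib_mr_type type,
					    u32 max_num_sg, struct ib_mr *mr)
	{
		trace_mr_alloc(pd, type, max_num_sg, mr);
	}
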
diff --git a/include/trace/events/rpcgss.h b/include/trace/events/rpcgss.h
index d1f7fe1b6fe4..9827f535f032 100644
--- a/include/trace/events/rpcgss.h
+++ b/include/trace/events/rpcgss.h
@@ -126,6 +126,34 @@ DEFINE_GSSAPI_EVENT(verify_mic);
DEFINE_GSSAPI_EVENT(wrap);
DEFINE_GSSAPI_EVENT(unwrap);
+TRACE_EVENT(rpcgss_accept_upcall,
+ TP_PROTO(
+ __be32 xid,
+ u32 major_status,
+ u32 minor_status
+ ),
+
+ TP_ARGS(xid, major_status, minor_status),
+
+ TP_STRUCT__entry(
+ __field(u32, xid)
+ __field(u32, minor_status)
+ __field(unsigned long, major_status)
+ ),
+
+ TP_fast_assign(
+ __entry->xid = be32_to_cpu(xid);
+ __entry->minor_status = minor_status;
+ __entry->major_status = major_status;
+ ),
+
+ TP_printk("xid=0x%08x major_status=%s (0x%08lx) minor_status=%u",
+ __entry->xid, __entry->major_status == 0 ? "GSS_S_COMPLETE" :
+ show_gss_status(__entry->major_status),
+ __entry->major_status, __entry->minor_status
+ )
+);
+
/**
** GSS auth unwrap failures
@@ -355,6 +383,23 @@ TRACE_EVENT(rpcgss_createauth,
show_pseudoflavor(__entry->flavor), __entry->error)
);
+TRACE_EVENT(rpcgss_oid_to_mech,
+ TP_PROTO(
+ const char *oid
+ ),
+
+ TP_ARGS(oid),
+
+ TP_STRUCT__entry(
+ __string(oid, oid)
+ ),
+
+ TP_fast_assign(
+ __assign_str(oid, oid);
+ ),
+
+ TP_printk("mech for oid %s was not found", __get_str(oid))
+);
#endif /* _TRACE_RPCGSS_H */
diff --git a/include/trace/events/rpcrdma.h b/include/trace/events/rpcrdma.h
index a13830616107..c0e4c93324f5 100644
--- a/include/trace/events/rpcrdma.h
+++ b/include/trace/events/rpcrdma.h
@@ -85,6 +85,44 @@ DECLARE_EVENT_CLASS(xprtrdma_rxprt,
), \
TP_ARGS(r_xprt))
+DECLARE_EVENT_CLASS(xprtrdma_connect_class,
+ TP_PROTO(
+ const struct rpcrdma_xprt *r_xprt,
+ int rc
+ ),
+
+ TP_ARGS(r_xprt, rc),
+
+ TP_STRUCT__entry(
+ __field(const void *, r_xprt)
+ __field(int, rc)
+ __field(int, connect_status)
+ __string(addr, rpcrdma_addrstr(r_xprt))
+ __string(port, rpcrdma_portstr(r_xprt))
+ ),
+
+ TP_fast_assign(
+ __entry->r_xprt = r_xprt;
+ __entry->rc = rc;
+ __entry->connect_status = r_xprt->rx_ep.rep_connected;
+ __assign_str(addr, rpcrdma_addrstr(r_xprt));
+ __assign_str(port, rpcrdma_portstr(r_xprt));
+ ),
+
+ TP_printk("peer=[%s]:%s r_xprt=%p: rc=%d connect status=%d",
+ __get_str(addr), __get_str(port), __entry->r_xprt,
+ __entry->rc, __entry->connect_status
+ )
+);
+
+#define DEFINE_CONN_EVENT(name) \
+ DEFINE_EVENT(xprtrdma_connect_class, xprtrdma_##name, \
+ TP_PROTO( \
+ const struct rpcrdma_xprt *r_xprt, \
+ int rc \
+ ), \
+ TP_ARGS(r_xprt, rc))
+
DECLARE_EVENT_CLASS(xprtrdma_rdch_event,
TP_PROTO(
const struct rpc_task *task,
@@ -333,47 +371,81 @@ TRACE_EVENT(xprtrdma_cm_event,
)
);
-TRACE_EVENT(xprtrdma_disconnect,
+TRACE_EVENT(xprtrdma_inline_thresh,
TP_PROTO(
- const struct rpcrdma_xprt *r_xprt,
- int status
+ const struct rpcrdma_xprt *r_xprt
),
- TP_ARGS(r_xprt, status),
+ TP_ARGS(r_xprt),
TP_STRUCT__entry(
__field(const void *, r_xprt)
- __field(int, status)
- __field(int, connected)
+ __field(unsigned int, inline_send)
+ __field(unsigned int, inline_recv)
+ __field(unsigned int, max_send)
+ __field(unsigned int, max_recv)
__string(addr, rpcrdma_addrstr(r_xprt))
__string(port, rpcrdma_portstr(r_xprt))
),
TP_fast_assign(
+ const struct rpcrdma_ep *ep = &r_xprt->rx_ep;
+
__entry->r_xprt = r_xprt;
- __entry->status = status;
- __entry->connected = r_xprt->rx_ep.rep_connected;
+ __entry->inline_send = ep->rep_inline_send;
+ __entry->inline_recv = ep->rep_inline_recv;
+ __entry->max_send = ep->rep_max_inline_send;
+ __entry->max_recv = ep->rep_max_inline_recv;
__assign_str(addr, rpcrdma_addrstr(r_xprt));
__assign_str(port, rpcrdma_portstr(r_xprt));
),
- TP_printk("peer=[%s]:%s r_xprt=%p: status=%d %sconnected",
- __get_str(addr), __get_str(port),
- __entry->r_xprt, __entry->status,
- __entry->connected == 1 ? "still " : "dis"
+ TP_printk("peer=[%s]:%s r_xprt=%p neg send/recv=%u/%u, calc send/recv=%u/%u",
+ __get_str(addr), __get_str(port), __entry->r_xprt,
+ __entry->inline_send, __entry->inline_recv,
+ __entry->max_send, __entry->max_recv
)
);
-DEFINE_RXPRT_EVENT(xprtrdma_conn_start);
-DEFINE_RXPRT_EVENT(xprtrdma_conn_tout);
+DEFINE_CONN_EVENT(connect);
+DEFINE_CONN_EVENT(disconnect);
+
DEFINE_RXPRT_EVENT(xprtrdma_create);
DEFINE_RXPRT_EVENT(xprtrdma_op_destroy);
DEFINE_RXPRT_EVENT(xprtrdma_remove);
DEFINE_RXPRT_EVENT(xprtrdma_reinsert);
-DEFINE_RXPRT_EVENT(xprtrdma_reconnect);
DEFINE_RXPRT_EVENT(xprtrdma_op_inject_dsc);
DEFINE_RXPRT_EVENT(xprtrdma_op_close);
-DEFINE_RXPRT_EVENT(xprtrdma_op_connect);
+DEFINE_RXPRT_EVENT(xprtrdma_op_setport);
+
+TRACE_EVENT(xprtrdma_op_connect,
+ TP_PROTO(
+ const struct rpcrdma_xprt *r_xprt,
+ unsigned long delay
+ ),
+
+ TP_ARGS(r_xprt, delay),
+
+ TP_STRUCT__entry(
+ __field(const void *, r_xprt)
+ __field(unsigned long, delay)
+ __string(addr, rpcrdma_addrstr(r_xprt))
+ __string(port, rpcrdma_portstr(r_xprt))
+ ),
+
+ TP_fast_assign(
+ __entry->r_xprt = r_xprt;
+ __entry->delay = delay;
+ __assign_str(addr, rpcrdma_addrstr(r_xprt));
+ __assign_str(port, rpcrdma_portstr(r_xprt));
+ ),
+
+ TP_printk("peer=[%s]:%s r_xprt=%p delay=%lu",
+ __get_str(addr), __get_str(port), __entry->r_xprt,
+ __entry->delay
+ )
+);
+
TRACE_EVENT(xprtrdma_op_set_cto,
TP_PROTO(
@@ -532,6 +604,8 @@ DEFINE_WRCH_EVENT(write);
DEFINE_WRCH_EVENT(reply);
TRACE_DEFINE_ENUM(rpcrdma_noch);
+TRACE_DEFINE_ENUM(rpcrdma_noch_pullup);
+TRACE_DEFINE_ENUM(rpcrdma_noch_mapped);
TRACE_DEFINE_ENUM(rpcrdma_readch);
TRACE_DEFINE_ENUM(rpcrdma_areadch);
TRACE_DEFINE_ENUM(rpcrdma_writech);
@@ -540,6 +614,8 @@ TRACE_DEFINE_ENUM(rpcrdma_replych);
#define xprtrdma_show_chunktype(x) \
__print_symbolic(x, \
{ rpcrdma_noch, "inline" }, \
+ { rpcrdma_noch_pullup, "pullup" }, \
+ { rpcrdma_noch_mapped, "mapped" }, \
{ rpcrdma_readch, "read list" }, \
{ rpcrdma_areadch, "*read list" }, \
{ rpcrdma_writech, "write list" }, \
@@ -653,6 +729,7 @@ TRACE_EVENT(xprtrdma_post_send,
TP_STRUCT__entry(
__field(const void *, req)
+ __field(const void *, sc)
__field(unsigned int, task_id)
__field(unsigned int, client_id)
__field(int, num_sge)
@@ -667,15 +744,15 @@ TRACE_EVENT(xprtrdma_post_send,
__entry->client_id = rqst->rq_task->tk_client ?
rqst->rq_task->tk_client->cl_clid : -1;
__entry->req = req;
- __entry->num_sge = req->rl_sendctx->sc_wr.num_sge;
- __entry->signaled = req->rl_sendctx->sc_wr.send_flags &
- IB_SEND_SIGNALED;
+ __entry->sc = req->rl_sendctx;
+ __entry->num_sge = req->rl_wr.num_sge;
+ __entry->signaled = req->rl_wr.send_flags & IB_SEND_SIGNALED;
__entry->status = status;
),
- TP_printk("task:%u@%u req=%p (%d SGE%s) %sstatus=%d",
+ TP_printk("task:%u@%u req=%p sc=%p (%d SGE%s) %sstatus=%d",
__entry->task_id, __entry->client_id,
- __entry->req, __entry->num_sge,
+ __entry->req, __entry->sc, __entry->num_sge,
(__entry->num_sge == 1 ? "" : "s"),
(__entry->signaled ? "signaled " : ""),
__entry->status
@@ -735,6 +812,31 @@ TRACE_EVENT(xprtrdma_post_recvs,
)
);
+TRACE_EVENT(xprtrdma_post_linv,
+ TP_PROTO(
+ const struct rpcrdma_req *req,
+ int status
+ ),
+
+ TP_ARGS(req, status),
+
+ TP_STRUCT__entry(
+ __field(const void *, req)
+ __field(int, status)
+ __field(u32, xid)
+ ),
+
+ TP_fast_assign(
+ __entry->req = req;
+ __entry->status = status;
+ __entry->xid = be32_to_cpu(req->rl_slot.rq_xid);
+ ),
+
+ TP_printk("req=%p xid=0x%08x status=%d",
+ __entry->req, __entry->xid, __entry->status
+ )
+);
+
/**
** Completion events
**/
@@ -749,6 +851,7 @@ TRACE_EVENT(xprtrdma_wc_send,
TP_STRUCT__entry(
__field(const void *, req)
+ __field(const void *, sc)
__field(unsigned int, unmap_count)
__field(unsigned int, status)
__field(unsigned int, vendor_err)
@@ -756,13 +859,14 @@ TRACE_EVENT(xprtrdma_wc_send,
TP_fast_assign(
__entry->req = sc->sc_req;
+ __entry->sc = sc;
__entry->unmap_count = sc->sc_unmap_count;
__entry->status = wc->status;
__entry->vendor_err = __entry->status ? wc->vendor_err : 0;
),
- TP_printk("req=%p, unmapped %u pages: %s (%u/0x%x)",
- __entry->req, __entry->unmap_count,
+ TP_printk("req=%p sc=%p unmapped=%u: %s (%u/0x%x)",
+ __entry->req, __entry->sc, __entry->unmap_count,
rdma_show_wc_status(__entry->status),
__entry->status, __entry->vendor_err
)
@@ -1021,66 +1125,32 @@ DEFINE_REPLY_EVENT(xprtrdma_reply_hdr);
TRACE_EVENT(xprtrdma_fixup,
TP_PROTO(
const struct rpc_rqst *rqst,
- int len,
- int hdrlen
+ unsigned long fixup
),
- TP_ARGS(rqst, len, hdrlen),
+ TP_ARGS(rqst, fixup),
TP_STRUCT__entry(
__field(unsigned int, task_id)
__field(unsigned int, client_id)
- __field(const void *, base)
- __field(int, len)
- __field(int, hdrlen)
- ),
-
- TP_fast_assign(
- __entry->task_id = rqst->rq_task->tk_pid;
- __entry->client_id = rqst->rq_task->tk_client->cl_clid;
- __entry->base = rqst->rq_rcv_buf.head[0].iov_base;
- __entry->len = len;
- __entry->hdrlen = hdrlen;
- ),
-
- TP_printk("task:%u@%u base=%p len=%d hdrlen=%d",
- __entry->task_id, __entry->client_id,
- __entry->base, __entry->len, __entry->hdrlen
- )
-);
-
-TRACE_EVENT(xprtrdma_fixup_pg,
- TP_PROTO(
- const struct rpc_rqst *rqst,
- int pageno,
- const void *pos,
- int len,
- int curlen
- ),
-
- TP_ARGS(rqst, pageno, pos, len, curlen),
-
- TP_STRUCT__entry(
- __field(unsigned int, task_id)
- __field(unsigned int, client_id)
- __field(const void *, pos)
- __field(int, pageno)
- __field(int, len)
- __field(int, curlen)
+ __field(unsigned long, fixup)
+ __field(size_t, headlen)
+ __field(unsigned int, pagelen)
+ __field(size_t, taillen)
),
TP_fast_assign(
__entry->task_id = rqst->rq_task->tk_pid;
__entry->client_id = rqst->rq_task->tk_client->cl_clid;
- __entry->pos = pos;
- __entry->pageno = pageno;
- __entry->len = len;
- __entry->curlen = curlen;
+ __entry->fixup = fixup;
+ __entry->headlen = rqst->rq_rcv_buf.head[0].iov_len;
+ __entry->pagelen = rqst->rq_rcv_buf.page_len;
+ __entry->taillen = rqst->rq_rcv_buf.tail[0].iov_len;
),
- TP_printk("task:%u@%u pageno=%d pos=%p len=%d curlen=%d",
- __entry->task_id, __entry->client_id,
- __entry->pageno, __entry->pos, __entry->len, __entry->curlen
+ TP_printk("task:%u@%u fixup=%lu xdr=%zu/%u/%zu",
+ __entry->task_id, __entry->client_id, __entry->fixup,
+ __entry->headlen, __entry->pagelen, __entry->taillen
)
);
@@ -1498,31 +1568,47 @@ DEFINE_ERROR_EVENT(chunk);
** Server-side RDMA API events
**/
-TRACE_EVENT(svcrdma_dma_map_page,
+DECLARE_EVENT_CLASS(svcrdma_dma_map_class,
TP_PROTO(
const struct svcxprt_rdma *rdma,
- const void *page
+ u64 dma_addr,
+ u32 length
),
- TP_ARGS(rdma, page),
+ TP_ARGS(rdma, dma_addr, length),
TP_STRUCT__entry(
- __field(const void *, page);
+ __field(u64, dma_addr)
+ __field(u32, length)
__string(device, rdma->sc_cm_id->device->name)
__string(addr, rdma->sc_xprt.xpt_remotebuf)
),
TP_fast_assign(
- __entry->page = page;
+ __entry->dma_addr = dma_addr;
+ __entry->length = length;
__assign_str(device, rdma->sc_cm_id->device->name);
__assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
),
- TP_printk("addr=%s device=%s page=%p",
- __get_str(addr), __get_str(device), __entry->page
+ TP_printk("addr=%s device=%s dma_addr=%llu length=%u",
+ __get_str(addr), __get_str(device),
+ __entry->dma_addr, __entry->length
)
);
+#define DEFINE_SVC_DMA_EVENT(name) \
+ DEFINE_EVENT(svcrdma_dma_map_class, svcrdma_##name, \
+ TP_PROTO( \
+ const struct svcxprt_rdma *rdma,\
+ u64 dma_addr, \
+ u32 length \
+ ), \
+ TP_ARGS(rdma, dma_addr, length))
+
+DEFINE_SVC_DMA_EVENT(dma_map_page);
+DEFINE_SVC_DMA_EVENT(dma_unmap_page);
+
TRACE_EVENT(svcrdma_dma_map_rwctx,
TP_PROTO(
const struct svcxprt_rdma *rdma,
diff --git a/include/trace/events/rpm.h b/include/trace/events/rpm.h
index 26927a560eab..3c716214dab1 100644
--- a/include/trace/events/rpm.h
+++ b/include/trace/events/rpm.h
@@ -74,6 +74,12 @@ DEFINE_EVENT(rpm_internal, rpm_idle,
TP_ARGS(dev, flags)
);
+DEFINE_EVENT(rpm_internal, rpm_usage,
+
+ TP_PROTO(struct device *dev, int flags),
+
+ TP_ARGS(dev, flags)
+);
TRACE_EVENT(rpm_return_int,
TP_PROTO(struct device *dev, unsigned long ip, int ret),
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index 420e80e56e55..ed168b0e2c53 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -487,7 +487,11 @@ TRACE_EVENT(sched_process_hang,
);
#endif /* CONFIG_DETECT_HUNG_TASK */
-DECLARE_EVENT_CLASS(sched_move_task_template,
+/*
+ * Tracks migration of tasks from one runqueue to another. Can be used to
+ * detect if automatic NUMA balancing is bouncing between nodes.
+ */
+TRACE_EVENT(sched_move_numa,
TP_PROTO(struct task_struct *tsk, int src_cpu, int dst_cpu),
@@ -519,23 +523,7 @@ DECLARE_EVENT_CLASS(sched_move_task_template,
__entry->dst_cpu, __entry->dst_nid)
);
-/*
- * Tracks migration of tasks from one runqueue to another. Can be used to
- * detect if automatic NUMA balancing is bouncing between nodes
- */
-DEFINE_EVENT(sched_move_task_template, sched_move_numa,
- TP_PROTO(struct task_struct *tsk, int src_cpu, int dst_cpu),
-
- TP_ARGS(tsk, src_cpu, dst_cpu)
-);
-
-DEFINE_EVENT(sched_move_task_template, sched_stick_numa,
- TP_PROTO(struct task_struct *tsk, int src_cpu, int dst_cpu),
-
- TP_ARGS(tsk, src_cpu, dst_cpu)
-);
-
-TRACE_EVENT(sched_swap_numa,
+DECLARE_EVENT_CLASS(sched_numa_pair_template,
TP_PROTO(struct task_struct *src_tsk, int src_cpu,
struct task_struct *dst_tsk, int dst_cpu),
@@ -561,11 +549,11 @@ TRACE_EVENT(sched_swap_numa,
__entry->src_ngid = task_numa_group_id(src_tsk);
__entry->src_cpu = src_cpu;
__entry->src_nid = cpu_to_node(src_cpu);
- __entry->dst_pid = task_pid_nr(dst_tsk);
- __entry->dst_tgid = task_tgid_nr(dst_tsk);
- __entry->dst_ngid = task_numa_group_id(dst_tsk);
+ __entry->dst_pid = dst_tsk ? task_pid_nr(dst_tsk) : 0;
+ __entry->dst_tgid = dst_tsk ? task_tgid_nr(dst_tsk) : 0;
+ __entry->dst_ngid = dst_tsk ? task_numa_group_id(dst_tsk) : 0;
__entry->dst_cpu = dst_cpu;
- __entry->dst_nid = cpu_to_node(dst_cpu);
+ __entry->dst_nid = dst_cpu >= 0 ? cpu_to_node(dst_cpu) : -1;
),
TP_printk("src_pid=%d src_tgid=%d src_ngid=%d src_cpu=%d src_nid=%d dst_pid=%d dst_tgid=%d dst_ngid=%d dst_cpu=%d dst_nid=%d",
@@ -575,6 +563,23 @@ TRACE_EVENT(sched_swap_numa,
__entry->dst_cpu, __entry->dst_nid)
);
+DEFINE_EVENT(sched_numa_pair_template, sched_stick_numa,
+
+ TP_PROTO(struct task_struct *src_tsk, int src_cpu,
+ struct task_struct *dst_tsk, int dst_cpu),
+
+ TP_ARGS(src_tsk, src_cpu, dst_tsk, dst_cpu)
+);
+
+DEFINE_EVENT(sched_numa_pair_template, sched_swap_numa,
+
+ TP_PROTO(struct task_struct *src_tsk, int src_cpu,
+ struct task_struct *dst_tsk, int dst_cpu),
+
+ TP_ARGS(src_tsk, src_cpu, dst_tsk, dst_cpu)
+);
+
+
/*
* Tracepoint for waking a polling cpu without an IPI.
*/
@@ -613,6 +618,10 @@ DECLARE_TRACE(pelt_dl_tp,
TP_PROTO(struct rq *rq),
TP_ARGS(rq));
+DECLARE_TRACE(pelt_thermal_tp,
+ TP_PROTO(struct rq *rq),
+ TP_ARGS(rq));
+
DECLARE_TRACE(pelt_irq_tp,
TP_PROTO(struct rq *rq),
TP_ARGS(rq));
diff --git a/include/trace/events/scmi.h b/include/trace/events/scmi.h
new file mode 100644
index 000000000000..f076c430d243
--- /dev/null
+++ b/include/trace/events/scmi.h
@@ -0,0 +1,90 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM scmi
+
+#if !defined(_TRACE_SCMI_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_SCMI_H
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(scmi_xfer_begin,
+ TP_PROTO(int transfer_id, u8 msg_id, u8 protocol_id, u16 seq,
+ bool poll),
+ TP_ARGS(transfer_id, msg_id, protocol_id, seq, poll),
+
+ TP_STRUCT__entry(
+ __field(int, transfer_id)
+ __field(u8, msg_id)
+ __field(u8, protocol_id)
+ __field(u16, seq)
+ __field(bool, poll)
+ ),
+
+ TP_fast_assign(
+ __entry->transfer_id = transfer_id;
+ __entry->msg_id = msg_id;
+ __entry->protocol_id = protocol_id;
+ __entry->seq = seq;
+ __entry->poll = poll;
+ ),
+
+ TP_printk("transfer_id=%d msg_id=%u protocol_id=%u seq=%u poll=%u",
+ __entry->transfer_id, __entry->msg_id, __entry->protocol_id,
+ __entry->seq, __entry->poll)
+);
+
+TRACE_EVENT(scmi_xfer_end,
+ TP_PROTO(int transfer_id, u8 msg_id, u8 protocol_id, u16 seq,
+ u32 status),
+ TP_ARGS(transfer_id, msg_id, protocol_id, seq, status),
+
+ TP_STRUCT__entry(
+ __field(int, transfer_id)
+ __field(u8, msg_id)
+ __field(u8, protocol_id)
+ __field(u16, seq)
+ __field(u32, status)
+ ),
+
+ TP_fast_assign(
+ __entry->transfer_id = transfer_id;
+ __entry->msg_id = msg_id;
+ __entry->protocol_id = protocol_id;
+ __entry->seq = seq;
+ __entry->status = status;
+ ),
+
+ TP_printk("transfer_id=%d msg_id=%u protocol_id=%u seq=%u status=%u",
+ __entry->transfer_id, __entry->msg_id, __entry->protocol_id,
+ __entry->seq, __entry->status)
+);
+
+TRACE_EVENT(scmi_rx_done,
+ TP_PROTO(int transfer_id, u8 msg_id, u8 protocol_id, u16 seq,
+ u8 msg_type),
+ TP_ARGS(transfer_id, msg_id, protocol_id, seq, msg_type),
+
+ TP_STRUCT__entry(
+ __field(int, transfer_id)
+ __field(u8, msg_id)
+ __field(u8, protocol_id)
+ __field(u16, seq)
+ __field(u8, msg_type)
+ ),
+
+ TP_fast_assign(
+ __entry->transfer_id = transfer_id;
+ __entry->msg_id = msg_id;
+ __entry->protocol_id = protocol_id;
+ __entry->seq = seq;
+ __entry->msg_type = msg_type;
+ ),
+
+ TP_printk("transfer_id=%d msg_id=%u protocol_id=%u seq=%u msg_type=%u",
+ __entry->transfer_id, __entry->msg_id, __entry->protocol_id,
+ __entry->seq, __entry->msg_type)
+);
+#endif /* _TRACE_SCMI_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
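(Editorial sketch, not part of the patch.) scmi_xfer_begin and scmi_xfer_end bracket a command round trip while scmi_rx_done marks the receive-side completion; all three key on the same transfer_id/msg_id/protocol_id/seq tuple so the stages of one message can be correlated. A hedged sketch of the expected call pattern in the SCMI driver; the struct scmi_xfer field names and the send helper are assumptions, not shown by this patch.

	/* Sketch only: bracketing one transfer with the new events. */
	#define CREATE_TRACE_POINTS
	#include <trace/events/scmi.h>

	static int example_do_xfer(struct scmi_xfer *xfer, bool poll)
	{
		int ret;

		trace_scmi_xfer_begin(xfer->transfer_id, xfer->hdr.id,
				      xfer->hdr.protocol_id, xfer->hdr.seq, poll);

		ret = example_send_and_wait(xfer, poll);	/* hypothetical */

		trace_scmi_xfer_end(xfer->transfer_id, xfer->hdr.id,
				    xfer->hdr.protocol_id, xfer->hdr.seq,
				    xfer->hdr.status);
		return ret;
	}
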
diff --git a/include/trace/events/sctp.h b/include/trace/events/sctp.h
index 7475c7be165a..d4aac3436595 100644
--- a/include/trace/events/sctp.h
+++ b/include/trace/events/sctp.h
@@ -75,15 +75,6 @@ TRACE_EVENT(sctp_probe,
__entry->pathmtu = asoc->pathmtu;
__entry->rwnd = asoc->peer.rwnd;
__entry->unack_data = asoc->unack_data;
-
- if (trace_sctp_probe_path_enabled()) {
- struct sctp_transport *sp;
-
- list_for_each_entry(sp, &asoc->peer.transport_addr_list,
- transports) {
- trace_sctp_probe_path(sp, asoc);
- }
- }
),
TP_printk("asoc=%#llx mark=%#x bind_port=%d peer_port=%d pathmtu=%d "
diff --git a/include/trace/events/sock.h b/include/trace/events/sock.h
index 51fe9f6719eb..a966d4b5ab37 100644
--- a/include/trace/events/sock.h
+++ b/include/trace/events/sock.h
@@ -19,7 +19,8 @@
#define inet_protocol_names \
EM(IPPROTO_TCP) \
EM(IPPROTO_DCCP) \
- EMe(IPPROTO_SCTP)
+ EM(IPPROTO_SCTP) \
+ EMe(IPPROTO_MPTCP)
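(Editorial note, not part of the patch.) Adding IPPROTO_MPTCP to the symbolic map is also why the protocol field of inet_sock_set_state is widened from __u8 to __u16 in the next hunk: IPPROTO_MPTCP is 262 and no longer fits in a byte, whereas every previously traced protocol did. A one-line compile-time check makes the point; illustrative only.

	#include <linux/in.h>	/* IPPROTO_TCP, IPPROTO_SCTP, IPPROTO_MPTCP */
	_Static_assert(IPPROTO_MPTCP > 255, "IPPROTO_MPTCP needs more than a __u8");
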
#define tcp_state_names \
EM(TCP_ESTABLISHED) \
@@ -147,7 +148,7 @@ TRACE_EVENT(inet_sock_set_state,
__field(__u16, sport)
__field(__u16, dport)
__field(__u16, family)
- __field(__u8, protocol)
+ __field(__u16, protocol)
__array(__u8, saddr, 4)
__array(__u8, daddr, 4)
__array(__u8, saddr_v6, 16)
diff --git a/include/trace/events/sunrpc.h b/include/trace/events/sunrpc.h
index ffa3c51dbb1a..ee993575d2fa 100644
--- a/include/trace/events/sunrpc.h
+++ b/include/trace/events/sunrpc.h
@@ -14,6 +14,26 @@
#include <linux/net.h>
#include <linux/tracepoint.h>
+TRACE_DEFINE_ENUM(RPC_AUTH_OK);
+TRACE_DEFINE_ENUM(RPC_AUTH_BADCRED);
+TRACE_DEFINE_ENUM(RPC_AUTH_REJECTEDCRED);
+TRACE_DEFINE_ENUM(RPC_AUTH_BADVERF);
+TRACE_DEFINE_ENUM(RPC_AUTH_REJECTEDVERF);
+TRACE_DEFINE_ENUM(RPC_AUTH_TOOWEAK);
+TRACE_DEFINE_ENUM(RPCSEC_GSS_CREDPROBLEM);
+TRACE_DEFINE_ENUM(RPCSEC_GSS_CTXPROBLEM);
+
+#define rpc_show_auth_stat(status) \
+ __print_symbolic(status, \
+ { RPC_AUTH_OK, "AUTH_OK" }, \
+ { RPC_AUTH_BADCRED, "BADCRED" }, \
+ { RPC_AUTH_REJECTEDCRED, "REJECTEDCRED" }, \
+ { RPC_AUTH_BADVERF, "BADVERF" }, \
+ { RPC_AUTH_REJECTEDVERF, "REJECTEDVERF" }, \
+ { RPC_AUTH_TOOWEAK, "TOOWEAK" }, \
+ { RPCSEC_GSS_CREDPROBLEM, "GSS_CREDPROBLEM" }, \
+ { RPCSEC_GSS_CTXPROBLEM, "GSS_CTXPROBLEM" }) \
+
DECLARE_EVENT_CLASS(rpc_task_status,
TP_PROTO(const struct rpc_task *task),
@@ -165,6 +185,8 @@ DECLARE_EVENT_CLASS(rpc_task_running,
DEFINE_RPC_RUNNING_EVENT(begin);
DEFINE_RPC_RUNNING_EVENT(run_action);
DEFINE_RPC_RUNNING_EVENT(complete);
+DEFINE_RPC_RUNNING_EVENT(signalled);
+DEFINE_RPC_RUNNING_EVENT(end);
DECLARE_EVENT_CLASS(rpc_task_queued,
@@ -777,6 +799,99 @@ TRACE_EVENT(xprt_ping,
__get_str(addr), __get_str(port), __entry->status)
);
+DECLARE_EVENT_CLASS(xprt_writelock_event,
+ TP_PROTO(
+ const struct rpc_xprt *xprt, const struct rpc_task *task
+ ),
+
+ TP_ARGS(xprt, task),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, task_id)
+ __field(unsigned int, client_id)
+ __field(unsigned int, snd_task_id)
+ ),
+
+ TP_fast_assign(
+ if (task) {
+ __entry->task_id = task->tk_pid;
+ __entry->client_id = task->tk_client ?
+ task->tk_client->cl_clid : -1;
+ } else {
+ __entry->task_id = -1;
+ __entry->client_id = -1;
+ }
+ __entry->snd_task_id = xprt->snd_task ?
+ xprt->snd_task->tk_pid : -1;
+ ),
+
+ TP_printk("task:%u@%u snd_task:%u",
+ __entry->task_id, __entry->client_id,
+ __entry->snd_task_id)
+);
+
+#define DEFINE_WRITELOCK_EVENT(name) \
+ DEFINE_EVENT(xprt_writelock_event, xprt_##name, \
+ TP_PROTO( \
+ const struct rpc_xprt *xprt, \
+ const struct rpc_task *task \
+ ), \
+ TP_ARGS(xprt, task))
+
+DEFINE_WRITELOCK_EVENT(reserve_xprt);
+DEFINE_WRITELOCK_EVENT(release_xprt);
+
+DECLARE_EVENT_CLASS(xprt_cong_event,
+ TP_PROTO(
+ const struct rpc_xprt *xprt, const struct rpc_task *task
+ ),
+
+ TP_ARGS(xprt, task),
+
+ TP_STRUCT__entry(
+ __field(unsigned int, task_id)
+ __field(unsigned int, client_id)
+ __field(unsigned int, snd_task_id)
+ __field(unsigned long, cong)
+ __field(unsigned long, cwnd)
+ __field(bool, wait)
+ ),
+
+ TP_fast_assign(
+ if (task) {
+ __entry->task_id = task->tk_pid;
+ __entry->client_id = task->tk_client ?
+ task->tk_client->cl_clid : -1;
+ } else {
+ __entry->task_id = -1;
+ __entry->client_id = -1;
+ }
+ __entry->snd_task_id = xprt->snd_task ?
+ xprt->snd_task->tk_pid : -1;
+ __entry->cong = xprt->cong;
+ __entry->cwnd = xprt->cwnd;
+ __entry->wait = test_bit(XPRT_CWND_WAIT, &xprt->state);
+ ),
+
+ TP_printk("task:%u@%u snd_task:%u cong=%lu cwnd=%lu%s",
+ __entry->task_id, __entry->client_id,
+ __entry->snd_task_id, __entry->cong, __entry->cwnd,
+ __entry->wait ? " (wait)" : "")
+);
+
+#define DEFINE_CONG_EVENT(name) \
+ DEFINE_EVENT(xprt_cong_event, xprt_##name, \
+ TP_PROTO( \
+ const struct rpc_xprt *xprt, \
+ const struct rpc_task *task \
+ ), \
+ TP_ARGS(xprt, task))
+
+DEFINE_CONG_EVENT(reserve_cong);
+DEFINE_CONG_EVENT(release_cong);
+DEFINE_CONG_EVENT(get_cong);
+DEFINE_CONG_EVENT(put_cong);
+
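(Editorial sketch, not part of the patch.) Both classes share the (xprt, task) prototype and tolerate task == NULL, so one template covers lock handoff from softirq context as well as from a running task. A sketch of the intended call pattern in net/sunrpc/xprt.c; the surrounding logic is simplified and assumed, only the tracepoint names come from the macros above.

	/* Sketch only: fire the write-lock event when a task takes the
	 * transport send lock (the real reserve_xprt() does more bookkeeping). */
	static int example_reserve_xprt(struct rpc_xprt *xprt, struct rpc_task *task)
	{
		if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
			return 0;
		xprt->snd_task = task;
		trace_xprt_reserve_xprt(xprt, task);	/* task may be NULL */
		return 1;
	}
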
TRACE_EVENT(xs_stream_read_data,
TP_PROTO(struct rpc_xprt *xprt, ssize_t err, size_t total),
@@ -866,6 +981,41 @@ TRACE_EVENT(svc_recv,
show_rqstp_flags(__entry->flags))
);
+#define svc_show_status(status) \
+ __print_symbolic(status, \
+ { SVC_GARBAGE, "SVC_GARBAGE" }, \
+ { SVC_SYSERR, "SVC_SYSERR" }, \
+ { SVC_VALID, "SVC_VALID" }, \
+ { SVC_NEGATIVE, "SVC_NEGATIVE" }, \
+ { SVC_OK, "SVC_OK" }, \
+ { SVC_DROP, "SVC_DROP" }, \
+ { SVC_CLOSE, "SVC_CLOSE" }, \
+ { SVC_DENIED, "SVC_DENIED" }, \
+ { SVC_PENDING, "SVC_PENDING" }, \
+ { SVC_COMPLETE, "SVC_COMPLETE" })
+
+TRACE_EVENT(svc_authenticate,
+ TP_PROTO(const struct svc_rqst *rqst, int auth_res, __be32 auth_stat),
+
+ TP_ARGS(rqst, auth_res, auth_stat),
+
+ TP_STRUCT__entry(
+ __field(u32, xid)
+ __field(unsigned long, svc_status)
+ __field(unsigned long, auth_stat)
+ ),
+
+ TP_fast_assign(
+ __entry->xid = be32_to_cpu(rqst->rq_xid);
+ __entry->svc_status = auth_res;
+ __entry->auth_stat = be32_to_cpu(auth_stat);
+ ),
+
+ TP_printk("xid=0x%08x auth_res=%s auth_stat=%s",
+ __entry->xid, svc_show_status(__entry->svc_status),
+ rpc_show_auth_stat(__entry->auth_stat))
+);
+
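(Editorial sketch, not part of the patch.) svc_show_status() decodes the SVC_* verdict and rpc_show_auth_stat(), defined near the top of this file, decodes the wire-level auth_stat, so a single event captures both halves of an authentication decision. A sketch of where the call plausibly lands in the server-side auth switch; the surrounding function body is assumed.

	/* Sketch only: emit the event after the flavour-specific ->accept(). */
	int example_svc_authenticate(struct svc_rqst *rqstp, __be32 *authp)
	{
		struct auth_ops *aops;
		int rc;

		*authp = rpc_auth_ok;
		aops = example_lookup_flavour(rqstp);	/* hypothetical */
		rc = aops ? aops->accept(rqstp, authp) : SVC_DENIED;

		trace_svc_authenticate(rqstp, rc, *authp);
		return rc;
	}
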
TRACE_EVENT(svc_process,
TP_PROTO(const struct svc_rqst *rqst, const char *name),
diff --git a/include/trace/events/timer.h b/include/trace/events/timer.h
index b7a904825e7d..19abb6c3eb73 100644
--- a/include/trace/events/timer.h
+++ b/include/trace/events/timer.h
@@ -303,7 +303,7 @@ DEFINE_EVENT(hrtimer_class, hrtimer_cancel,
*/
TRACE_EVENT(itimer_state,
- TP_PROTO(int which, const struct itimerval *const value,
+ TP_PROTO(int which, const struct itimerspec64 *const value,
unsigned long long expires),
TP_ARGS(which, value, expires),
@@ -312,24 +312,24 @@ TRACE_EVENT(itimer_state,
__field( int, which )
__field( unsigned long long, expires )
__field( long, value_sec )
- __field( long, value_usec )
+ __field( long, value_nsec )
__field( long, interval_sec )
- __field( long, interval_usec )
+ __field( long, interval_nsec )
),
TP_fast_assign(
__entry->which = which;
__entry->expires = expires;
__entry->value_sec = value->it_value.tv_sec;
- __entry->value_usec = value->it_value.tv_usec;
+ __entry->value_nsec = value->it_value.tv_nsec;
__entry->interval_sec = value->it_interval.tv_sec;
- __entry->interval_usec = value->it_interval.tv_usec;
+ __entry->interval_nsec = value->it_interval.tv_nsec;
),
- TP_printk("which=%d expires=%llu it_value=%ld.%ld it_interval=%ld.%ld",
+ TP_printk("which=%d expires=%llu it_value=%ld.%06ld it_interval=%ld.%06ld",
__entry->which, __entry->expires,
- __entry->value_sec, __entry->value_usec,
- __entry->interval_sec, __entry->interval_usec)
+ __entry->value_sec, __entry->value_nsec / NSEC_PER_USEC,
+ __entry->interval_sec, __entry->interval_nsec / NSEC_PER_USEC)
);
/**
@@ -367,7 +367,8 @@ TRACE_EVENT(itimer_expire,
tick_dep_name(POSIX_TIMER) \
tick_dep_name(PERF_EVENTS) \
tick_dep_name(SCHED) \
- tick_dep_name_end(CLOCK_UNSTABLE)
+ tick_dep_name(CLOCK_UNSTABLE) \
+ tick_dep_name_end(RCU)
#undef tick_dep_name
#undef tick_dep_mask_name
diff --git a/include/trace/events/v4l2.h b/include/trace/events/v4l2.h
index 83860de120e3..248bc09bfc99 100644
--- a/include/trace/events/v4l2.h
+++ b/include/trace/events/v4l2.h
@@ -130,7 +130,7 @@ DECLARE_EVENT_CLASS(v4l2_event_class,
__entry->bytesused = buf->bytesused;
__entry->flags = buf->flags;
__entry->field = buf->field;
- __entry->timestamp = timeval_to_ns(&buf->timestamp);
+ __entry->timestamp = v4l2_buffer_get_timestamp(buf);
__entry->timecode_type = buf->timecode.type;
__entry->timecode_flags = buf->timecode.flags;
__entry->timecode_frames = buf->timecode.frames;
diff --git a/include/trace/events/wbt.h b/include/trace/events/wbt.h
index b048694070e2..37342a13c9cb 100644
--- a/include/trace/events/wbt.h
+++ b/include/trace/events/wbt.h
@@ -33,7 +33,8 @@ TRACE_EVENT(wbt_stat,
),
TP_fast_assign(
- strncpy(__entry->name, dev_name(bdi->dev), 32);
+ strlcpy(__entry->name, dev_name(bdi->dev),
+ ARRAY_SIZE(__entry->name));
__entry->rmean = stat[0].mean;
__entry->rmin = stat[0].min;
__entry->rmax = stat[0].max;
@@ -67,7 +68,8 @@ TRACE_EVENT(wbt_lat,
),
TP_fast_assign(
- strncpy(__entry->name, dev_name(bdi->dev), 32);
+ strlcpy(__entry->name, dev_name(bdi->dev),
+ ARRAY_SIZE(__entry->name));
__entry->lat = div_u64(lat, 1000);
),
@@ -103,7 +105,8 @@ TRACE_EVENT(wbt_step,
),
TP_fast_assign(
- strncpy(__entry->name, dev_name(bdi->dev), 32);
+ strlcpy(__entry->name, dev_name(bdi->dev),
+ ARRAY_SIZE(__entry->name));
__entry->msg = msg;
__entry->step = step;
__entry->window = div_u64(window, 1000);
@@ -138,7 +141,8 @@ TRACE_EVENT(wbt_timer,
),
TP_fast_assign(
- strncpy(__entry->name, dev_name(bdi->dev), 32);
+ strlcpy(__entry->name, dev_name(bdi->dev),
+ ARRAY_SIZE(__entry->name));
__entry->status = status;
__entry->step = step;
__entry->inflight = inflight;
diff --git a/include/trace/events/workqueue.h b/include/trace/events/workqueue.h
index e172549283be..9b8ae961acc5 100644
--- a/include/trace/events/workqueue.h
+++ b/include/trace/events/workqueue.h
@@ -8,23 +8,6 @@
#include <linux/tracepoint.h>
#include <linux/workqueue.h>
-DECLARE_EVENT_CLASS(workqueue_work,
-
- TP_PROTO(struct work_struct *work),
-
- TP_ARGS(work),
-
- TP_STRUCT__entry(
- __field( void *, work )
- ),
-
- TP_fast_assign(
- __entry->work = work;
- ),
-
- TP_printk("work struct %p", __entry->work)
-);
-
struct pool_workqueue;
/**
@@ -73,11 +56,21 @@ TRACE_EVENT(workqueue_queue_work,
* which happens immediately after queueing unless @max_active limit
* is reached.
*/
-DEFINE_EVENT(workqueue_work, workqueue_activate_work,
+TRACE_EVENT(workqueue_activate_work,
TP_PROTO(struct work_struct *work),
- TP_ARGS(work)
+ TP_ARGS(work),
+
+ TP_STRUCT__entry(
+ __field( void *, work )
+ ),
+
+ TP_fast_assign(
+ __entry->work = work;
+ ),
+
+ TP_printk("work struct %p", __entry->work)
);
/**
@@ -108,14 +101,27 @@ TRACE_EVENT(workqueue_execute_start,
/**
* workqueue_execute_end - called immediately after the workqueue callback
* @work: pointer to struct work_struct
+ * @function: pointer to worker function
*
* Allows to track workqueue execution.
*/
-DEFINE_EVENT(workqueue_work, workqueue_execute_end,
+TRACE_EVENT(workqueue_execute_end,
- TP_PROTO(struct work_struct *work),
+ TP_PROTO(struct work_struct *work, work_func_t function),
- TP_ARGS(work)
+ TP_ARGS(work, function),
+
+ TP_STRUCT__entry(
+ __field( void *, work )
+ __field( void *, function)
+ ),
+
+ TP_fast_assign(
+ __entry->work = work;
+ __entry->function = function;
+ ),
+
+ TP_printk("work struct %p: function %ps", __entry->work, __entry->function)
);
#endif /* _TRACE_WORKQUEUE_H */
diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h
index c2ce6480b4b1..d94def25e4dc 100644
--- a/include/trace/events/writeback.h
+++ b/include/trace/events/writeback.h
@@ -61,21 +61,21 @@ DECLARE_EVENT_CLASS(writeback_page_template,
TP_STRUCT__entry (
__array(char, name, 32)
- __field(unsigned long, ino)
+ __field(ino_t, ino)
__field(pgoff_t, index)
),
TP_fast_assign(
strscpy_pad(__entry->name,
- mapping ? dev_name(inode_to_bdi(mapping->host)->dev) : "(unknown)",
- 32);
+ bdi_dev_name(mapping ? inode_to_bdi(mapping->host) :
+ NULL), 32);
__entry->ino = mapping ? mapping->host->i_ino : 0;
__entry->index = page->index;
),
TP_printk("bdi %s: ino=%lu index=%lu",
__entry->name,
- __entry->ino,
+ (unsigned long)__entry->ino,
__entry->index
)
);
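(Editorial note, not part of the patch.) From here on the writeback events route every device-name lookup through bdi_dev_name() instead of open-coding the bdi->dev NULL check, and the ino_t fields are cast to unsigned long in TP_printk() because the width of ino_t varies between architectures. The helper itself is defined elsewhere in the backing-dev headers; its assumed shape is roughly:

	/* Assumed sketch of the helper relied on above; shown only to make
	 * the substitution readable, not taken from this patch. */
	static inline const char *bdi_dev_name(struct backing_dev_info *bdi)
	{
		if (!bdi || !bdi->dev)
			return "(unknown)";
		return dev_name(bdi->dev);
	}
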
@@ -102,7 +102,7 @@ DECLARE_EVENT_CLASS(writeback_dirty_inode_template,
TP_STRUCT__entry (
__array(char, name, 32)
- __field(unsigned long, ino)
+ __field(ino_t, ino)
__field(unsigned long, state)
__field(unsigned long, flags)
),
@@ -111,8 +111,7 @@ DECLARE_EVENT_CLASS(writeback_dirty_inode_template,
struct backing_dev_info *bdi = inode_to_bdi(inode);
/* may be called for files on pseudo FSes w/ unregistered bdi */
- strscpy_pad(__entry->name,
- bdi->dev ? dev_name(bdi->dev) : "(unknown)", 32);
+ strscpy_pad(__entry->name, bdi_dev_name(bdi), 32);
__entry->ino = inode->i_ino;
__entry->state = inode->i_state;
__entry->flags = flags;
@@ -120,7 +119,7 @@ DECLARE_EVENT_CLASS(writeback_dirty_inode_template,
TP_printk("bdi %s: ino=%lu state=%s flags=%s",
__entry->name,
- __entry->ino,
+ (unsigned long)__entry->ino,
show_inode_state(__entry->state),
show_inode_state(__entry->flags)
)
@@ -150,28 +149,28 @@ DEFINE_EVENT(writeback_dirty_inode_template, writeback_dirty_inode,
#ifdef CREATE_TRACE_POINTS
#ifdef CONFIG_CGROUP_WRITEBACK
-static inline unsigned int __trace_wb_assign_cgroup(struct bdi_writeback *wb)
+static inline ino_t __trace_wb_assign_cgroup(struct bdi_writeback *wb)
{
- return wb->memcg_css->cgroup->kn->id.ino;
+ return cgroup_ino(wb->memcg_css->cgroup);
}
-static inline unsigned int __trace_wbc_assign_cgroup(struct writeback_control *wbc)
+static inline ino_t __trace_wbc_assign_cgroup(struct writeback_control *wbc)
{
if (wbc->wb)
return __trace_wb_assign_cgroup(wbc->wb);
else
- return -1U;
+ return 1;
}
#else /* CONFIG_CGROUP_WRITEBACK */
-static inline unsigned int __trace_wb_assign_cgroup(struct bdi_writeback *wb)
+static inline ino_t __trace_wb_assign_cgroup(struct bdi_writeback *wb)
{
- return -1U;
+ return 1;
}
-static inline unsigned int __trace_wbc_assign_cgroup(struct writeback_control *wbc)
+static inline ino_t __trace_wbc_assign_cgroup(struct writeback_control *wbc)
{
- return -1U;
+ return 1;
}
#endif /* CONFIG_CGROUP_WRITEBACK */
@@ -187,22 +186,22 @@ TRACE_EVENT(inode_foreign_history,
TP_STRUCT__entry(
__array(char, name, 32)
- __field(unsigned long, ino)
- __field(unsigned int, cgroup_ino)
+ __field(ino_t, ino)
+ __field(ino_t, cgroup_ino)
__field(unsigned int, history)
),
TP_fast_assign(
- strncpy(__entry->name, dev_name(inode_to_bdi(inode)->dev), 32);
+ strncpy(__entry->name, bdi_dev_name(inode_to_bdi(inode)), 32);
__entry->ino = inode->i_ino;
__entry->cgroup_ino = __trace_wbc_assign_cgroup(wbc);
__entry->history = history;
),
- TP_printk("bdi %s: ino=%lu cgroup_ino=%u history=0x%x",
+ TP_printk("bdi %s: ino=%lu cgroup_ino=%lu history=0x%x",
__entry->name,
- __entry->ino,
- __entry->cgroup_ino,
+ (unsigned long)__entry->ino,
+ (unsigned long)__entry->cgroup_ino,
__entry->history
)
);
@@ -216,23 +215,23 @@ TRACE_EVENT(inode_switch_wbs,
TP_STRUCT__entry(
__array(char, name, 32)
- __field(unsigned long, ino)
- __field(unsigned int, old_cgroup_ino)
- __field(unsigned int, new_cgroup_ino)
+ __field(ino_t, ino)
+ __field(ino_t, old_cgroup_ino)
+ __field(ino_t, new_cgroup_ino)
),
TP_fast_assign(
- strncpy(__entry->name, dev_name(old_wb->bdi->dev), 32);
+ strncpy(__entry->name, bdi_dev_name(old_wb->bdi), 32);
__entry->ino = inode->i_ino;
__entry->old_cgroup_ino = __trace_wb_assign_cgroup(old_wb);
__entry->new_cgroup_ino = __trace_wb_assign_cgroup(new_wb);
),
- TP_printk("bdi %s: ino=%lu old_cgroup_ino=%u new_cgroup_ino=%u",
+ TP_printk("bdi %s: ino=%lu old_cgroup_ino=%lu new_cgroup_ino=%lu",
__entry->name,
- __entry->ino,
- __entry->old_cgroup_ino,
- __entry->new_cgroup_ino
+ (unsigned long)__entry->ino,
+ (unsigned long)__entry->old_cgroup_ino,
+ (unsigned long)__entry->new_cgroup_ino
)
);
@@ -245,31 +244,31 @@ TRACE_EVENT(track_foreign_dirty,
TP_STRUCT__entry(
__array(char, name, 32)
__field(u64, bdi_id)
- __field(unsigned long, ino)
+ __field(ino_t, ino)
__field(unsigned int, memcg_id)
- __field(unsigned int, cgroup_ino)
- __field(unsigned int, page_cgroup_ino)
+ __field(ino_t, cgroup_ino)
+ __field(ino_t, page_cgroup_ino)
),
TP_fast_assign(
struct address_space *mapping = page_mapping(page);
struct inode *inode = mapping ? mapping->host : NULL;
- strncpy(__entry->name, dev_name(wb->bdi->dev), 32);
+ strncpy(__entry->name, bdi_dev_name(wb->bdi), 32);
__entry->bdi_id = wb->bdi->id;
__entry->ino = inode ? inode->i_ino : 0;
__entry->memcg_id = wb->memcg_css->id;
__entry->cgroup_ino = __trace_wb_assign_cgroup(wb);
- __entry->page_cgroup_ino = page->mem_cgroup->css.cgroup->kn->id.ino;
+ __entry->page_cgroup_ino = cgroup_ino(page->mem_cgroup->css.cgroup);
),
- TP_printk("bdi %s[%llu]: ino=%lu memcg_id=%u cgroup_ino=%u page_cgroup_ino=%u",
+ TP_printk("bdi %s[%llu]: ino=%lu memcg_id=%u cgroup_ino=%lu page_cgroup_ino=%lu",
__entry->name,
__entry->bdi_id,
- __entry->ino,
+ (unsigned long)__entry->ino,
__entry->memcg_id,
- __entry->cgroup_ino,
- __entry->page_cgroup_ino
+ (unsigned long)__entry->cgroup_ino,
+ (unsigned long)__entry->page_cgroup_ino
)
);
@@ -282,21 +281,21 @@ TRACE_EVENT(flush_foreign,
TP_STRUCT__entry(
__array(char, name, 32)
- __field(unsigned int, cgroup_ino)
+ __field(ino_t, cgroup_ino)
__field(unsigned int, frn_bdi_id)
__field(unsigned int, frn_memcg_id)
),
TP_fast_assign(
- strncpy(__entry->name, dev_name(wb->bdi->dev), 32);
+ strncpy(__entry->name, bdi_dev_name(wb->bdi), 32);
__entry->cgroup_ino = __trace_wb_assign_cgroup(wb);
__entry->frn_bdi_id = frn_bdi_id;
__entry->frn_memcg_id = frn_memcg_id;
),
- TP_printk("bdi %s: cgroup_ino=%u frn_bdi_id=%u frn_memcg_id=%u",
+ TP_printk("bdi %s: cgroup_ino=%lu frn_bdi_id=%u frn_memcg_id=%u",
__entry->name,
- __entry->cgroup_ino,
+ (unsigned long)__entry->cgroup_ino,
__entry->frn_bdi_id,
__entry->frn_memcg_id
)
@@ -311,24 +310,24 @@ DECLARE_EVENT_CLASS(writeback_write_inode_template,
TP_STRUCT__entry (
__array(char, name, 32)
- __field(unsigned long, ino)
+ __field(ino_t, ino)
__field(int, sync_mode)
- __field(unsigned int, cgroup_ino)
+ __field(ino_t, cgroup_ino)
),
TP_fast_assign(
strscpy_pad(__entry->name,
- dev_name(inode_to_bdi(inode)->dev), 32);
+ bdi_dev_name(inode_to_bdi(inode)), 32);
__entry->ino = inode->i_ino;
__entry->sync_mode = wbc->sync_mode;
__entry->cgroup_ino = __trace_wbc_assign_cgroup(wbc);
),
- TP_printk("bdi %s: ino=%lu sync_mode=%d cgroup_ino=%u",
+ TP_printk("bdi %s: ino=%lu sync_mode=%d cgroup_ino=%lu",
__entry->name,
- __entry->ino,
+ (unsigned long)__entry->ino,
__entry->sync_mode,
- __entry->cgroup_ino
+ (unsigned long)__entry->cgroup_ino
)
);
@@ -358,12 +357,10 @@ DECLARE_EVENT_CLASS(writeback_work_class,
__field(int, range_cyclic)
__field(int, for_background)
__field(int, reason)
- __field(unsigned int, cgroup_ino)
+ __field(ino_t, cgroup_ino)
),
TP_fast_assign(
- strscpy_pad(__entry->name,
- wb->bdi->dev ? dev_name(wb->bdi->dev) :
- "(unknown)", 32);
+ strscpy_pad(__entry->name, bdi_dev_name(wb->bdi), 32);
__entry->nr_pages = work->nr_pages;
__entry->sb_dev = work->sb ? work->sb->s_dev : 0;
__entry->sync_mode = work->sync_mode;
@@ -374,7 +371,7 @@ DECLARE_EVENT_CLASS(writeback_work_class,
__entry->cgroup_ino = __trace_wb_assign_cgroup(wb);
),
TP_printk("bdi %s: sb_dev %d:%d nr_pages=%ld sync_mode=%d "
- "kupdate=%d range_cyclic=%d background=%d reason=%s cgroup_ino=%u",
+ "kupdate=%d range_cyclic=%d background=%d reason=%s cgroup_ino=%lu",
__entry->name,
MAJOR(__entry->sb_dev), MINOR(__entry->sb_dev),
__entry->nr_pages,
@@ -383,7 +380,7 @@ DECLARE_EVENT_CLASS(writeback_work_class,
__entry->range_cyclic,
__entry->for_background,
__print_symbolic(__entry->reason, WB_WORK_REASON),
- __entry->cgroup_ino
+ (unsigned long)__entry->cgroup_ino
)
);
#define DEFINE_WRITEBACK_WORK_EVENT(name) \
@@ -413,15 +410,15 @@ DECLARE_EVENT_CLASS(writeback_class,
TP_ARGS(wb),
TP_STRUCT__entry(
__array(char, name, 32)
- __field(unsigned int, cgroup_ino)
+ __field(ino_t, cgroup_ino)
),
TP_fast_assign(
- strscpy_pad(__entry->name, dev_name(wb->bdi->dev), 32);
+ strscpy_pad(__entry->name, bdi_dev_name(wb->bdi), 32);
__entry->cgroup_ino = __trace_wb_assign_cgroup(wb);
),
- TP_printk("bdi %s: cgroup_ino=%u",
+ TP_printk("bdi %s: cgroup_ino=%lu",
__entry->name,
- __entry->cgroup_ino
+ (unsigned long)__entry->cgroup_ino
)
);
#define DEFINE_WRITEBACK_EVENT(name) \
@@ -438,7 +435,7 @@ TRACE_EVENT(writeback_bdi_register,
__array(char, name, 32)
),
TP_fast_assign(
- strscpy_pad(__entry->name, dev_name(bdi->dev), 32);
+ strscpy_pad(__entry->name, bdi_dev_name(bdi), 32);
),
TP_printk("bdi %s",
__entry->name
@@ -459,11 +456,11 @@ DECLARE_EVENT_CLASS(wbc_class,
__field(int, range_cyclic)
__field(long, range_start)
__field(long, range_end)
- __field(unsigned int, cgroup_ino)
+ __field(ino_t, cgroup_ino)
),
TP_fast_assign(
- strscpy_pad(__entry->name, dev_name(bdi->dev), 32);
+ strscpy_pad(__entry->name, bdi_dev_name(bdi), 32);
__entry->nr_to_write = wbc->nr_to_write;
__entry->pages_skipped = wbc->pages_skipped;
__entry->sync_mode = wbc->sync_mode;
@@ -478,7 +475,7 @@ DECLARE_EVENT_CLASS(wbc_class,
TP_printk("bdi %s: towrt=%ld skip=%ld mode=%d kupd=%d "
"bgrd=%d reclm=%d cyclic=%d "
- "start=0x%lx end=0x%lx cgroup_ino=%u",
+ "start=0x%lx end=0x%lx cgroup_ino=%lu",
__entry->name,
__entry->nr_to_write,
__entry->pages_skipped,
@@ -489,7 +486,7 @@ DECLARE_EVENT_CLASS(wbc_class,
__entry->range_cyclic,
__entry->range_start,
__entry->range_end,
- __entry->cgroup_ino
+ (unsigned long)__entry->cgroup_ino
)
)
@@ -510,11 +507,11 @@ TRACE_EVENT(writeback_queue_io,
__field(long, age)
__field(int, moved)
__field(int, reason)
- __field(unsigned int, cgroup_ino)
+ __field(ino_t, cgroup_ino)
),
TP_fast_assign(
unsigned long *older_than_this = work->older_than_this;
- strscpy_pad(__entry->name, dev_name(wb->bdi->dev), 32);
+ strscpy_pad(__entry->name, bdi_dev_name(wb->bdi), 32);
__entry->older = older_than_this ? *older_than_this : 0;
__entry->age = older_than_this ?
(jiffies - *older_than_this) * 1000 / HZ : -1;
@@ -522,13 +519,13 @@ TRACE_EVENT(writeback_queue_io,
__entry->reason = work->reason;
__entry->cgroup_ino = __trace_wb_assign_cgroup(wb);
),
- TP_printk("bdi %s: older=%lu age=%ld enqueue=%d reason=%s cgroup_ino=%u",
+ TP_printk("bdi %s: older=%lu age=%ld enqueue=%d reason=%s cgroup_ino=%lu",
__entry->name,
__entry->older, /* older_than_this in jiffies */
__entry->age, /* older_than_this in relative milliseconds */
__entry->moved,
__print_symbolic(__entry->reason, WB_WORK_REASON),
- __entry->cgroup_ino
+ (unsigned long)__entry->cgroup_ino
)
);
@@ -596,11 +593,11 @@ TRACE_EVENT(bdi_dirty_ratelimit,
__field(unsigned long, dirty_ratelimit)
__field(unsigned long, task_ratelimit)
__field(unsigned long, balanced_dirty_ratelimit)
- __field(unsigned int, cgroup_ino)
+ __field(ino_t, cgroup_ino)
),
TP_fast_assign(
- strscpy_pad(__entry->bdi, dev_name(wb->bdi->dev), 32);
+ strscpy_pad(__entry->bdi, bdi_dev_name(wb->bdi), 32);
__entry->write_bw = KBps(wb->write_bandwidth);
__entry->avg_write_bw = KBps(wb->avg_write_bandwidth);
__entry->dirty_rate = KBps(dirty_rate);
@@ -614,7 +611,7 @@ TRACE_EVENT(bdi_dirty_ratelimit,
TP_printk("bdi %s: "
"write_bw=%lu awrite_bw=%lu dirty_rate=%lu "
"dirty_ratelimit=%lu task_ratelimit=%lu "
- "balanced_dirty_ratelimit=%lu cgroup_ino=%u",
+ "balanced_dirty_ratelimit=%lu cgroup_ino=%lu",
__entry->bdi,
__entry->write_bw, /* write bandwidth */
__entry->avg_write_bw, /* avg write bandwidth */
@@ -622,7 +619,7 @@ TRACE_EVENT(bdi_dirty_ratelimit,
__entry->dirty_ratelimit, /* base ratelimit */
__entry->task_ratelimit, /* ratelimit with position control */
__entry->balanced_dirty_ratelimit, /* the balanced ratelimit */
- __entry->cgroup_ino
+ (unsigned long)__entry->cgroup_ino
)
);
@@ -660,12 +657,12 @@ TRACE_EVENT(balance_dirty_pages,
__field( long, pause)
__field(unsigned long, period)
__field( long, think)
- __field(unsigned int, cgroup_ino)
+ __field(ino_t, cgroup_ino)
),
TP_fast_assign(
unsigned long freerun = (thresh + bg_thresh) / 2;
- strscpy_pad(__entry->bdi, dev_name(wb->bdi->dev), 32);
+ strscpy_pad(__entry->bdi, bdi_dev_name(wb->bdi), 32);
__entry->limit = global_wb_domain.dirty_limit;
__entry->setpoint = (global_wb_domain.dirty_limit +
@@ -692,7 +689,7 @@ TRACE_EVENT(balance_dirty_pages,
"bdi_setpoint=%lu bdi_dirty=%lu "
"dirty_ratelimit=%lu task_ratelimit=%lu "
"dirtied=%u dirtied_pause=%u "
- "paused=%lu pause=%ld period=%lu think=%ld cgroup_ino=%u",
+ "paused=%lu pause=%ld period=%lu think=%ld cgroup_ino=%lu",
__entry->bdi,
__entry->limit,
__entry->setpoint,
@@ -707,7 +704,7 @@ TRACE_EVENT(balance_dirty_pages,
__entry->pause, /* ms */
__entry->period, /* ms */
__entry->think, /* ms */
- __entry->cgroup_ino
+ (unsigned long)__entry->cgroup_ino
)
);
@@ -718,28 +715,28 @@ TRACE_EVENT(writeback_sb_inodes_requeue,
TP_STRUCT__entry(
__array(char, name, 32)
- __field(unsigned long, ino)
+ __field(ino_t, ino)
__field(unsigned long, state)
__field(unsigned long, dirtied_when)
- __field(unsigned int, cgroup_ino)
+ __field(ino_t, cgroup_ino)
),
TP_fast_assign(
strscpy_pad(__entry->name,
- dev_name(inode_to_bdi(inode)->dev), 32);
+ bdi_dev_name(inode_to_bdi(inode)), 32);
__entry->ino = inode->i_ino;
__entry->state = inode->i_state;
__entry->dirtied_when = inode->dirtied_when;
__entry->cgroup_ino = __trace_wb_assign_cgroup(inode_to_wb(inode));
),
- TP_printk("bdi %s: ino=%lu state=%s dirtied_when=%lu age=%lu cgroup_ino=%u",
+ TP_printk("bdi %s: ino=%lu state=%s dirtied_when=%lu age=%lu cgroup_ino=%lu",
__entry->name,
- __entry->ino,
+ (unsigned long)__entry->ino,
show_inode_state(__entry->state),
__entry->dirtied_when,
(jiffies - __entry->dirtied_when) / HZ,
- __entry->cgroup_ino
+ (unsigned long)__entry->cgroup_ino
)
);
@@ -789,18 +786,18 @@ DECLARE_EVENT_CLASS(writeback_single_inode_template,
TP_STRUCT__entry(
__array(char, name, 32)
- __field(unsigned long, ino)
+ __field(ino_t, ino)
__field(unsigned long, state)
__field(unsigned long, dirtied_when)
__field(unsigned long, writeback_index)
__field(long, nr_to_write)
__field(unsigned long, wrote)
- __field(unsigned int, cgroup_ino)
+ __field(ino_t, cgroup_ino)
),
TP_fast_assign(
strscpy_pad(__entry->name,
- dev_name(inode_to_bdi(inode)->dev), 32);
+ bdi_dev_name(inode_to_bdi(inode)), 32);
__entry->ino = inode->i_ino;
__entry->state = inode->i_state;
__entry->dirtied_when = inode->dirtied_when;
@@ -811,16 +808,16 @@ DECLARE_EVENT_CLASS(writeback_single_inode_template,
),
TP_printk("bdi %s: ino=%lu state=%s dirtied_when=%lu age=%lu "
- "index=%lu to_write=%ld wrote=%lu cgroup_ino=%u",
+ "index=%lu to_write=%ld wrote=%lu cgroup_ino=%lu",
__entry->name,
- __entry->ino,
+ (unsigned long)__entry->ino,
show_inode_state(__entry->state),
__entry->dirtied_when,
(jiffies - __entry->dirtied_when) / HZ,
__entry->writeback_index,
__entry->nr_to_write,
__entry->wrote,
- __entry->cgroup_ino
+ (unsigned long)__entry->cgroup_ino
)
);
@@ -845,7 +842,7 @@ DECLARE_EVENT_CLASS(writeback_inode_template,
TP_STRUCT__entry(
__field( dev_t, dev )
- __field(unsigned long, ino )
+ __field( ino_t, ino )
__field(unsigned long, state )
__field( __u16, mode )
__field(unsigned long, dirtied_when )
@@ -861,7 +858,7 @@ DECLARE_EVENT_CLASS(writeback_inode_template,
TP_printk("dev %d,%d ino %lu dirtied %lu state %s mode 0%o",
MAJOR(__entry->dev), MINOR(__entry->dev),
- __entry->ino, __entry->dirtied_when,
+ (unsigned long)__entry->ino, __entry->dirtied_when,
show_inode_state(__entry->state), __entry->mode)
);
diff --git a/include/trace/events/xdp.h b/include/trace/events/xdp.h
index 8c8420230a10..b95d65e8c628 100644
--- a/include/trace/events/xdp.h
+++ b/include/trace/events/xdp.h
@@ -22,7 +22,7 @@
#define __XDP_ACT_SYM_FN(x) \
{ XDP_##x, #x },
#define __XDP_ACT_SYM_TAB \
- __XDP_ACT_MAP(__XDP_ACT_SYM_FN) { -1, 0 }
+ __XDP_ACT_MAP(__XDP_ACT_SYM_FN) { -1, NULL }
__XDP_ACT_MAP(__XDP_ACT_TP_FN)
TRACE_EVENT(xdp_exception,
@@ -79,14 +79,26 @@ TRACE_EVENT(xdp_bulk_tx,
__entry->sent, __entry->drops, __entry->err)
);
+#ifndef __DEVMAP_OBJ_TYPE
+#define __DEVMAP_OBJ_TYPE
+struct _bpf_dtab_netdev {
+ struct net_device *dev;
+};
+#endif /* __DEVMAP_OBJ_TYPE */
+
+#define devmap_ifindex(tgt, map) \
+ (((map->map_type == BPF_MAP_TYPE_DEVMAP || \
+ map->map_type == BPF_MAP_TYPE_DEVMAP_HASH)) ? \
+ ((struct _bpf_dtab_netdev *)tgt)->dev->ifindex : 0)
+
DECLARE_EVENT_CLASS(xdp_redirect_template,
TP_PROTO(const struct net_device *dev,
const struct bpf_prog *xdp,
- int to_ifindex, int err,
- const struct bpf_map *map, u32 map_index),
+ const void *tgt, int err,
+ const struct bpf_map *map, u32 index),
- TP_ARGS(dev, xdp, to_ifindex, err, map, map_index),
+ TP_ARGS(dev, xdp, tgt, err, map, index),
TP_STRUCT__entry(
__field(int, prog_id)
@@ -103,90 +115,65 @@ DECLARE_EVENT_CLASS(xdp_redirect_template,
__entry->act = XDP_REDIRECT;
__entry->ifindex = dev->ifindex;
__entry->err = err;
- __entry->to_ifindex = to_ifindex;
+ __entry->to_ifindex = map ? devmap_ifindex(tgt, map) :
+ index;
__entry->map_id = map ? map->id : 0;
- __entry->map_index = map_index;
+ __entry->map_index = map ? index : 0;
),
- TP_printk("prog_id=%d action=%s ifindex=%d to_ifindex=%d err=%d",
+ TP_printk("prog_id=%d action=%s ifindex=%d to_ifindex=%d err=%d"
+ " map_id=%d map_index=%d",
__entry->prog_id,
__print_symbolic(__entry->act, __XDP_ACT_SYM_TAB),
__entry->ifindex, __entry->to_ifindex,
- __entry->err)
+ __entry->err, __entry->map_id, __entry->map_index)
);
DEFINE_EVENT(xdp_redirect_template, xdp_redirect,
TP_PROTO(const struct net_device *dev,
const struct bpf_prog *xdp,
- int to_ifindex, int err,
- const struct bpf_map *map, u32 map_index),
- TP_ARGS(dev, xdp, to_ifindex, err, map, map_index)
+ const void *tgt, int err,
+ const struct bpf_map *map, u32 index),
+ TP_ARGS(dev, xdp, tgt, err, map, index)
);
DEFINE_EVENT(xdp_redirect_template, xdp_redirect_err,
TP_PROTO(const struct net_device *dev,
const struct bpf_prog *xdp,
- int to_ifindex, int err,
- const struct bpf_map *map, u32 map_index),
- TP_ARGS(dev, xdp, to_ifindex, err, map, map_index)
+ const void *tgt, int err,
+ const struct bpf_map *map, u32 index),
+ TP_ARGS(dev, xdp, tgt, err, map, index)
);
#define _trace_xdp_redirect(dev, xdp, to) \
- trace_xdp_redirect(dev, xdp, to, 0, NULL, 0);
+ trace_xdp_redirect(dev, xdp, NULL, 0, NULL, to);
#define _trace_xdp_redirect_err(dev, xdp, to, err) \
- trace_xdp_redirect_err(dev, xdp, to, err, NULL, 0);
+ trace_xdp_redirect_err(dev, xdp, NULL, err, NULL, to);
+
+#define _trace_xdp_redirect_map(dev, xdp, to, map, index) \
+ trace_xdp_redirect(dev, xdp, to, 0, map, index);
-DEFINE_EVENT_PRINT(xdp_redirect_template, xdp_redirect_map,
+#define _trace_xdp_redirect_map_err(dev, xdp, to, map, index, err) \
+ trace_xdp_redirect_err(dev, xdp, to, err, map, index);
+
+/* not used anymore, but kept around so as not to break old programs */
+DEFINE_EVENT(xdp_redirect_template, xdp_redirect_map,
TP_PROTO(const struct net_device *dev,
const struct bpf_prog *xdp,
- int to_ifindex, int err,
- const struct bpf_map *map, u32 map_index),
- TP_ARGS(dev, xdp, to_ifindex, err, map, map_index),
- TP_printk("prog_id=%d action=%s ifindex=%d to_ifindex=%d err=%d"
- " map_id=%d map_index=%d",
- __entry->prog_id,
- __print_symbolic(__entry->act, __XDP_ACT_SYM_TAB),
- __entry->ifindex, __entry->to_ifindex,
- __entry->err,
- __entry->map_id, __entry->map_index)
+ const void *tgt, int err,
+ const struct bpf_map *map, u32 index),
+ TP_ARGS(dev, xdp, tgt, err, map, index)
);
-DEFINE_EVENT_PRINT(xdp_redirect_template, xdp_redirect_map_err,
+DEFINE_EVENT(xdp_redirect_template, xdp_redirect_map_err,
TP_PROTO(const struct net_device *dev,
const struct bpf_prog *xdp,
- int to_ifindex, int err,
- const struct bpf_map *map, u32 map_index),
- TP_ARGS(dev, xdp, to_ifindex, err, map, map_index),
- TP_printk("prog_id=%d action=%s ifindex=%d to_ifindex=%d err=%d"
- " map_id=%d map_index=%d",
- __entry->prog_id,
- __print_symbolic(__entry->act, __XDP_ACT_SYM_TAB),
- __entry->ifindex, __entry->to_ifindex,
- __entry->err,
- __entry->map_id, __entry->map_index)
+ const void *tgt, int err,
+ const struct bpf_map *map, u32 index),
+ TP_ARGS(dev, xdp, tgt, err, map, index)
);
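(Editorial sketch, not part of the patch.) With the template now taking an opaque target pointer, the wrapper macros above pass the devmap entry straight through and the event derives to_ifindex via devmap_ifindex() only when a map is involved. A sketch of how a redirect path might report success or failure through these wrappers; the helper name and surrounding code are assumptions. Note the macros carry their own trailing semicolon, hence the braces.

	/* Sketch only: reporting a devmap redirect outcome. */
	static inline void example_report_map_redirect(const struct net_device *dev,
						       const struct bpf_prog *prog,
						       void *fwd, struct bpf_map *map,
						       u32 index, int err)
	{
		if (unlikely(err)) {
			_trace_xdp_redirect_map_err(dev, prog, fwd, map, index, err);
		} else {
			_trace_xdp_redirect_map(dev, prog, fwd, map, index);
		}
	}
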
-#ifndef __DEVMAP_OBJ_TYPE
-#define __DEVMAP_OBJ_TYPE
-struct _bpf_dtab_netdev {
- struct net_device *dev;
-};
-#endif /* __DEVMAP_OBJ_TYPE */
-
-#define devmap_ifindex(fwd, map) \
- ((map->map_type == BPF_MAP_TYPE_DEVMAP || \
- map->map_type == BPF_MAP_TYPE_DEVMAP_HASH) ? \
- ((struct _bpf_dtab_netdev *)fwd)->dev->ifindex : 0)
-
-#define _trace_xdp_redirect_map(dev, xdp, fwd, map, idx) \
- trace_xdp_redirect_map(dev, xdp, devmap_ifindex(fwd, map), \
- 0, map, idx)
-
-#define _trace_xdp_redirect_map_err(dev, xdp, fwd, map, idx, err) \
- trace_xdp_redirect_map_err(dev, xdp, devmap_ifindex(fwd, map), \
- err, map, idx)
-
TRACE_EVENT(xdp_cpumap_kthread,
TP_PROTO(int map_id, unsigned int processed, unsigned int drops,
@@ -259,43 +246,38 @@ TRACE_EVENT(xdp_cpumap_enqueue,
TRACE_EVENT(xdp_devmap_xmit,
- TP_PROTO(const struct bpf_map *map, u32 map_index,
- int sent, int drops,
- const struct net_device *from_dev,
- const struct net_device *to_dev, int err),
+ TP_PROTO(const struct net_device *from_dev,
+ const struct net_device *to_dev,
+ int sent, int drops, int err),
- TP_ARGS(map, map_index, sent, drops, from_dev, to_dev, err),
+ TP_ARGS(from_dev, to_dev, sent, drops, err),
TP_STRUCT__entry(
- __field(int, map_id)
+ __field(int, from_ifindex)
__field(u32, act)
- __field(u32, map_index)
+ __field(int, to_ifindex)
__field(int, drops)
__field(int, sent)
- __field(int, from_ifindex)
- __field(int, to_ifindex)
__field(int, err)
),
TP_fast_assign(
- __entry->map_id = map->id;
+ __entry->from_ifindex = from_dev->ifindex;
__entry->act = XDP_REDIRECT;
- __entry->map_index = map_index;
+ __entry->to_ifindex = to_dev->ifindex;
__entry->drops = drops;
__entry->sent = sent;
- __entry->from_ifindex = from_dev->ifindex;
- __entry->to_ifindex = to_dev->ifindex;
__entry->err = err;
),
TP_printk("ndo_xdp_xmit"
- " map_id=%d map_index=%d action=%s"
+ " from_ifindex=%d to_ifindex=%d action=%s"
" sent=%d drops=%d"
- " from_ifindex=%d to_ifindex=%d err=%d",
- __entry->map_id, __entry->map_index,
+ " err=%d",
+ __entry->from_ifindex, __entry->to_ifindex,
__print_symbolic(__entry->act, __XDP_ACT_SYM_TAB),
__entry->sent, __entry->drops,
- __entry->from_ifindex, __entry->to_ifindex, __entry->err)
+ __entry->err)
);
/* Expect users already include <net/xdp.h>, but not xdp_priv.h */
@@ -317,19 +299,15 @@ __MEM_TYPE_MAP(__MEM_TYPE_TP_FN)
TRACE_EVENT(mem_disconnect,
- TP_PROTO(const struct xdp_mem_allocator *xa,
- bool safe_to_remove, bool force),
+ TP_PROTO(const struct xdp_mem_allocator *xa),
- TP_ARGS(xa, safe_to_remove, force),
+ TP_ARGS(xa),
TP_STRUCT__entry(
__field(const struct xdp_mem_allocator *, xa)
__field(u32, mem_id)
__field(u32, mem_type)
__field(const void *, allocator)
- __field(bool, safe_to_remove)
- __field(bool, force)
- __field(int, disconnect_cnt)
),
TP_fast_assign(
@@ -337,19 +315,12 @@ TRACE_EVENT(mem_disconnect,
__entry->mem_id = xa->mem.id;
__entry->mem_type = xa->mem.type;
__entry->allocator = xa->allocator;
- __entry->safe_to_remove = safe_to_remove;
- __entry->force = force;
- __entry->disconnect_cnt = xa->disconnect_cnt;
),
- TP_printk("mem_id=%d mem_type=%s allocator=%p"
- " safe_to_remove=%s force=%s disconnect_cnt=%d",
+ TP_printk("mem_id=%d mem_type=%s allocator=%p",
__entry->mem_id,
__print_symbolic(__entry->mem_type, __MEM_TYPE_SYM_TAB),
- __entry->allocator,
- __entry->safe_to_remove ? "true" : "false",
- __entry->force ? "true" : "false",
- __entry->disconnect_cnt
+ __entry->allocator
)
);
diff --git a/include/trace/events/xen.h b/include/trace/events/xen.h
index 9a0e8af21310..a5ccfa67bc5c 100644
--- a/include/trace/events/xen.h
+++ b/include/trace/events/xen.h
@@ -66,7 +66,11 @@ TRACE_EVENT(xen_mc_callback,
TP_PROTO(xen_mc_callback_fn_t fn, void *data),
TP_ARGS(fn, data),
TP_STRUCT__entry(
- __field(xen_mc_callback_fn_t, fn)
+ /*
+ * Use field_struct to avoid is_signed_type()
+ * comparison of a function pointer.
+ */
+ __field_struct(xen_mc_callback_fn_t, fn)
__field(void *, data)
),
TP_fast_assign(
diff --git a/include/trace/trace_events.h b/include/trace/trace_events.h
index 4ecdfe2e3580..502c7be50b8d 100644
--- a/include/trace/trace_events.h
+++ b/include/trace/trace_events.h
@@ -2,7 +2,8 @@
/*
* Stage 1 of the trace events.
*
- * Override the macros in <trace/trace_events.h> to include the following:
+ * Override the macros in the event tracepoint header <trace/events/XXX.h>
+ * to include the following:
*
* struct trace_event_raw_<call> {
* struct trace_entry ent;
@@ -223,7 +224,8 @@ TRACE_MAKE_SYSTEM_STR();
/*
* Stage 3 of the trace events.
*
- * Override the macros in <trace/trace_events.h> to include the following:
+ * Override the macros in the event tracepoint header <trace/events/XXX.h>
+ * to include the following:
*
* enum print_line_t
* trace_raw_output_<call>(struct trace_iterator *iter, int flags)
@@ -340,6 +342,12 @@ TRACE_MAKE_SYSTEM_STR();
trace_print_array_seq(p, array, count, el_size); \
})
+#undef __print_hex_dump
+#define __print_hex_dump(prefix_str, prefix_type, \
+ rowsize, groupsize, buf, len, ascii) \
+ trace_print_hex_dump_seq(p, prefix_str, prefix_type, \
+ rowsize, groupsize, buf, len, ascii)
+
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
static notrace enum print_line_t \
@@ -394,22 +402,16 @@ static struct trace_event_functions trace_event_type_funcs_##call = { \
#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
#undef __field_ext
-#define __field_ext(type, item, filter_type) \
- ret = trace_define_field(event_call, #type, #item, \
- offsetof(typeof(field), item), \
- sizeof(field.item), \
- is_signed_type(type), filter_type); \
- if (ret) \
- return ret;
+#define __field_ext(_type, _item, _filter_type) { \
+ .type = #_type, .name = #_item, \
+ .size = sizeof(_type), .align = __alignof__(_type), \
+ .is_signed = is_signed_type(_type), .filter_type = _filter_type },
#undef __field_struct_ext
-#define __field_struct_ext(type, item, filter_type) \
- ret = trace_define_field(event_call, #type, #item, \
- offsetof(typeof(field), item), \
- sizeof(field.item), \
- 0, filter_type); \
- if (ret) \
- return ret;
+#define __field_struct_ext(_type, _item, _filter_type) { \
+ .type = #_type, .name = #_item, \
+ .size = sizeof(_type), .align = __alignof__(_type), \
+ 0, .filter_type = _filter_type },
#undef __field
#define __field(type, item) __field_ext(type, item, FILTER_OTHER)
@@ -418,25 +420,16 @@ static struct trace_event_functions trace_event_type_funcs_##call = { \
#define __field_struct(type, item) __field_struct_ext(type, item, FILTER_OTHER)
#undef __array
-#define __array(type, item, len) \
- do { \
- char *type_str = #type"["__stringify(len)"]"; \
- BUILD_BUG_ON(len > MAX_FILTER_STR_VAL); \
- BUILD_BUG_ON(len <= 0); \
- ret = trace_define_field(event_call, type_str, #item, \
- offsetof(typeof(field), item), \
- sizeof(field.item), \
- is_signed_type(type), FILTER_OTHER); \
- if (ret) \
- return ret; \
- } while (0);
+#define __array(_type, _item, _len) { \
+ .type = #_type"["__stringify(_len)"]", .name = #_item, \
+ .size = sizeof(_type[_len]), .align = __alignof__(_type), \
+ .is_signed = is_signed_type(_type), .filter_type = FILTER_OTHER },
#undef __dynamic_array
-#define __dynamic_array(type, item, len) \
- ret = trace_define_field(event_call, "__data_loc " #type "[]", #item, \
- offsetof(typeof(field), __data_loc_##item), \
- sizeof(field.__data_loc_##item), \
- is_signed_type(type), FILTER_OTHER);
+#define __dynamic_array(_type, _item, _len) { \
+ .type = "__data_loc " #_type "[]", .name = #_item, \
+ .size = 4, .align = 4, \
+ .is_signed = is_signed_type(_type), .filter_type = FILTER_OTHER },
#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)
@@ -446,16 +439,9 @@ static struct trace_event_functions trace_event_type_funcs_##call = { \
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, func, print) \
-static int notrace __init \
-trace_event_define_fields_##call(struct trace_event_call *event_call) \
-{ \
- struct trace_event_raw_##call field; \
- int ret; \
- \
- tstruct; \
- \
- return ret; \
-}
+static struct trace_event_fields trace_event_fields_##call[] = { \
+ tstruct \
+ {} };
#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args)
@@ -549,7 +535,8 @@ static inline notrace int trace_event_get_offsets_##call( \
/*
* Stage 4 of the trace events.
*
- * Override the macros in <trace/trace_events.h> to include the following:
+ * Override the macros in the event tracepoint header <trace/events/XXX.h>
+ * to include the following:
*
* For those macros defined with TRACE_EVENT:
*
@@ -564,7 +551,7 @@ static inline notrace int trace_event_get_offsets_##call( \
* enum event_trigger_type __tt = ETT_NONE;
* struct ring_buffer_event *event;
* struct trace_event_raw_<call> *entry; <-- defined in stage 1
- * struct ring_buffer *buffer;
+ * struct trace_buffer *buffer;
* unsigned long irq_flags;
* int __data_size;
* int pc;
@@ -613,7 +600,7 @@ static inline notrace int trace_event_get_offsets_##call( \
*
* static struct trace_event_class __used event_class_<template> = {
* .system = "<system>",
- * .define_fields = trace_event_define_fields_<call>,
+ * .fields_array = trace_event_fields_<call>,
* .fields = LIST_HEAD_INIT(event_class_##call.fields),
* .raw_init = trace_event_raw_init,
* .probe = trace_event_raw_event_##call,
@@ -751,6 +738,7 @@ static inline void ftrace_test_probe_##call(void) \
#undef __get_str
#undef __get_bitmask
#undef __print_array
+#undef __print_hex_dump
#undef TP_printk
#define TP_printk(fmt, args...) "\"" fmt "\", " __stringify(args)
@@ -761,7 +749,7 @@ _TRACE_PERF_PROTO(call, PARAMS(proto)); \
static char print_fmt_##call[] = print; \
static struct trace_event_class __used __refdata event_class_##call = { \
.system = TRACE_SYSTEM_STRING, \
- .define_fields = trace_event_define_fields_##call, \
+ .fields_array = trace_event_fields_##call, \
.fields = LIST_HEAD_INIT(event_class_##call.fields),\
.raw_init = trace_event_raw_init, \
.probe = trace_event_raw_event_##call, \
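The net effect of this trace_events.h rework is that each event's field list becomes a compile-time array of descriptors ended by an empty entry, instead of an init-time function calling trace_define_field() per field. A stand-alone sketch of the pattern, with illustrative struct and macro names rather than the kernel's:

#include <stdio.h>

struct field_desc {
	const char *type;
	const char *name;
	int size;
	int align;
	int is_signed;
	int filter_type;
};

#define IS_SIGNED(t)	(((t)(-1)) < (t)1)

#define FIELD(_type, _item) {					\
	.type = #_type, .name = #_item,				\
	.size = sizeof(_type), .align = __alignof__(_type),	\
	.is_signed = IS_SIGNED(_type), .filter_type = 0 },

/* What DECLARE_EVENT_CLASS() now expands tstruct into: */
static struct field_desc sample_event_fields[] = {
	FIELD(int, mem_id)
	FIELD(unsigned int, mem_type)
	{ 0 }	/* empty terminator, like the kernel's {} */
};

int main(void)
{
	const struct field_desc *f;

	for (f = sample_event_fields; f->type; f++)
		printf("%s %s: size=%d align=%d signed=%d\n",
		       f->type, f->name, f->size, f->align, f->is_signed);
	return 0;
}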
diff --git a/include/uapi/asm-generic/ipcbuf.h b/include/uapi/asm-generic/ipcbuf.h
index 7d80dbd336fb..41a01b494fc7 100644
--- a/include/uapi/asm-generic/ipcbuf.h
+++ b/include/uapi/asm-generic/ipcbuf.h
@@ -2,6 +2,8 @@
#ifndef __ASM_GENERIC_IPCBUF_H
#define __ASM_GENERIC_IPCBUF_H
+#include <linux/posix_types.h>
+
/*
* The generic ipc64_perm structure:
* Note extra padding because this structure is passed back and forth
diff --git a/include/uapi/asm-generic/mman-common.h b/include/uapi/asm-generic/mman-common.h
index c160a5354eb6..f94f65d429be 100644
--- a/include/uapi/asm-generic/mman-common.h
+++ b/include/uapi/asm-generic/mman-common.h
@@ -11,6 +11,8 @@
#define PROT_WRITE 0x2 /* page can be written */
#define PROT_EXEC 0x4 /* page can be executed */
#define PROT_SEM 0x8 /* page may be used for atomic ops */
+/* 0x10 reserved for arch-specific use */
+/* 0x20 reserved for arch-specific use */
#define PROT_NONE 0x0 /* page can not be accessed */
#define PROT_GROWSDOWN 0x01000000 /* mprotect flag: extend change to start of growsdown vma */
#define PROT_GROWSUP 0x02000000 /* mprotect flag: extend change to end of growsup vma */
diff --git a/include/uapi/asm-generic/msgbuf.h b/include/uapi/asm-generic/msgbuf.h
index 9fe4881557cb..6504d7b741ce 100644
--- a/include/uapi/asm-generic/msgbuf.h
+++ b/include/uapi/asm-generic/msgbuf.h
@@ -3,6 +3,8 @@
#define __ASM_GENERIC_MSGBUF_H
#include <asm/bitsperlong.h>
+#include <asm/ipcbuf.h>
+
/*
* generic msqid64_ds structure.
*
@@ -13,9 +15,9 @@
* everyone just ended up making identical copies without specific
* optimizations, so we may just as well all use the same one.
*
- * 64 bit architectures typically define a 64 bit __kernel_time_t,
- * so they do not need the first three padding words.
- * On big-endian systems, the padding is in the wrong place.
+ * 64 bit architectures use a 64-bit long time field here, while
+ * 32 bit architectures have a pair of unsigned long values.
+ * On big-endian systems, the lower half is in the wrong place.
*
* Pad space is left for:
* - 2 miscellaneous 32-bit values
@@ -24,9 +26,9 @@
struct msqid64_ds {
struct ipc64_perm msg_perm;
#if __BITS_PER_LONG == 64
- __kernel_time_t msg_stime; /* last msgsnd time */
- __kernel_time_t msg_rtime; /* last msgrcv time */
- __kernel_time_t msg_ctime; /* last change time */
+ long msg_stime; /* last msgsnd time */
+ long msg_rtime; /* last msgrcv time */
+ long msg_ctime; /* last change time */
#else
unsigned long msg_stime; /* last msgsnd time */
unsigned long msg_stime_high;
diff --git a/include/uapi/asm-generic/posix_types.h b/include/uapi/asm-generic/posix_types.h
index f0733a26ebfc..b5f7594eee7a 100644
--- a/include/uapi/asm-generic/posix_types.h
+++ b/include/uapi/asm-generic/posix_types.h
@@ -86,7 +86,10 @@ typedef struct {
*/
typedef __kernel_long_t __kernel_off_t;
typedef long long __kernel_loff_t;
+typedef __kernel_long_t __kernel_old_time_t;
+#ifndef __KERNEL__
typedef __kernel_long_t __kernel_time_t;
+#endif
typedef long long __kernel_time64_t;
typedef __kernel_long_t __kernel_clock_t;
typedef int __kernel_timer_t;
diff --git a/include/uapi/asm-generic/sembuf.h b/include/uapi/asm-generic/sembuf.h
index 0bae010f1b64..0e709bd3d730 100644
--- a/include/uapi/asm-generic/sembuf.h
+++ b/include/uapi/asm-generic/sembuf.h
@@ -3,6 +3,7 @@
#define __ASM_GENERIC_SEMBUF_H
#include <asm/bitsperlong.h>
+#include <asm/ipcbuf.h>
/*
* The semid64_ds structure for x86 architecture.
@@ -13,9 +14,8 @@
* everyone just ended up making identical copies without specific
* optimizations, so we may just as well all use the same one.
*
- * 64 bit architectures use a 64-bit __kernel_time_t here, while
+ * 64 bit architectures use a 64-bit long time field here, while
* 32 bit architectures have a pair of unsigned long values.
- * so they do not need the first two padding words.
*
* On big-endian systems, the padding is in the wrong place for
* historic reasons, so user space has to reconstruct a time_t
@@ -29,8 +29,8 @@
struct semid64_ds {
struct ipc64_perm sem_perm; /* permissions .. see ipc.h */
#if __BITS_PER_LONG == 64
- __kernel_time_t sem_otime; /* last semop time */
- __kernel_time_t sem_ctime; /* last change time */
+ long sem_otime; /* last semop time */
+ long sem_ctime; /* last change time */
#else
unsigned long sem_otime; /* last semop time */
unsigned long sem_otime_high;
diff --git a/include/uapi/asm-generic/shmbuf.h b/include/uapi/asm-generic/shmbuf.h
index e504422fc501..2bab955e0fed 100644
--- a/include/uapi/asm-generic/shmbuf.h
+++ b/include/uapi/asm-generic/shmbuf.h
@@ -13,9 +13,9 @@
* everyone just ended up making identical copies without specific
* optimizations, so we may just as well all use the same one.
*
- * 64 bit architectures typically define a 64 bit __kernel_time_t,
- * so they do not need the first two padding words.
- * On big-endian systems, the padding is in the wrong place.
+ * 64 bit architectures use a 64-bit long time field here, while
+ * 32 bit architectures have a pair of unsigned long values.
+ * On big-endian systems, the lower half is in the wrong place.
*
*
* Pad space is left for:
@@ -26,9 +26,9 @@ struct shmid64_ds {
struct ipc64_perm shm_perm; /* operation perms */
size_t shm_segsz; /* size of segment (bytes) */
#if __BITS_PER_LONG == 64
- __kernel_time_t shm_atime; /* last attach time */
- __kernel_time_t shm_dtime; /* last detach time */
- __kernel_time_t shm_ctime; /* last change time */
+ long shm_atime; /* last attach time */
+ long shm_dtime; /* last detach time */
+ long shm_ctime; /* last change time */
#else
unsigned long shm_atime; /* last attach time */
unsigned long shm_atime_high;
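As the updated comments note, 32-bit architectures expose each timestamp as a low/high pair of unsigned longs; a minimal sketch of how user space can reassemble a 64-bit value from such a pair (the helper name is ours):

#include <stdint.h>

/* Rebuild a 64-bit timestamp from e.g. shm_atime / shm_atime_high. */
static inline int64_t ipc64_time(unsigned long lo, unsigned long hi)
{
	return (int64_t)(((uint64_t)hi << 32) | (uint32_t)lo);
}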
diff --git a/include/uapi/asm-generic/unistd.h b/include/uapi/asm-generic/unistd.h
index 1fc8faa6e973..3a3201e4618e 100644
--- a/include/uapi/asm-generic/unistd.h
+++ b/include/uapi/asm-generic/unistd.h
@@ -851,8 +851,13 @@ __SYSCALL(__NR_pidfd_open, sys_pidfd_open)
__SYSCALL(__NR_clone3, sys_clone3)
#endif
+#define __NR_openat2 437
+__SYSCALL(__NR_openat2, sys_openat2)
+#define __NR_pidfd_getfd 438
+__SYSCALL(__NR_pidfd_getfd, sys_pidfd_getfd)
+
#undef __NR_syscalls
-#define __NR_syscalls 436
+#define __NR_syscalls 439
/*
* 32 bit systems traditionally used different
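Until libc grows wrappers, the two new syscalls are reachable through syscall(2); a hedged sketch for pidfd_getfd(), whose arguments are the pidfd, the fd number in the target process, and a flags argument (0 for now):

#include <unistd.h>
#include <sys/syscall.h>

#ifndef __NR_pidfd_getfd
#define __NR_pidfd_getfd 438
#endif

/* Duplicate fd 'targetfd' out of the process referred to by 'pidfd'. */
static int pidfd_getfd(int pidfd, int targetfd, unsigned int flags)
{
	return (int)syscall(__NR_pidfd_getfd, pidfd, targetfd, flags);
}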
diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h
index 4fe35d600ab8..ac3879829bb5 100644
--- a/include/uapi/drm/amdgpu_drm.h
+++ b/include/uapi/drm/amdgpu_drm.h
@@ -500,6 +500,8 @@ struct drm_amdgpu_gem_op {
#define AMDGPU_VM_MTYPE_CC (3 << 5)
/* Use UC MTYPE instead of default MTYPE */
#define AMDGPU_VM_MTYPE_UC (4 << 5)
+/* Use RW MTYPE instead of default MTYPE */
+#define AMDGPU_VM_MTYPE_RW (5 << 5)
struct drm_amdgpu_gem_va {
/** GEM object handle */
@@ -701,6 +703,9 @@ struct drm_amdgpu_cs_chunk_data {
/* Subquery id: Query DMCU firmware version */
#define AMDGPU_INFO_FW_DMCU 0x12
#define AMDGPU_INFO_FW_TA 0x13
+ /* Subquery id: Query DMCUB firmware version */
+ #define AMDGPU_INFO_FW_DMCUB 0x14
+
/* number of bytes moved for TTM migration */
#define AMDGPU_INFO_NUM_BYTES_MOVED 0x0f
/* the used VRAM size */
diff --git a/include/uapi/drm/drm.h b/include/uapi/drm/drm.h
index 8a5b2f8f8eb9..868bf7996c0f 100644
--- a/include/uapi/drm/drm.h
+++ b/include/uapi/drm/drm.h
@@ -778,11 +778,12 @@ struct drm_syncobj_array {
__u32 pad;
};
+#define DRM_SYNCOBJ_QUERY_FLAGS_LAST_SUBMITTED (1 << 0) /* last available point on timeline syncobj */
struct drm_syncobj_timeline_array {
__u64 handles;
__u64 points;
__u32 count_handles;
- __u32 pad;
+ __u32 flags;
};
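With the pad word now carrying flags, user space can ask for the last submitted rather than the last signaled timeline point; a hedged sketch, assuming the existing DRM_IOCTL_SYNCOBJ_QUERY ioctl and an installed copy of this header:

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/drm.h>	/* or libdrm's copy of drm.h */

static int syncobj_query_last_submitted(int drm_fd, uint32_t *handles,
					uint64_t *points, uint32_t count)
{
	struct drm_syncobj_timeline_array arr;

	memset(&arr, 0, sizeof(arr));
	arr.handles = (uint64_t)(uintptr_t)handles;
	arr.points = (uint64_t)(uintptr_t)points;
	arr.count_handles = count;
	arr.flags = DRM_SYNCOBJ_QUERY_FLAGS_LAST_SUBMITTED;

	return ioctl(drm_fd, DRM_IOCTL_SYNCOBJ_QUERY, &arr);
}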
diff --git a/include/uapi/drm/drm_fourcc.h b/include/uapi/drm/drm_fourcc.h
index 3feeaa3f987a..8bc0b31597d8 100644
--- a/include/uapi/drm/drm_fourcc.h
+++ b/include/uapi/drm/drm_fourcc.h
@@ -69,7 +69,7 @@ extern "C" {
#define fourcc_code(a, b, c, d) ((__u32)(a) | ((__u32)(b) << 8) | \
((__u32)(c) << 16) | ((__u32)(d) << 24))
-#define DRM_FORMAT_BIG_ENDIAN (1<<31) /* format is big endian instead of little endian */
+#define DRM_FORMAT_BIG_ENDIAN (1U<<31) /* format is big endian instead of little endian */
/* Reserve 0 for the invalid format specifier */
#define DRM_FORMAT_INVALID 0
@@ -411,6 +411,30 @@ extern "C" {
#define I915_FORMAT_MOD_Yf_TILED_CCS fourcc_mod_code(INTEL, 5)
/*
+ * Intel color control surfaces (CCS) for Gen-12 render compression.
+ *
+ * The main surface is Y-tiled and at plane index 0, the CCS is linear and
+ * at index 1. A 64B CCS cache line corresponds to an area of 4x1 tiles in
+ * main surface. In other words, 4 bits in CCS map to a main surface cache
+ * line pair. The main surface pitch is required to be a multiple of four
+ * Y-tile widths.
+ */
+#define I915_FORMAT_MOD_Y_TILED_GEN12_RC_CCS fourcc_mod_code(INTEL, 6)
+
+/*
+ * Intel color control surfaces (CCS) for Gen-12 media compression
+ *
+ * The main surface is Y-tiled and at plane index 0, the CCS is linear and
+ * at index 1. A 64B CCS cache line corresponds to an area of 4x1 tiles in
+ * main surface. In other words, 4 bits in CCS map to a main surface cache
+ * line pair. The main surface pitch is required to be a multiple of four
+ * Y-tile widths. For semi-planar formats like NV12, CCS planes follow the
+ * Y and UV planes i.e., planes 0 and 1 are used for Y and UV surfaces,
+ * planes 2 and 3 for the respective CCS.
+ */
+#define I915_FORMAT_MOD_Y_TILED_GEN12_MC_CCS fourcc_mod_code(INTEL, 7)
+
+/*
* Tiled, NV12MT, grouped in 64 (pixels) x 32 (lines) -sized macroblocks
*
* Macroblocks are laid in a Z-shape, and each pixel data is following the
@@ -648,7 +672,21 @@ extern "C" {
* Further information on the use of AFBC modifiers can be found in
* Documentation/gpu/afbc.rst
*/
-#define DRM_FORMAT_MOD_ARM_AFBC(__afbc_mode) fourcc_mod_code(ARM, __afbc_mode)
+
+/*
+ * The top 4 bits (out of the 56 bits allotted for specifying vendor-specific
+ * modifiers) denote the category for modifiers. Currently we have only two
+ * categories of modifiers, i.e. AFBC and MISC. We can have a maximum of sixteen
+ * different categories.
+ */
+#define DRM_FORMAT_MOD_ARM_CODE(__type, __val) \
+ fourcc_mod_code(ARM, ((__u64)(__type) << 52) | ((__val) & 0x000fffffffffffffULL))
+
+#define DRM_FORMAT_MOD_ARM_TYPE_AFBC 0x00
+#define DRM_FORMAT_MOD_ARM_TYPE_MISC 0x01
+
+#define DRM_FORMAT_MOD_ARM_AFBC(__afbc_mode) \
+ DRM_FORMAT_MOD_ARM_CODE(DRM_FORMAT_MOD_ARM_TYPE_AFBC, __afbc_mode)
/*
* AFBC superblock size
@@ -743,6 +781,16 @@ extern "C" {
#define AFBC_FORMAT_MOD_BCH (1ULL << 11)
/*
+ * Arm 16x16 Block U-Interleaved modifier
+ *
+ * This is used by Arm Mali Utgard and Midgard GPUs. It divides the image
+ * into 16x16 pixel blocks. Blocks are stored linearly in order, but pixels
+ * in the block are reordered.
+ */
+#define DRM_FORMAT_MOD_ARM_16X16_BLOCK_U_INTERLEAVED \
+ DRM_FORMAT_MOD_ARM_CODE(DRM_FORMAT_MOD_ARM_TYPE_MISC, 1ULL)
+
+/*
* Allwinner tiled modifier
*
* This tiling mode is implemented by the VPU found on all Allwinner platforms,
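For reference, the Arm category split can be reproduced in plain C; the sketch below assumes 0x08 is the Arm vendor code used by fourcc_mod_code(ARM, ...) and otherwise just mirrors DRM_FORMAT_MOD_ARM_CODE():

#include <stdint.h>
#include <stdio.h>

#define MOD_VENDOR_ARM	0x08ULL		/* assumed Arm vendor code */

/* vendor in bits 63:56, category in bits 55:52, payload in bits 51:0 */
static uint64_t arm_mod_code(uint64_t type, uint64_t val)
{
	return (MOD_VENDOR_ARM << 56) | (type << 52) |
	       (val & 0x000fffffffffffffULL);
}

int main(void)
{
	/* MISC (0x01) category, value 1: 16x16 block u-interleaved */
	printf("0x%016llx\n", (unsigned long long)arm_mod_code(0x01, 1));
	return 0;
}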
diff --git a/include/uapi/drm/exynos_drm.h b/include/uapi/drm/exynos_drm.h
index 3e59b8382dd8..a51aa1c618c1 100644
--- a/include/uapi/drm/exynos_drm.h
+++ b/include/uapi/drm/exynos_drm.h
@@ -68,7 +68,7 @@ struct drm_exynos_gem_info {
/**
* A structure for user connection request of virtual display.
*
- * @connection: indicate whether doing connetion or not by user.
+ * @connection: indicate whether doing connection or not by user.
* @extensions: if this value is 1 then the vidi driver would need additional
* 128bytes edid data.
* @edid: the edid data pointer from user side.
@@ -394,7 +394,7 @@ struct drm_exynos_ioctl_ipp_commit {
#define DRM_IOCTL_EXYNOS_IPP_COMMIT DRM_IOWR(DRM_COMMAND_BASE + \
DRM_EXYNOS_IPP_COMMIT, struct drm_exynos_ioctl_ipp_commit)
-/* EXYNOS specific events */
+/* Exynos specific events */
#define DRM_EXYNOS_G2D_EVENT 0x80000000
#define DRM_EXYNOS_IPP_EVENT 0x80000002
diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h
index 469dc512cca3..829c0a48577f 100644
--- a/include/uapi/drm/i915_drm.h
+++ b/include/uapi/drm/i915_drm.h
@@ -395,6 +395,7 @@ typedef struct _drm_i915_sarea {
#define DRM_IOCTL_I915_GEM_PWRITE DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PWRITE, struct drm_i915_gem_pwrite)
#define DRM_IOCTL_I915_GEM_MMAP DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP, struct drm_i915_gem_mmap)
#define DRM_IOCTL_I915_GEM_MMAP_GTT DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP_GTT, struct drm_i915_gem_mmap_gtt)
+#define DRM_IOCTL_I915_GEM_MMAP_OFFSET DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP_GTT, struct drm_i915_gem_mmap_offset)
#define DRM_IOCTL_I915_GEM_SET_DOMAIN DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SET_DOMAIN, struct drm_i915_gem_set_domain)
#define DRM_IOCTL_I915_GEM_SW_FINISH DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SW_FINISH, struct drm_i915_gem_sw_finish)
#define DRM_IOCTL_I915_GEM_SET_TILING DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_SET_TILING, struct drm_i915_gem_set_tiling)
@@ -611,6 +612,13 @@ typedef struct drm_i915_irq_wait {
* See I915_EXEC_FENCE_OUT and I915_EXEC_FENCE_SUBMIT.
*/
#define I915_PARAM_HAS_EXEC_SUBMIT_FENCE 53
+
+/*
+ * Revision of the i915-perf uAPI. The value returned helps determine what
+ * i915-perf features are available. See drm_i915_perf_property_id.
+ */
+#define I915_PARAM_PERF_REVISION 54
+
/* Must be kept compact -- no holes and well documented */
typedef struct drm_i915_getparam {
@@ -786,6 +794,37 @@ struct drm_i915_gem_mmap_gtt {
__u64 offset;
};
+struct drm_i915_gem_mmap_offset {
+ /** Handle for the object being mapped. */
+ __u32 handle;
+ __u32 pad;
+ /**
+ * Fake offset to use for subsequent mmap call
+ *
+ * This is a fixed-size type for 32/64 compatibility.
+ */
+ __u64 offset;
+
+ /**
+ * Flags for extended behaviour.
+ *
+ * It is mandatory that one of the MMAP_OFFSET types
+ * (GTT, WC, WB, UC, etc) be included.
+ */
+ __u64 flags;
+#define I915_MMAP_OFFSET_GTT 0
+#define I915_MMAP_OFFSET_WC 1
+#define I915_MMAP_OFFSET_WB 2
+#define I915_MMAP_OFFSET_UC 3
+
+ /*
+ * Zero-terminated chain of extensions.
+ *
+ * No current extensions defined; mbz.
+ */
+ __u64 extensions;
+};
+
struct drm_i915_gem_set_domain {
/** Handle for the object */
__u32 handle;
@@ -1565,6 +1604,21 @@ struct drm_i915_gem_context_param {
* i915_context_engines_bond (I915_CONTEXT_ENGINES_EXT_BOND)
*/
#define I915_CONTEXT_PARAM_ENGINES 0xa
+
+/*
+ * I915_CONTEXT_PARAM_PERSISTENCE:
+ *
+ * Allow the context and active rendering to survive the process until
+ * completion. Persistence allows fire-and-forget clients to queue up a
+ * bunch of work, hand the output over to a display server and then quit.
+ * If the context is marked as not persistent, upon closing (either via
+ * an explicit DRM_I915_GEM_CONTEXT_DESTROY or implicitly from file closure
+ * or process termination), the context and any outstanding requests will be
+ * cancelled (and exported fences for cancelled requests marked as -EIO).
+ *
+ * By default, new contexts allow persistence.
+ */
+#define I915_CONTEXT_PARAM_PERSISTENCE 0xb
/* Must be kept compact -- no holes and well documented */
__u64 value;
@@ -1844,23 +1898,31 @@ enum drm_i915_perf_property_id {
* Open the stream for a specific context handle (as used with
* execbuffer2). A stream opened for a specific context this way
* won't typically require root privileges.
+ *
+ * This property is available in perf revision 1.
*/
DRM_I915_PERF_PROP_CTX_HANDLE = 1,
/**
* A value of 1 requests the inclusion of raw OA unit reports as
* part of stream samples.
+ *
+ * This property is available in perf revision 1.
*/
DRM_I915_PERF_PROP_SAMPLE_OA,
/**
* The value specifies which set of OA unit metrics should be
* be configured, defining the contents of any OA unit reports.
+ *
+ * This property is available in perf revision 1.
*/
DRM_I915_PERF_PROP_OA_METRICS_SET,
/**
* The value specifies the size and layout of OA unit reports.
+ *
+ * This property is available in perf revision 1.
*/
DRM_I915_PERF_PROP_OA_FORMAT,
@@ -1870,9 +1932,22 @@ enum drm_i915_perf_property_id {
* from this exponent as follows:
*
* 80ns * 2^(period_exponent + 1)
+ *
+ * This property is available in perf revision 1.
*/
DRM_I915_PERF_PROP_OA_EXPONENT,
+ /**
+ * Specifying this property is only valid when specifying a context to
+ * filter with DRM_I915_PERF_PROP_CTX_HANDLE. Specifying this property
+ * will hold preemption of the particular context we want to gather
+ * performance data about. The execbuf2 submissions must include a
+ * drm_i915_gem_execbuffer_ext_perf parameter for this to apply.
+ *
+ * This property is available in perf revision 3.
+ */
+ DRM_I915_PERF_PROP_HOLD_PREEMPTION,
+
DRM_I915_PERF_PROP_MAX /* non-ABI */
};
@@ -1901,6 +1976,8 @@ struct drm_i915_perf_open_param {
* to close and re-open a stream with the same configuration.
*
* It's undefined whether any pending data for the stream will be lost.
+ *
+ * This ioctl is available in perf revision 1.
*/
#define I915_PERF_IOCTL_ENABLE _IO('i', 0x0)
@@ -1908,10 +1985,25 @@ struct drm_i915_perf_open_param {
* Disable data capture for a stream.
*
* It is an error to try and read a stream that is disabled.
+ *
+ * This ioctl is available in perf revision 1.
*/
#define I915_PERF_IOCTL_DISABLE _IO('i', 0x1)
/**
+ * Change metrics_set captured by a stream.
+ *
+ * If the stream is bound to a specific context, the configuration change
+ * will be performed inline with that context such that it takes effect before
+ * the next execbuf submission.
+ *
+ * Returns the previously bound metrics set id, or a negative error code.
+ *
+ * This ioctl is available in perf revision 2.
+ */
+#define I915_PERF_IOCTL_CONFIG _IO('i', 0x2)
+
+/**
* Common to all i915 perf records
*/
struct drm_i915_perf_record_header {
@@ -1984,6 +2076,7 @@ struct drm_i915_query_item {
__u64 query_id;
#define DRM_I915_QUERY_TOPOLOGY_INFO 1
#define DRM_I915_QUERY_ENGINE_INFO 2
+#define DRM_I915_QUERY_PERF_CONFIG 3
/* Must be kept compact -- no holes and well documented */
/*
@@ -1995,9 +2088,18 @@ struct drm_i915_query_item {
__s32 length;
/*
- * Unused for now. Must be cleared to zero.
+ * When query_id == DRM_I915_QUERY_TOPOLOGY_INFO, must be 0.
+ *
+ * When query_id == DRM_I915_QUERY_PERF_CONFIG, must be one of the
+ * following :
+ * - DRM_I915_QUERY_PERF_CONFIG_LIST
+ * - DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID
+ * - DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_ID
*/
__u32 flags;
+#define DRM_I915_QUERY_PERF_CONFIG_LIST 1
+#define DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID 2
+#define DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_ID 3
/*
* Data will be written at the location pointed by data_ptr when the
@@ -2033,8 +2135,10 @@ struct drm_i915_query {
* (data[X / 8] >> (X % 8)) & 1
*
* - the subslice mask for each slice with one bit per subslice telling
- * whether a subslice is available. The availability of subslice Y in slice
- * X can be queried with the following formula :
+ * whether a subslice is available. Gen12 has dual-subslices, which are
+ * similar to two gen11 subslices. For gen12, this array represents dual-
+ * subslices. The availability of subslice Y in slice X can be queried
+ * with the following formula :
*
* (data[subslice_offset +
* X * subslice_stride +
@@ -2123,6 +2227,56 @@ struct drm_i915_query_engine_info {
struct drm_i915_engine_info engines[];
};
+/*
+ * Data written by the kernel with query DRM_I915_QUERY_PERF_CONFIG.
+ */
+struct drm_i915_query_perf_config {
+ union {
+ /*
+ * When query_item.flags == DRM_I915_QUERY_PERF_CONFIG_LIST, i915 sets
+ * this field to the number of configurations available.
+ */
+ __u64 n_configs;
+
+ /*
+ * When query_item.flags == DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_ID,
+ * i915 will use the value in this field as configuration
+ * identifier to decide what data to write into config_ptr.
+ */
+ __u64 config;
+
+ /*
+ * When query_item.flags == DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID,
+ * i915 will use the value in this field as configuration
+ * identifier to decide what data to write into config_ptr.
+ *
+ * String formatted like "%08x-%04x-%04x-%04x-%012x"
+ */
+ char uuid[36];
+ };
+
+ /*
+ * Unused for now. Must be cleared to zero.
+ */
+ __u32 flags;
+
+ /*
+ * When query_item.flags == DRM_I915_QUERY_PERF_CONFIG_LIST, i915 will
+ * write an array of __u64 of configuration identifiers.
+ *
+ * When query_item.flags == DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_UUID or
+ * DRM_I915_QUERY_PERF_CONFIG_DATA_FOR_ID, i915 will
+ * write a struct drm_i915_perf_oa_config. If the following fields of
+ * drm_i915_perf_oa_config are not set to 0, i915 will write into
+ * the associated pointers the values submitted when the
+ * configuration was created:
+ *
+ * - n_mux_regs
+ * - n_boolean_regs
+ * - n_flex_regs
+ */
+ __u8 data[];
+};
+
#if defined(__cplusplus)
}
#endif
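The intended flow for the new mmap-offset ioctl, sketched from user space with error handling trimmed (the WC mode and the wrapper name are only for illustration): request a fake offset for a GEM handle, then mmap() the DRM fd at that offset.

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include "i915_drm.h"	/* the header patched above */

static void *gem_mmap_offset_wc(int drm_fd, uint32_t handle, size_t size)
{
	struct drm_i915_gem_mmap_offset arg;

	memset(&arg, 0, sizeof(arg));
	arg.handle = handle;
	arg.flags = I915_MMAP_OFFSET_WC;	/* one MMAP_OFFSET type is mandatory */

	if (ioctl(drm_fd, DRM_IOCTL_I915_GEM_MMAP_OFFSET, &arg))
		return MAP_FAILED;

	/* arg.offset is a fake offset into the DRM fd, not a CPU address. */
	return mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
		    drm_fd, (off_t)arg.offset);
}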
diff --git a/include/uapi/drm/nouveau_drm.h b/include/uapi/drm/nouveau_drm.h
index 9459a6e3bc1f..853a327433d3 100644
--- a/include/uapi/drm/nouveau_drm.h
+++ b/include/uapi/drm/nouveau_drm.h
@@ -110,6 +110,7 @@ struct drm_nouveau_gem_pushbuf {
__u64 push;
__u32 suffix0;
__u32 suffix1;
+#define NOUVEAU_GEM_PUSHBUF_SYNC (1ULL << 0)
__u64 vram_available;
__u64 gart_available;
};
diff --git a/include/uapi/drm/omap_drm.h b/include/uapi/drm/omap_drm.h
index 1fccffef9e27..5a142fad473c 100644
--- a/include/uapi/drm/omap_drm.h
+++ b/include/uapi/drm/omap_drm.h
@@ -38,20 +38,20 @@ struct drm_omap_param {
__u64 value; /* in (set_param), out (get_param) */
};
-#define OMAP_BO_SCANOUT 0x00000001 /* scanout capable (phys contiguous) */
-#define OMAP_BO_CACHE_MASK 0x00000006 /* cache type mask, see cache modes */
-#define OMAP_BO_TILED_MASK 0x00000f00 /* tiled mapping mask, see tiled modes */
+/* Scanout buffer, consumable by DSS */
+#define OMAP_BO_SCANOUT 0x00000001
-/* cache modes */
-#define OMAP_BO_CACHED 0x00000000 /* default */
-#define OMAP_BO_WC 0x00000002 /* write-combine */
-#define OMAP_BO_UNCACHED 0x00000004 /* strongly-ordered (uncached) */
+/* Buffer CPU caching mode: cached, write-combining or uncached. */
+#define OMAP_BO_CACHED 0x00000000
+#define OMAP_BO_WC 0x00000002
+#define OMAP_BO_UNCACHED 0x00000004
+#define OMAP_BO_CACHE_MASK 0x00000006
-/* tiled modes */
+/* Use TILER for the buffer. The TILER container unit can be 8, 16 or 32 bits. */
#define OMAP_BO_TILED_8 0x00000100
#define OMAP_BO_TILED_16 0x00000200
#define OMAP_BO_TILED_32 0x00000300
-#define OMAP_BO_TILED (OMAP_BO_TILED_8 | OMAP_BO_TILED_16 | OMAP_BO_TILED_32)
+#define OMAP_BO_TILED_MASK 0x00000f00
union omap_gem_size {
__u32 bytes; /* (for non-tiled formats) */
diff --git a/include/uapi/drm/v3d_drm.h b/include/uapi/drm/v3d_drm.h
index 58fbe48c91e9..1ce746e228d9 100644
--- a/include/uapi/drm/v3d_drm.h
+++ b/include/uapi/drm/v3d_drm.h
@@ -48,6 +48,8 @@ extern "C" {
#define DRM_IOCTL_V3D_SUBMIT_TFU DRM_IOW(DRM_COMMAND_BASE + DRM_V3D_SUBMIT_TFU, struct drm_v3d_submit_tfu)
#define DRM_IOCTL_V3D_SUBMIT_CSD DRM_IOW(DRM_COMMAND_BASE + DRM_V3D_SUBMIT_CSD, struct drm_v3d_submit_csd)
+#define DRM_V3D_SUBMIT_CL_FLUSH_CACHE 0x01
+
/**
* struct drm_v3d_submit_cl - ioctl argument for submitting commands to the 3D
* engine.
@@ -61,7 +63,7 @@ extern "C" {
* flushed by the time the render done IRQ happens, which is the
* trigger for out_sync. Any dirtying of cachelines by the job (only
* possible using TMU writes) must be flushed by the caller using the
- * CL's cache flush commands.
+ * DRM_V3D_SUBMIT_CL_FLUSH_CACHE flag.
*/
struct drm_v3d_submit_cl {
/* Pointer to the binner command list.
@@ -124,8 +126,7 @@ struct drm_v3d_submit_cl {
/* Number of BO handles passed in (size is that times 4). */
__u32 bo_handle_count;
- /* Pad, must be zero-filled. */
- __u32 pad;
+ __u32 flags;
};
/**
@@ -193,6 +194,7 @@ enum drm_v3d_param {
DRM_V3D_PARAM_V3D_CORE0_IDENT2,
DRM_V3D_PARAM_SUPPORTS_TFU,
DRM_V3D_PARAM_SUPPORTS_CSD,
+ DRM_V3D_PARAM_SUPPORTS_CACHE_FLUSH,
};
struct drm_v3d_get_param {
diff --git a/include/uapi/drm/vmwgfx_drm.h b/include/uapi/drm/vmwgfx_drm.h
index 399f58317cff..fcb741e3068f 100644
--- a/include/uapi/drm/vmwgfx_drm.h
+++ b/include/uapi/drm/vmwgfx_drm.h
@@ -71,6 +71,7 @@ extern "C" {
#define DRM_VMW_CREATE_EXTENDED_CONTEXT 26
#define DRM_VMW_GB_SURFACE_CREATE_EXT 27
#define DRM_VMW_GB_SURFACE_REF_EXT 28
+#define DRM_VMW_MSG 29
/*************************************************************************/
/**
@@ -891,11 +892,13 @@ struct drm_vmw_shader_arg {
* surface.
* @drm_vmw_surface_flag_create_buffer: Create a backup buffer if none is
* given.
+ * @drm_vmw_surface_flag_coherent: Back surface with coherent memory.
*/
enum drm_vmw_surface_flags {
drm_vmw_surface_flag_shareable = (1 << 0),
drm_vmw_surface_flag_scanout = (1 << 1),
- drm_vmw_surface_flag_create_buffer = (1 << 2)
+ drm_vmw_surface_flag_create_buffer = (1 << 2),
+ drm_vmw_surface_flag_coherent = (1 << 3),
};
/**
@@ -1211,6 +1214,22 @@ union drm_vmw_gb_surface_reference_ext_arg {
struct drm_vmw_surface_arg req;
};
+/**
+ * struct drm_vmw_msg_arg
+ *
+ * @send: Pointer to user-space msg string (null terminated).
+ * @receive: Pointer to user-space receive buffer.
+ * @send_only: Boolean indicating whether this is send-only or whether a reply is received too.
+ *
+ * Argument to the DRM_VMW_MSG ioctl.
+ */
+struct drm_vmw_msg_arg {
+ __u64 send;
+ __u64 receive;
+ __s32 send_only;
+ __u32 receive_len;
+};
+
#if defined(__cplusplus)
}
#endif
diff --git a/include/uapi/linux/acct.h b/include/uapi/linux/acct.h
index 0e72172cd23a..985b89068591 100644
--- a/include/uapi/linux/acct.h
+++ b/include/uapi/linux/acct.h
@@ -49,6 +49,7 @@ struct acct
__u16 ac_uid16; /* LSB of Real User ID */
__u16 ac_gid16; /* LSB of Real Group ID */
__u16 ac_tty; /* Control Terminal */
+ /* __u32 range means times from 1970 to 2106 */
__u32 ac_btime; /* Process Creation Time */
comp_t ac_utime; /* User Time */
comp_t ac_stime; /* System Time */
@@ -81,6 +82,7 @@ struct acct_v3
__u32 ac_gid; /* Real Group ID */
__u32 ac_pid; /* Process ID */
__u32 ac_ppid; /* Parent Process ID */
+ /* __u32 range means times from 1970 to 2106 */
__u32 ac_btime; /* Process Creation Time */
#ifdef __KERNEL__
__u32 ac_etime; /* Elapsed Time */
diff --git a/include/uapi/linux/audit.h b/include/uapi/linux/audit.h
index c89c6495983d..a534d71e689a 100644
--- a/include/uapi/linux/audit.h
+++ b/include/uapi/linux/audit.h
@@ -116,6 +116,7 @@
#define AUDIT_FANOTIFY 1331 /* Fanotify access decision */
#define AUDIT_TIME_INJOFFSET 1332 /* Timekeeping offset injected */
#define AUDIT_TIME_ADJNTPVAL 1333 /* NTP value adjustment */
+#define AUDIT_BPF 1334 /* BPF subsystem */
#define AUDIT_AVC 1400 /* SE Linux avc denial or grant */
#define AUDIT_SELINUX_ERR 1401 /* Internal SE Linux Errors */
@@ -143,6 +144,7 @@
#define AUDIT_ANOM_PROMISCUOUS 1700 /* Device changed promiscuous mode */
#define AUDIT_ANOM_ABEND 1701 /* Process ended abnormally */
#define AUDIT_ANOM_LINK 1702 /* Suspicious use of file links */
+#define AUDIT_ANOM_CREAT 1703 /* Suspicious file creation */
#define AUDIT_INTEGRITY_DATA 1800 /* Data integrity verification */
#define AUDIT_INTEGRITY_METADATA 1801 /* Metadata integrity verification */
#define AUDIT_INTEGRITY_STATUS 1802 /* Integrity enable status */
diff --git a/include/uapi/linux/b1lli.h b/include/uapi/linux/b1lli.h
deleted file mode 100644
index 4ae6ac9950df..000000000000
--- a/include/uapi/linux/b1lli.h
+++ /dev/null
@@ -1,74 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-/* $Id: b1lli.h,v 1.8.8.3 2001/09/23 22:25:05 kai Exp $
- *
- * ISDN lowlevel-module for AVM B1-card.
- *
- * Copyright 1996 by Carsten Paeth (calle@calle.in-berlin.de)
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- */
-
-#ifndef _B1LLI_H_
-#define _B1LLI_H_
-/*
- * struct for loading t4 file
- */
-typedef struct avmb1_t4file {
- int len;
- unsigned char *data;
-} avmb1_t4file;
-
-typedef struct avmb1_loaddef {
- int contr;
- avmb1_t4file t4file;
-} avmb1_loaddef;
-
-typedef struct avmb1_loadandconfigdef {
- int contr;
- avmb1_t4file t4file;
- avmb1_t4file t4config;
-} avmb1_loadandconfigdef;
-
-typedef struct avmb1_resetdef {
- int contr;
-} avmb1_resetdef;
-
-typedef struct avmb1_getdef {
- int contr;
- int cardtype;
- int cardstate;
-} avmb1_getdef;
-
-/*
- * struct for adding new cards
- */
-typedef struct avmb1_carddef {
- int port;
- int irq;
-} avmb1_carddef;
-
-#define AVM_CARDTYPE_B1 0
-#define AVM_CARDTYPE_T1 1
-#define AVM_CARDTYPE_M1 2
-#define AVM_CARDTYPE_M2 3
-
-typedef struct avmb1_extcarddef {
- int port;
- int irq;
- int cardtype;
- int cardnr; /* for HEMA/T1 */
-} avmb1_extcarddef;
-
-#define AVMB1_LOAD 0 /* load image to card */
-#define AVMB1_ADDCARD 1 /* add a new card - OBSOLETE */
-#define AVMB1_RESETCARD 2 /* reset a card */
-#define AVMB1_LOAD_AND_CONFIG 3 /* load image and config to card */
-#define AVMB1_ADDCARD_WITH_TYPE 4 /* add a new card, with cardtype */
-#define AVMB1_GET_CARDINFO 5 /* get cardtype */
-#define AVMB1_REMOVECARD 6 /* remove a card - OBSOLETE */
-
-#define AVMB1_REGISTERCARD_IS_OBSOLETE
-
-#endif /* _B1LLI_H_ */
diff --git a/include/uapi/linux/batadv_packet.h b/include/uapi/linux/batadv_packet.h
index 2a15f01c2243..0ae34c85ef9e 100644
--- a/include/uapi/linux/batadv_packet.h
+++ b/include/uapi/linux/batadv_packet.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: (GPL-2.0 WITH Linux-syscall-note) */
-/* Copyright (C) 2007-2019 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2007-2020 B.A.T.M.A.N. contributors:
*
* Marek Lindner, Simon Wunderlich
*/
diff --git a/include/uapi/linux/batman_adv.h b/include/uapi/linux/batman_adv.h
index 67f4636758af..617c180ff0c8 100644
--- a/include/uapi/linux/batman_adv.h
+++ b/include/uapi/linux/batman_adv.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: MIT */
-/* Copyright (C) 2016-2019 B.A.T.M.A.N. contributors:
+/* Copyright (C) 2016-2020 B.A.T.M.A.N. contributors:
*
* Matthias Schiffer
*/
diff --git a/include/uapi/linux/bcache.h b/include/uapi/linux/bcache.h
index 5d4f58e059fd..9a1965c6c3d0 100644
--- a/include/uapi/linux/bcache.h
+++ b/include/uapi/linux/bcache.h
@@ -148,6 +148,7 @@ static inline struct bkey *bkey_idx(const struct bkey *k, unsigned int nr_keys)
#define BCACHE_SB_MAX_VERSION 4
#define SB_SECTOR 8
+#define SB_OFFSET (SB_SECTOR << SECTOR_SHIFT)
#define SB_SIZE 4096
#define SB_LABEL_SIZE 32
#define SB_JOURNAL_BUCKETS 256U
@@ -156,6 +157,57 @@ static inline struct bkey *bkey_idx(const struct bkey *k, unsigned int nr_keys)
#define BDEV_DATA_START_DEFAULT 16 /* sectors */
+struct cache_sb_disk {
+ __le64 csum;
+ __le64 offset; /* sector where this sb was written */
+ __le64 version;
+
+ __u8 magic[16];
+
+ __u8 uuid[16];
+ union {
+ __u8 set_uuid[16];
+ __le64 set_magic;
+ };
+ __u8 label[SB_LABEL_SIZE];
+
+ __le64 flags;
+ __le64 seq;
+ __le64 pad[8];
+
+ union {
+ struct {
+ /* Cache devices */
+ __le64 nbuckets; /* device size */
+
+ __le16 block_size; /* sectors */
+ __le16 bucket_size; /* sectors */
+
+ __le16 nr_in_set;
+ __le16 nr_this_dev;
+ };
+ struct {
+ /* Backing devices */
+ __le64 data_offset;
+
+ /*
+ * block_size from the cache device section is still used by
+ * backing devices, so don't add anything here until we fix
+ * things to not need it for backing devices anymore
+ */
+ };
+ };
+
+ __le32 last_mount; /* time overflow in y2106 */
+
+ __le16 first_bucket;
+ union {
+ __le16 njournal_buckets;
+ __le16 keys;
+ };
+ __le64 d[SB_JOURNAL_BUCKETS]; /* journal buckets */
+};
+
struct cache_sb {
__u64 csum;
__u64 offset; /* sector where this sb was written */
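Because struct cache_sb_disk spells out the on-disk endianness, a user-space tool can read the superblock directly; a minimal sketch, assuming the installed uapi header and reading from sector 8 (byte offset 4096):

#include <unistd.h>
#include <endian.h>
#include <linux/bcache.h>

/* Read and version-check the superblock that lives at sector 8. */
static int read_cache_sb(int fd, struct cache_sb_disk *sb)
{
	if (pread(fd, sb, sizeof(*sb), SB_SECTOR * 512) != (ssize_t)sizeof(*sb))
		return -1;

	return le64toh(sb->version) <= BCACHE_SB_MAX_VERSION ? 0 : -1;
}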
diff --git a/include/uapi/linux/blkzoned.h b/include/uapi/linux/blkzoned.h
index 498eec813494..0cdef67135f0 100644
--- a/include/uapi/linux/blkzoned.h
+++ b/include/uapi/linux/blkzoned.h
@@ -120,9 +120,11 @@ struct blk_zone_report {
};
/**
- * struct blk_zone_range - BLKRESETZONE ioctl request
- * @sector: starting sector of the first zone to issue reset write pointer
- * @nr_sectors: Total number of sectors of 1 or more zones to reset
+ * struct blk_zone_range - BLKRESETZONE/BLKOPENZONE/
+ * BLKCLOSEZONE/BLKFINISHZONE ioctl
+ * requests
+ * @sector: Starting sector of the first zone to operate on.
+ * @nr_sectors: Total number of sectors of all zones to operate on.
*/
struct blk_zone_range {
__u64 sector;
@@ -139,10 +141,19 @@ struct blk_zone_range {
* sector range. The sector range must be zone aligned.
* @BLKGETZONESZ: Get the device zone size in number of 512 B sectors.
* @BLKGETNRZONES: Get the total number of zones of the device.
+ * @BLKOPENZONE: Open the zones in the specified sector range.
+ * The 512 B sector range must be zone aligned.
+ * @BLKCLOSEZONE: Close the zones in the specified sector range.
+ * The 512 B sector range must be zone aligned.
+ * @BLKFINISHZONE: Mark the zones as full in the specified sector range.
+ * The 512 B sector range must be zone aligned.
*/
#define BLKREPORTZONE _IOWR(0x12, 130, struct blk_zone_report)
#define BLKRESETZONE _IOW(0x12, 131, struct blk_zone_range)
#define BLKGETZONESZ _IOR(0x12, 132, __u32)
#define BLKGETNRZONES _IOR(0x12, 133, __u32)
+#define BLKOPENZONE _IOW(0x12, 134, struct blk_zone_range)
+#define BLKCLOSEZONE _IOW(0x12, 135, struct blk_zone_range)
+#define BLKFINISHZONE _IOW(0x12, 136, struct blk_zone_range)
#endif /* _UAPI_BLKZONED_H */
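All three new zone-management ioctls reuse struct blk_zone_range exactly as BLKRESETZONE does; a minimal sketch for BLKFINISHZONE, with sector values in 512 B units and zone aligned as required above:

#include <sys/ioctl.h>
#include <linux/blkzoned.h>

/* Transition the zones in [zone_start, zone_start + zone_len) to full. */
static int finish_zones(int fd, __u64 zone_start, __u64 zone_len)
{
	struct blk_zone_range range = {
		.sector     = zone_start,
		.nr_sectors = zone_len,
	};

	return ioctl(fd, BLKFINISHZONE, &range);
}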
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index 77c6be96d676..22f235260a3a 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -107,6 +107,10 @@ enum bpf_cmd {
BPF_MAP_LOOKUP_AND_DELETE_ELEM,
BPF_MAP_FREEZE,
BPF_BTF_GET_NEXT_ID,
+ BPF_MAP_LOOKUP_BATCH,
+ BPF_MAP_LOOKUP_AND_DELETE_BATCH,
+ BPF_MAP_UPDATE_BATCH,
+ BPF_MAP_DELETE_BATCH,
};
enum bpf_map_type {
@@ -136,6 +140,7 @@ enum bpf_map_type {
BPF_MAP_TYPE_STACK,
BPF_MAP_TYPE_SK_STORAGE,
BPF_MAP_TYPE_DEVMAP_HASH,
+ BPF_MAP_TYPE_STRUCT_OPS,
};
/* Note that tracing related programs such as
@@ -173,6 +178,9 @@ enum bpf_prog_type {
BPF_PROG_TYPE_CGROUP_SYSCTL,
BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE,
BPF_PROG_TYPE_CGROUP_SOCKOPT,
+ BPF_PROG_TYPE_TRACING,
+ BPF_PROG_TYPE_STRUCT_OPS,
+ BPF_PROG_TYPE_EXT,
};
enum bpf_attach_type {
@@ -199,6 +207,9 @@ enum bpf_attach_type {
BPF_CGROUP_UDP6_RECVMSG,
BPF_CGROUP_GETSOCKOPT,
BPF_CGROUP_SETSOCKOPT,
+ BPF_TRACE_RAW_TP,
+ BPF_TRACE_FENTRY,
+ BPF_TRACE_FEXIT,
__MAX_BPF_ATTACH_TYPE
};
@@ -227,6 +238,11 @@ enum bpf_attach_type {
* When children program makes decision (like picking TCP CA or sock bind)
* parent program has a chance to override it.
*
+ * With BPF_F_ALLOW_MULTI a new program is added to the end of the list of
+ * programs for a cgroup. It is still possible to replace an old program at
+ * any position by also specifying the BPF_F_REPLACE flag and passing the fd
+ * of the program to be replaced in the replace_bpf_fd attribute. The old
+ * program at that position will be released.
+ *
* A cgroup with MULTI or OVERRIDE flag allows any attach flags in sub-cgroups.
* A cgroup with NONE doesn't allow any programs in sub-cgroups.
* Ex1:
@@ -245,6 +261,7 @@ enum bpf_attach_type {
*/
#define BPF_F_ALLOW_OVERRIDE (1U << 0)
#define BPF_F_ALLOW_MULTI (1U << 1)
+#define BPF_F_REPLACE (1U << 2)
/* If BPF_F_STRICT_ALIGNMENT is used in BPF_PROG_LOAD command, the
* verifier will perform strict alignment checking as if the kernel
@@ -344,7 +361,15 @@ enum bpf_attach_type {
/* Clone map from listener for newly accepted socket */
#define BPF_F_CLONE (1U << 9)
-/* flags for BPF_PROG_QUERY */
+/* Enable memory-mapping BPF map */
+#define BPF_F_MMAPABLE (1U << 10)
+
+/* Flags for BPF_PROG_QUERY. */
+
+/* Query effective (directly attached + inherited from ancestor cgroups)
+ * programs that will be executed for events within a cgroup.
+ * attach_flags with this flag are returned only for directly attached programs.
+ */
#define BPF_F_QUERY_EFFECTIVE (1U << 0)
enum bpf_stack_build_id_status {
@@ -384,6 +409,10 @@ union bpf_attr {
__u32 btf_fd; /* fd pointing to a BTF type data */
__u32 btf_key_type_id; /* BTF type_id of the key */
__u32 btf_value_type_id; /* BTF type_id of the value */
+ __u32 btf_vmlinux_value_type_id;/* BTF type_id of a kernel-
+ * struct stored as the
+ * map value
+ */
};
struct { /* anonymous struct used by BPF_MAP_*_ELEM commands */
@@ -396,6 +425,23 @@ union bpf_attr {
__u64 flags;
};
+ struct { /* struct used by BPF_MAP_*_BATCH commands */
+ __aligned_u64 in_batch; /* start batch,
+ * NULL to start from beginning
+ */
+ __aligned_u64 out_batch; /* output: next start batch */
+ __aligned_u64 keys;
+ __aligned_u64 values;
+ __u32 count; /* input/output:
+ * input: # of key/value
+ * elements
+ * output: # of filled elements
+ */
+ __u32 map_fd;
+ __u64 elem_flags;
+ __u64 flags;
+ } batch;
+
struct { /* anonymous struct used by BPF_PROG_LOAD command */
__u32 prog_type; /* one of enum bpf_prog_type */
__u32 insn_cnt;
@@ -420,6 +466,8 @@ union bpf_attr {
__u32 line_info_rec_size; /* userspace bpf_line_info size */
__aligned_u64 line_info; /* line info */
__u32 line_info_cnt; /* number of bpf_line_info records */
+ __u32 attach_btf_id; /* in-kernel BTF type id to attach to */
+ __u32 attach_prog_fd; /* 0 to attach to vmlinux */
};
struct { /* anonymous struct used by BPF_OBJ_* commands */
@@ -433,6 +481,10 @@ union bpf_attr {
__u32 attach_bpf_fd; /* eBPF program to attach */
__u32 attach_type;
__u32 attach_flags;
+ __u32 replace_bpf_fd; /* previously attached eBPF
+ * program to replace if
+ * BPF_F_REPLACE is used
+ */
};
struct { /* anonymous struct used by BPF_PROG_TEST_RUN command */
@@ -560,10 +612,13 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_probe_read(void *dst, u32 size, const void *src)
+ * int bpf_probe_read(void *dst, u32 size, const void *unsafe_ptr)
* Description
* For tracing programs, safely attempt to read *size* bytes from
- * address *src* and store the data in *dst*.
+ * kernel space address *unsafe_ptr* and store the data in *dst*.
+ *
+ * Generally, use bpf_probe_read_user() or bpf_probe_read_kernel()
+ * instead.
* Return
* 0 on success, or a negative error in case of failure.
*
@@ -794,7 +849,7 @@ union bpf_attr {
* A 64-bit integer containing the current GID and UID, and
* created as such: *current_gid* **<< 32 \|** *current_uid*.
*
- * int bpf_get_current_comm(char *buf, u32 size_of_buf)
+ * int bpf_get_current_comm(void *buf, u32 size_of_buf)
* Description
* Copy the **comm** attribute of the current task into *buf* of
* *size_of_buf*. The **comm** attribute contains the name of
@@ -990,9 +1045,9 @@ union bpf_attr {
* supports redirection to the egress interface, and accepts no
* flag at all.
*
- * The same effect can be attained with the more generic
- * **bpf_redirect_map**\ (), which requires specific maps to be
- * used but offers better performance.
+ * The same effect can also be attained with the more generic
+ * **bpf_redirect_map**\ (), which uses a BPF map to store the
+ * redirect target instead of providing it directly to the helper.
* Return
* For XDP, the helper returns **XDP_REDIRECT** on success or
* **XDP_ABORTED** on error. For other program types, the values
@@ -1023,7 +1078,7 @@ union bpf_attr {
* The realm of the route for the packet associated to *skb*, or 0
* if none was found.
*
- * int bpf_perf_event_output(struct pt_regs *ctx, struct bpf_map *map, u64 flags, void *data, u64 size)
+ * int bpf_perf_event_output(void *ctx, struct bpf_map *map, u64 flags, void *data, u64 size)
* Description
* Write raw *data* blob into a special BPF perf event held by
* *map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. This perf
@@ -1068,7 +1123,7 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_skb_load_bytes(const struct sk_buff *skb, u32 offset, void *to, u32 len)
+ * int bpf_skb_load_bytes(const void *skb, u32 offset, void *to, u32 len)
* Description
* This helper was provided as an easy way to load data from a
* packet. It can be used to load *len* bytes from *offset* from
@@ -1085,7 +1140,7 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_get_stackid(struct pt_regs *ctx, struct bpf_map *map, u64 flags)
+ * int bpf_get_stackid(void *ctx, struct bpf_map *map, u64 flags)
* Description
* Walk a user or a kernel stack and return its id. To achieve
* this, the helper needs *ctx*, which is a pointer to the context
@@ -1154,7 +1209,7 @@ union bpf_attr {
* The checksum result, or a negative error code in case of
* failure.
*
- * int bpf_skb_get_tunnel_opt(struct sk_buff *skb, u8 *opt, u32 size)
+ * int bpf_skb_get_tunnel_opt(struct sk_buff *skb, void *opt, u32 size)
* Description
* Retrieve tunnel options metadata for the packet associated to
* *skb*, and store the raw tunnel option data to the buffer *opt*
@@ -1172,7 +1227,7 @@ union bpf_attr {
* Return
* The size of the option data retrieved.
*
- * int bpf_skb_set_tunnel_opt(struct sk_buff *skb, u8 *opt, u32 size)
+ * int bpf_skb_set_tunnel_opt(struct sk_buff *skb, void *opt, u32 size)
* Description
* Set tunnel options metadata for the packet associated to *skb*
* to the option data contained in the raw buffer *opt* of *size*.
@@ -1425,45 +1480,14 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_probe_read_str(void *dst, int size, const void *unsafe_ptr)
+ * int bpf_probe_read_str(void *dst, u32 size, const void *unsafe_ptr)
* Description
- * Copy a NUL terminated string from an unsafe address
- * *unsafe_ptr* to *dst*. The *size* should include the
- * terminating NUL byte. In case the string length is smaller than
- * *size*, the target is not padded with further NUL bytes. If the
- * string length is larger than *size*, just *size*-1 bytes are
- * copied and the last byte is set to NUL.
- *
- * On success, the length of the copied string is returned. This
- * makes this helper useful in tracing programs for reading
- * strings, and more importantly to get its length at runtime. See
- * the following snippet:
- *
- * ::
- *
- * SEC("kprobe/sys_open")
- * void bpf_sys_open(struct pt_regs *ctx)
- * {
- * char buf[PATHLEN]; // PATHLEN is defined to 256
- * int res = bpf_probe_read_str(buf, sizeof(buf),
- * ctx->di);
- *
- * // Consume buf, for example push it to
- * // userspace via bpf_perf_event_output(); we
- * // can use res (the string length) as event
- * // size, after checking its boundaries.
- * }
- *
- * In comparison, using **bpf_probe_read()** helper here instead
- * to read the string would require to estimate the length at
- * compile time, and would often result in copying more memory
- * than necessary.
+ * Copy a NUL terminated string from an unsafe kernel address
+ * *unsafe_ptr* to *dst*. See bpf_probe_read_kernel_str() for
+ * more details.
*
- * Another useful use case is when parsing individual process
- * arguments or individual environment variables navigating
- * *current*\ **->mm->arg_start** and *current*\
- * **->mm->env_start**: using this helper and the return value,
- * one can quickly iterate at the right offset of the memory area.
+ * Generally, use bpf_probe_read_user_str() or bpf_probe_read_kernel_str()
+ * instead.
* Return
* On success, the strictly positive length of the string,
* including the trailing NUL character. On error, a negative
@@ -1511,7 +1535,7 @@ union bpf_attr {
* Return
* 0
*
- * int bpf_setsockopt(struct bpf_sock_ops *bpf_socket, int level, int optname, char *optval, int optlen)
+ * int bpf_setsockopt(struct bpf_sock_ops *bpf_socket, int level, int optname, void *optval, int optlen)
* Description
* Emulate a call to **setsockopt()** on the socket associated to
* *bpf_socket*, which must be a full socket. The *level* at
@@ -1587,15 +1611,13 @@ union bpf_attr {
* the caller. Any higher bits in the *flags* argument must be
* unset.
*
- * When used to redirect packets to net devices, this helper
- * provides a high performance increase over **bpf_redirect**\ ().
- * This is due to various implementation details of the underlying
- * mechanisms, one of which is the fact that **bpf_redirect_map**\
- * () tries to send packet as a "bulk" to the device.
+ * See also bpf_redirect(), which only supports redirecting to an
+ * ifindex, but doesn't require a map to do so.
* Return
- * **XDP_REDIRECT** on success, or **XDP_ABORTED** on error.
+ * **XDP_REDIRECT** on success, or the value of the two lower bits
+ * of the *flags* argument on error.
*
- * int bpf_sk_redirect_map(struct bpf_map *map, u32 key, u64 flags)
+ * int bpf_sk_redirect_map(struct sk_buff *skb, struct bpf_map *map, u32 key, u64 flags)
* Description
* Redirect the packet to the socket referenced by *map* (of type
* **BPF_MAP_TYPE_SOCKMAP**) at index *key*. Both ingress and
@@ -1715,7 +1737,7 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_getsockopt(struct bpf_sock_ops *bpf_socket, int level, int optname, char *optval, int optlen)
+ * int bpf_getsockopt(struct bpf_sock_ops *bpf_socket, int level, int optname, void *optval, int optlen)
* Description
* Emulate a call to **getsockopt()** on the socket associated to
* *bpf_socket*, which must be a full socket. The *level* at
@@ -1947,7 +1969,7 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_get_stack(struct pt_regs *regs, void *buf, u32 size, u64 flags)
+ * int bpf_get_stack(void *ctx, void *buf, u32 size, u64 flags)
* Description
* Return a user or a kernel stack in bpf program provided buffer.
* To achieve this, the helper needs *ctx*, which is a pointer
@@ -1980,7 +2002,7 @@ union bpf_attr {
* A non-negative value equal to or less than *size* on success,
* or a negative error in case of failure.
*
- * int bpf_skb_load_bytes_relative(const struct sk_buff *skb, u32 offset, void *to, u32 len, u32 start_header)
+ * int bpf_skb_load_bytes_relative(const void *skb, u32 offset, void *to, u32 len, u32 start_header)
* Description
* This helper is similar to **bpf_skb_load_bytes**\ () in that
* it provides an easy way to load *len* bytes from *offset*
@@ -2033,7 +2055,7 @@ union bpf_attr {
* * > 0 one of **BPF_FIB_LKUP_RET_** codes explaining why the
* packet is not forwarded or needs assist from full stack
*
- * int bpf_sock_hash_update(struct bpf_sock_ops_kern *skops, struct bpf_map *map, void *key, u64 flags)
+ * int bpf_sock_hash_update(struct bpf_sock_ops *skops, struct bpf_map *map, void *key, u64 flags)
* Description
* Add an entry to, or update a sockhash *map* referencing sockets.
* The *skops* is used as a new value for the entry associated to
@@ -2392,7 +2414,7 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_msg_push_data(struct sk_buff *skb, u32 start, u32 len, u64 flags)
+ * int bpf_msg_push_data(struct sk_msg_buff *msg, u32 start, u32 len, u64 flags)
* Description
* For socket policies, insert *len* bytes into *msg* at offset
* *start*.
@@ -2408,9 +2430,9 @@ union bpf_attr {
* Return
* 0 on success, or a negative error in case of failure.
*
- * int bpf_msg_pop_data(struct sk_msg_buff *msg, u32 start, u32 pop, u64 flags)
+ * int bpf_msg_pop_data(struct sk_msg_buff *msg, u32 start, u32 len, u64 flags)
* Description
- * Will remove *pop* bytes from a *msg* starting at byte *start*.
+ * Will remove *len* bytes from a *msg* starting at byte *start*.
* This may result in **ENOMEM** errors under certain situations if
* an allocation and copy are required due to a full ring buffer.
* However, the helper will try to avoid doing the allocation
@@ -2505,7 +2527,7 @@ union bpf_attr {
* A **struct bpf_tcp_sock** pointer on success, or **NULL** in
* case of failure.
*
- * int bpf_skb_ecn_set_ce(struct sk_buf *skb)
+ * int bpf_skb_ecn_set_ce(struct sk_buff *skb)
* Description
* Set ECN (Explicit Congestion Notification) field of IP header
* to **CE** (Congestion Encountered) if current value is **ECT**
@@ -2712,7 +2734,8 @@ union bpf_attr {
*
* int bpf_send_signal(u32 sig)
* Description
- * Send signal *sig* to the current task.
+ * Send signal *sig* to the process of the current task.
+ * The signal may be delivered to any of this process's threads.
* Return
* 0 on success or successfully queued.
*
@@ -2750,6 +2773,123 @@ union bpf_attr {
* **-EOPNOTSUPP** kernel configuration does not enable SYN cookies
*
* **-EPROTONOSUPPORT** IP packet version is not 4 or 6
+ *
+ * int bpf_skb_output(void *ctx, struct bpf_map *map, u64 flags, void *data, u64 size)
+ * Description
+ * Write raw *data* blob into a special BPF perf event held by
+ * *map* of type **BPF_MAP_TYPE_PERF_EVENT_ARRAY**. This perf
+ * event must have the following attributes: **PERF_SAMPLE_RAW**
+ * as **sample_type**, **PERF_TYPE_SOFTWARE** as **type**, and
+ * **PERF_COUNT_SW_BPF_OUTPUT** as **config**.
+ *
+ * The *flags* are used to indicate the index in *map* for which
+ * the value must be put, masked with **BPF_F_INDEX_MASK**.
+ * Alternatively, *flags* can be set to **BPF_F_CURRENT_CPU**
+ * to indicate that the index of the current CPU core should be
+ * used.
+ *
+ * The value to write, of *size*, is passed through eBPF stack and
+ * pointed by *data*.
+ *
+ * *ctx* is a pointer to in-kernel struct sk_buff.
+ *
+ * This helper is similar to **bpf_perf_event_output**\ () but
+ * restricted to raw_tracepoint bpf programs.
+ * Return
+ * 0 on success, or a negative error in case of failure.
+ *
+ * int bpf_probe_read_user(void *dst, u32 size, const void *unsafe_ptr)
+ * Description
+ * Safely attempt to read *size* bytes from user space address
+ * *unsafe_ptr* and store the data in *dst*.
+ * Return
+ * 0 on success, or a negative error in case of failure.
+ *
+ * int bpf_probe_read_kernel(void *dst, u32 size, const void *unsafe_ptr)
+ * Description
+ * Safely attempt to read *size* bytes from kernel space address
+ * *unsafe_ptr* and store the data in *dst*.
+ * Return
+ * 0 on success, or a negative error in case of failure.
+ *
+ * int bpf_probe_read_user_str(void *dst, u32 size, const void *unsafe_ptr)
+ * Description
+ * Copy a NUL terminated string from an unsafe user address
+ * *unsafe_ptr* to *dst*. The *size* should include the
+ * terminating NUL byte. In case the string length is smaller than
+ * *size*, the target is not padded with further NUL bytes. If the
+ * string length is larger than *size*, just *size*-1 bytes are
+ * copied and the last byte is set to NUL.
+ *
+ * On success, the length of the copied string is returned. This
+ * makes this helper useful in tracing programs for reading
+ * strings, and more importantly to get its length at runtime. See
+ * the following snippet:
+ *
+ * ::
+ *
+ * SEC("kprobe/sys_open")
+ * void bpf_sys_open(struct pt_regs *ctx)
+ * {
+ * char buf[PATHLEN]; // PATHLEN is defined to 256
+ * int res = bpf_probe_read_user_str(buf, sizeof(buf),
+ * ctx->di);
+ *
+ * // Consume buf, for example push it to
+ * // userspace via bpf_perf_event_output(); we
+ * // can use res (the string length) as event
+ * // size, after checking its boundaries.
+ * }
+ *
+ * In comparison, using **bpf_probe_read_user()** helper here
+ * instead to read the string would require estimating the length
+ * at compile time, and would often result in copying more memory
+ * than necessary.
+ *
+ * Another useful use case is when parsing individual process
+ * arguments or individual environment variables navigating
+ * *current*\ **->mm->arg_start** and *current*\
+ * **->mm->env_start**: using this helper and the return value,
+ * one can quickly iterate at the right offset of the memory area.
+ * Return
+ * On success, the strictly positive length of the string,
+ * including the trailing NUL character. On error, a negative
+ * value.
+ *
+ * int bpf_probe_read_kernel_str(void *dst, u32 size, const void *unsafe_ptr)
+ * Description
+ * Copy a NUL terminated string from an unsafe kernel address *unsafe_ptr*
+ * to *dst*. Same semantics as with bpf_probe_read_user_str() apply.
+ * Return
+ * On success, the strictly positive length of the string, including
+ * the trailing NUL character. On error, a negative value.
+ *
+ * int bpf_tcp_send_ack(void *tp, u32 rcv_nxt)
+ * Description
+ * Send out a tcp-ack. *tp* is the in-kernel struct tcp_sock.
+ * *rcv_nxt* is the ack_seq to be sent out.
+ * Return
+ * 0 on success, or a negative error in case of failure.
+ *
+ * int bpf_send_signal_thread(u32 sig)
+ * Description
+ * Send signal *sig* to the thread corresponding to the current task.
+ * Return
+ * 0 on success or successfully queued.
+ *
+ * **-EBUSY** if the work queue under NMI is full.
+ *
+ * **-EINVAL** if *sig* is invalid.
+ *
+ * **-EPERM** if no permission to send the *sig*.
+ *
+ * **-EAGAIN** if bpf program can try again.
+ *
+ * u64 bpf_jiffies64(void)
+ * Description
+ * Obtain the 64-bit jiffies
+ * Return
+ * The 64-bit jiffies
*/
#define __BPF_FUNC_MAPPER(FN) \
FN(unspec), \
@@ -2862,7 +3002,15 @@ union bpf_attr {
FN(sk_storage_get), \
FN(sk_storage_delete), \
FN(send_signal), \
- FN(tcp_gen_syncookie),
+ FN(tcp_gen_syncookie), \
+ FN(skb_output), \
+ FN(probe_read_user), \
+ FN(probe_read_kernel), \
+ FN(probe_read_user_str), \
+ FN(probe_read_kernel_str), \
+ FN(tcp_send_ack), \
+ FN(send_signal_thread), \
+ FN(jiffies64),
/* integer value in 'imm' field of BPF_CALL instruction selects which helper
* function eBPF program intends to call
@@ -3263,7 +3411,7 @@ struct bpf_map_info {
__u32 map_flags;
char name[BPF_OBJ_NAME_LEN];
__u32 ifindex;
- __u32 :32;
+ __u32 btf_vmlinux_value_type_id;
__u64 netns_dev;
__u64 netns_ino;
__u32 btf_id;
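One way to exercise the new BPF_F_REPLACE flag without libbpf is a raw bpf(2) call that names both the program being installed and the one being swapped out; a hedged sketch (the cgroup attach type is an arbitrary example):

#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

/* Swap old_prog_fd for new_prog_fd in the cgroup's multi-prog list. */
static int cgroup_replace_prog(int cgroup_fd, int old_prog_fd, int new_prog_fd)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.target_fd      = cgroup_fd;
	attr.attach_bpf_fd  = new_prog_fd;
	attr.attach_type    = BPF_CGROUP_INET_EGRESS;	/* arbitrary example */
	attr.attach_flags   = BPF_F_ALLOW_MULTI | BPF_F_REPLACE;
	attr.replace_bpf_fd = old_prog_fd;

	return (int)syscall(__NR_bpf, BPF_PROG_ATTACH, &attr, sizeof(attr));
}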
diff --git a/include/uapi/linux/btf.h b/include/uapi/linux/btf.h
index c02dec97e1ce..5a667107ad2c 100644
--- a/include/uapi/linux/btf.h
+++ b/include/uapi/linux/btf.h
@@ -142,7 +142,14 @@ struct btf_param {
enum {
BTF_VAR_STATIC = 0,
- BTF_VAR_GLOBAL_ALLOCATED,
+ BTF_VAR_GLOBAL_ALLOCATED = 1,
+ BTF_VAR_GLOBAL_EXTERN = 2,
+};
+
+enum btf_func_linkage {
+ BTF_FUNC_STATIC = 0,
+ BTF_FUNC_GLOBAL = 1,
+ BTF_FUNC_EXTERN = 2,
};
/* BTF_KIND_VAR is followed by a single "struct btf_var" to describe
diff --git a/include/uapi/linux/btrfs.h b/include/uapi/linux/btrfs.h
index 3ee0678c0a83..8134924cfc17 100644
--- a/include/uapi/linux/btrfs.h
+++ b/include/uapi/linux/btrfs.h
@@ -36,17 +36,24 @@ struct btrfs_ioctl_vol_args {
#define BTRFS_DEVICE_PATH_NAME_MAX 1024
#define BTRFS_SUBVOL_NAME_MAX 4039
-#define BTRFS_SUBVOL_CREATE_ASYNC (1ULL << 0)
+/*
+ * Deprecated since 5.7:
+ *
+ * BTRFS_SUBVOL_CREATE_ASYNC (1ULL << 0)
+ */
+
#define BTRFS_SUBVOL_RDONLY (1ULL << 1)
#define BTRFS_SUBVOL_QGROUP_INHERIT (1ULL << 2)
#define BTRFS_DEVICE_SPEC_BY_ID (1ULL << 3)
+#define BTRFS_SUBVOL_SPEC_BY_ID (1ULL << 4)
+
#define BTRFS_VOL_ARG_V2_FLAGS_SUPPORTED \
- (BTRFS_SUBVOL_CREATE_ASYNC | \
- BTRFS_SUBVOL_RDONLY | \
+ (BTRFS_SUBVOL_RDONLY | \
BTRFS_SUBVOL_QGROUP_INHERIT | \
- BTRFS_DEVICE_SPEC_BY_ID)
+ BTRFS_DEVICE_SPEC_BY_ID | \
+ BTRFS_SUBVOL_SPEC_BY_ID)
#define BTRFS_FSID_SIZE 16
#define BTRFS_UUID_SIZE 16
@@ -97,16 +104,29 @@ struct btrfs_ioctl_qgroup_limit_args {
};
/*
- * flags for subvolumes
+ * Arguments for specifying subvolumes or devices, by name or by id, and
+ * the associated flags
*
- * Used by:
- * struct btrfs_ioctl_vol_args_v2.flags
+ * The set of supported flags depends on the ioctl
*
* BTRFS_SUBVOL_RDONLY is also provided/consumed by the following ioctls:
* - BTRFS_IOC_SUBVOL_GETFLAGS
* - BTRFS_IOC_SUBVOL_SETFLAGS
*/
+/* Supported flags for BTRFS_IOC_RM_DEV_V2 */
+#define BTRFS_DEVICE_REMOVE_ARGS_MASK \
+ (BTRFS_DEVICE_SPEC_BY_ID)
+
+/* Supported flags for BTRFS_IOC_SNAP_CREATE_V2 and BTRFS_IOC_SUBVOL_CREATE_V2 */
+#define BTRFS_SUBVOL_CREATE_ARGS_MASK \
+ (BTRFS_SUBVOL_RDONLY | \
+ BTRFS_SUBVOL_QGROUP_INHERIT)
+
+/* Supported flags for BTRFS_IOC_SNAP_DESTROY_V2 */
+#define BTRFS_SUBVOL_DELETE_ARGS_MASK \
+ (BTRFS_SUBVOL_SPEC_BY_ID)
+
struct btrfs_ioctl_vol_args_v2 {
__s64 fd;
__u64 transid;
@@ -121,6 +141,7 @@ struct btrfs_ioctl_vol_args_v2 {
union {
char name[BTRFS_SUBVOL_NAME_MAX + 1];
__u64 devid;
+ __u64 subvolid;
};
};
@@ -270,6 +291,7 @@ struct btrfs_ioctl_fs_info_args {
#define BTRFS_FEATURE_INCOMPAT_SKINNY_METADATA (1ULL << 8)
#define BTRFS_FEATURE_INCOMPAT_NO_HOLES (1ULL << 9)
#define BTRFS_FEATURE_INCOMPAT_METADATA_UUID (1ULL << 10)
+#define BTRFS_FEATURE_INCOMPAT_RAID1C34 (1ULL << 11)
struct btrfs_ioctl_feature_flags {
__u64 compat_flags;
@@ -831,7 +853,9 @@ enum btrfs_err_code {
BTRFS_ERROR_DEV_TGT_REPLACE,
BTRFS_ERROR_DEV_MISSING_NOT_FOUND,
BTRFS_ERROR_DEV_ONLY_WRITABLE,
- BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS
+ BTRFS_ERROR_DEV_EXCL_RUN_IN_PROGRESS,
+ BTRFS_ERROR_DEV_RAID1C3_MIN_NOT_MET,
+ BTRFS_ERROR_DEV_RAID1C4_MIN_NOT_MET,
};
#define BTRFS_IOC_SNAP_CREATE _IOW(BTRFS_IOCTL_MAGIC, 1, \
@@ -946,5 +970,7 @@ enum btrfs_err_code {
struct btrfs_ioctl_get_subvol_rootref_args)
#define BTRFS_IOC_INO_LOOKUP_USER _IOWR(BTRFS_IOCTL_MAGIC, 62, \
struct btrfs_ioctl_ino_lookup_user_args)
+#define BTRFS_IOC_SNAP_DESTROY_V2 _IOW(BTRFS_IOCTL_MAGIC, 63, \
+ struct btrfs_ioctl_vol_args_v2)
#endif /* _UAPI_LINUX_BTRFS_H */
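The new BTRFS_SUBVOL_SPEC_BY_ID flag and BTRFS_IOC_SNAP_DESTROY_V2 ioctl together allow deleting a subvolume by numeric id instead of by name. A hedged user-space sketch follows; the mount-point path and subvolume id are placeholder values, and deleting by id generally needs CAP_SYS_ADMIN:

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <linux/btrfs.h>

    int main(void)
    {
        struct btrfs_ioctl_vol_args_v2 args;
        int fd = open("/mnt/btrfs", O_RDONLY);    /* any directory on the filesystem */

        if (fd < 0)
            return 1;
        memset(&args, 0, sizeof(args));
        args.flags = BTRFS_SUBVOL_SPEC_BY_ID;     /* use args.subvolid, not args.name */
        args.subvolid = 256;                      /* placeholder subvolume id */
        if (ioctl(fd, BTRFS_IOC_SNAP_DESTROY_V2, &args) < 0)
            perror("BTRFS_IOC_SNAP_DESTROY_V2");
        close(fd);
        return 0;
    }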
diff --git a/include/uapi/linux/btrfs_tree.h b/include/uapi/linux/btrfs_tree.h
index b65c7ee75bc7..8e322e2c7e78 100644
--- a/include/uapi/linux/btrfs_tree.h
+++ b/include/uapi/linux/btrfs_tree.h
@@ -302,6 +302,9 @@
/* csum types */
enum btrfs_csum_type {
BTRFS_CSUM_TYPE_CRC32 = 0,
+ BTRFS_CSUM_TYPE_XXHASH = 1,
+ BTRFS_CSUM_TYPE_SHA256 = 2,
+ BTRFS_CSUM_TYPE_BLAKE2 = 3,
};
/*
@@ -737,10 +740,12 @@ struct btrfs_balance_item {
__le64 unused[4];
} __attribute__ ((__packed__));
-#define BTRFS_FILE_EXTENT_INLINE 0
-#define BTRFS_FILE_EXTENT_REG 1
-#define BTRFS_FILE_EXTENT_PREALLOC 2
-#define BTRFS_FILE_EXTENT_TYPES 2
+enum {
+ BTRFS_FILE_EXTENT_INLINE = 0,
+ BTRFS_FILE_EXTENT_REG = 1,
+ BTRFS_FILE_EXTENT_PREALLOC = 2,
+ BTRFS_NR_FILE_EXTENT_TYPES = 3,
+};
struct btrfs_file_extent_item {
/*
@@ -836,6 +841,8 @@ struct btrfs_dev_replace_item {
#define BTRFS_BLOCK_GROUP_RAID10 (1ULL << 6)
#define BTRFS_BLOCK_GROUP_RAID5 (1ULL << 7)
#define BTRFS_BLOCK_GROUP_RAID6 (1ULL << 8)
+#define BTRFS_BLOCK_GROUP_RAID1C3 (1ULL << 9)
+#define BTRFS_BLOCK_GROUP_RAID1C4 (1ULL << 10)
#define BTRFS_BLOCK_GROUP_RESERVED (BTRFS_AVAIL_ALLOC_BIT_SINGLE | \
BTRFS_SPACE_INFO_GLOBAL_RSV)
@@ -847,6 +854,8 @@ enum btrfs_raid_types {
BTRFS_RAID_SINGLE,
BTRFS_RAID_RAID5,
BTRFS_RAID_RAID6,
+ BTRFS_RAID_RAID1C3,
+ BTRFS_RAID_RAID1C4,
BTRFS_NR_RAID_TYPES
};
@@ -856,6 +865,8 @@ enum btrfs_raid_types {
#define BTRFS_BLOCK_GROUP_PROFILE_MASK (BTRFS_BLOCK_GROUP_RAID0 | \
BTRFS_BLOCK_GROUP_RAID1 | \
+ BTRFS_BLOCK_GROUP_RAID1C3 | \
+ BTRFS_BLOCK_GROUP_RAID1C4 | \
BTRFS_BLOCK_GROUP_RAID5 | \
BTRFS_BLOCK_GROUP_RAID6 | \
BTRFS_BLOCK_GROUP_DUP | \
@@ -863,7 +874,9 @@ enum btrfs_raid_types {
#define BTRFS_BLOCK_GROUP_RAID56_MASK (BTRFS_BLOCK_GROUP_RAID5 | \
BTRFS_BLOCK_GROUP_RAID6)
-#define BTRFS_BLOCK_GROUP_RAID1_MASK (BTRFS_BLOCK_GROUP_RAID1)
+#define BTRFS_BLOCK_GROUP_RAID1_MASK (BTRFS_BLOCK_GROUP_RAID1 | \
+ BTRFS_BLOCK_GROUP_RAID1C3 | \
+ BTRFS_BLOCK_GROUP_RAID1C4)
/*
* We need a bit for restriper to be able to tell when chunks of type
diff --git a/include/uapi/linux/capability.h b/include/uapi/linux/capability.h
index 240fdb9a60f6..272dc69fa080 100644
--- a/include/uapi/linux/capability.h
+++ b/include/uapi/linux/capability.h
@@ -301,6 +301,7 @@ struct vfs_ns_cap_data {
/* Allow more than 64hz interrupts from the real-time clock */
/* Override max number of consoles on console allocation */
/* Override max number of keymaps */
+/* Control memory reclaim behavior */
#define CAP_SYS_RESOURCE 24
diff --git a/include/uapi/linux/cec-funcs.h b/include/uapi/linux/cec-funcs.h
index 8997d5068c08..37590027b604 100644
--- a/include/uapi/linux/cec-funcs.h
+++ b/include/uapi/linux/cec-funcs.h
@@ -923,7 +923,8 @@ static inline void cec_msg_give_deck_status(struct cec_msg *msg,
msg->len = 3;
msg->msg[1] = CEC_MSG_GIVE_DECK_STATUS;
msg->msg[2] = status_req;
- msg->reply = reply ? CEC_MSG_DECK_STATUS : 0;
+ msg->reply = (reply && status_req != CEC_OP_STATUS_REQ_OFF) ?
+ CEC_MSG_DECK_STATUS : 0;
}
static inline void cec_ops_give_deck_status(const struct cec_msg *msg,
@@ -1027,7 +1028,8 @@ static inline void cec_msg_give_tuner_device_status(struct cec_msg *msg,
msg->len = 3;
msg->msg[1] = CEC_MSG_GIVE_TUNER_DEVICE_STATUS;
msg->msg[2] = status_req;
- msg->reply = reply ? CEC_MSG_TUNER_DEVICE_STATUS : 0;
+ msg->reply = (reply && status_req != CEC_OP_STATUS_REQ_OFF) ?
+ CEC_MSG_TUNER_DEVICE_STATUS : 0;
}
static inline void cec_ops_give_tuner_device_status(const struct cec_msg *msg,
@@ -1302,17 +1304,17 @@ static inline void cec_msg_user_control_pressed(struct cec_msg *msg,
if (!ui_cmd->has_opt_arg)
return;
switch (ui_cmd->ui_cmd) {
- case 0x56:
- case 0x57:
- case 0x60:
- case 0x68:
- case 0x69:
- case 0x6a:
+ case CEC_OP_UI_CMD_SELECT_BROADCAST_TYPE:
+ case CEC_OP_UI_CMD_SELECT_SOUND_PRESENTATION:
+ case CEC_OP_UI_CMD_PLAY_FUNCTION:
+ case CEC_OP_UI_CMD_SELECT_MEDIA_FUNCTION:
+ case CEC_OP_UI_CMD_SELECT_AV_INPUT_FUNCTION:
+ case CEC_OP_UI_CMD_SELECT_AUDIO_INPUT_FUNCTION:
/* The optional operand is one byte for all these ui commands */
msg->len++;
msg->msg[3] = ui_cmd->play_mode;
break;
- case 0x67:
+ case CEC_OP_UI_CMD_TUNE_FUNCTION:
msg->len += 4;
msg->msg[3] = (ui_cmd->channel_identifier.channel_number_fmt << 2) |
(ui_cmd->channel_identifier.major >> 8);
@@ -1331,17 +1333,17 @@ static inline void cec_ops_user_control_pressed(const struct cec_msg *msg,
if (msg->len == 3)
return;
switch (ui_cmd->ui_cmd) {
- case 0x56:
- case 0x57:
- case 0x60:
- case 0x68:
- case 0x69:
- case 0x6a:
+ case CEC_OP_UI_CMD_SELECT_BROADCAST_TYPE:
+ case CEC_OP_UI_CMD_SELECT_SOUND_PRESENTATION:
+ case CEC_OP_UI_CMD_PLAY_FUNCTION:
+ case CEC_OP_UI_CMD_SELECT_MEDIA_FUNCTION:
+ case CEC_OP_UI_CMD_SELECT_AV_INPUT_FUNCTION:
+ case CEC_OP_UI_CMD_SELECT_AUDIO_INPUT_FUNCTION:
/* The optional operand is one byte for all these ui commands */
ui_cmd->play_mode = msg->msg[3];
ui_cmd->has_opt_arg = 1;
break;
- case 0x67:
+ case CEC_OP_UI_CMD_TUNE_FUNCTION:
if (msg->len < 7)
break;
ui_cmd->has_opt_arg = 1;
diff --git a/include/uapi/linux/cec.h b/include/uapi/linux/cec.h
index 5704fa0292b5..7d1a06c52469 100644
--- a/include/uapi/linux/cec.h
+++ b/include/uapi/linux/cec.h
@@ -317,6 +317,8 @@ static inline int cec_is_unconfigured(__u16 log_addr_mask)
#define CEC_CAP_NEEDS_HPD (1 << 6)
/* Hardware can monitor CEC pin transitions */
#define CEC_CAP_MONITOR_PIN (1 << 7)
+/* CEC_ADAP_G_CONNECTOR_INFO is available */
+#define CEC_CAP_CONNECTOR_INFO (1 << 8)
/**
* struct cec_caps - CEC capabilities structure.
@@ -375,6 +377,34 @@ struct cec_log_addrs {
/* CDC-Only device: supports only CDC messages */
#define CEC_LOG_ADDRS_FL_CDC_ONLY (1 << 2)
+/**
+ * struct cec_drm_connector_info - tells which drm connector is
+ * associated with the CEC adapter.
+ * @card_no: drm card number
+ * @connector_id: drm connector ID
+ */
+struct cec_drm_connector_info {
+ __u32 card_no;
+ __u32 connector_id;
+};
+
+#define CEC_CONNECTOR_TYPE_NO_CONNECTOR 0
+#define CEC_CONNECTOR_TYPE_DRM 1
+
+/**
+ * struct cec_connector_info - tells if and which connector is
+ * associated with the CEC adapter.
+ * @type: connector type (if any)
+ * @drm: drm connector info
+ */
+struct cec_connector_info {
+ __u32 type;
+ union {
+ struct cec_drm_connector_info drm;
+ __u32 raw[16];
+ };
+};
+
/* Events */
/* Event that occurs when the adapter state changes */
@@ -398,10 +428,17 @@ struct cec_log_addrs {
* struct cec_event_state_change - used when the CEC adapter changes state.
* @phys_addr: the current physical address
* @log_addr_mask: the current logical address mask
+ * @have_conn_info: if non-zero, then HDMI connector information is available.
+ * This field is only valid if CEC_CAP_CONNECTOR_INFO is set. If that
+ * capability is set and @have_conn_info is zero, then that indicates
+ * that the HDMI connector device is not instantiated, either because
+ * the HDMI driver is still configuring the device or because the HDMI
+ * device was unbound.
*/
struct cec_event_state_change {
__u16 phys_addr;
__u16 log_addr_mask;
+ __u16 have_conn_info;
};
/**
@@ -476,6 +513,9 @@ struct cec_event {
#define CEC_G_MODE _IOR('a', 8, __u32)
#define CEC_S_MODE _IOW('a', 9, __u32)
+/* Get the connector info */
+#define CEC_ADAP_G_CONNECTOR_INFO _IOR('a', 10, struct cec_connector_info)
+
/*
* The remainder of this header defines all CEC messages and operands.
* The format matters since the cec-ctl utility parses it to generate
@@ -768,8 +808,8 @@ struct cec_event {
#define CEC_MSG_SELECT_DIGITAL_SERVICE 0x93
#define CEC_MSG_TUNER_DEVICE_STATUS 0x07
/* Recording Flag Operand (rec_flag) */
-#define CEC_OP_REC_FLAG_USED 0
-#define CEC_OP_REC_FLAG_NOT_USED 1
+#define CEC_OP_REC_FLAG_NOT_USED 0
+#define CEC_OP_REC_FLAG_USED 1
/* Tuner Display Info Operand (tuner_display_info) */
#define CEC_OP_TUNER_DISPLAY_INFO_DIGITAL 0
#define CEC_OP_TUNER_DISPLAY_INFO_NONE 1
@@ -820,6 +860,95 @@ struct cec_event {
#define CEC_OP_MENU_STATE_DEACTIVATED 0x01
#define CEC_MSG_USER_CONTROL_PRESSED 0x44
+/* UI Command Operand (ui_cmd) */
+#define CEC_OP_UI_CMD_SELECT 0x00
+#define CEC_OP_UI_CMD_UP 0x01
+#define CEC_OP_UI_CMD_DOWN 0x02
+#define CEC_OP_UI_CMD_LEFT 0x03
+#define CEC_OP_UI_CMD_RIGHT 0x04
+#define CEC_OP_UI_CMD_RIGHT_UP 0x05
+#define CEC_OP_UI_CMD_RIGHT_DOWN 0x06
+#define CEC_OP_UI_CMD_LEFT_UP 0x07
+#define CEC_OP_UI_CMD_LEFT_DOWN 0x08
+#define CEC_OP_UI_CMD_DEVICE_ROOT_MENU 0x09
+#define CEC_OP_UI_CMD_DEVICE_SETUP_MENU 0x0a
+#define CEC_OP_UI_CMD_CONTENTS_MENU 0x0b
+#define CEC_OP_UI_CMD_FAVORITE_MENU 0x0c
+#define CEC_OP_UI_CMD_BACK 0x0d
+#define CEC_OP_UI_CMD_MEDIA_TOP_MENU 0x10
+#define CEC_OP_UI_CMD_MEDIA_CONTEXT_SENSITIVE_MENU 0x11
+#define CEC_OP_UI_CMD_NUMBER_ENTRY_MODE 0x1d
+#define CEC_OP_UI_CMD_NUMBER_11 0x1e
+#define CEC_OP_UI_CMD_NUMBER_12 0x1f
+#define CEC_OP_UI_CMD_NUMBER_0_OR_NUMBER_10 0x20
+#define CEC_OP_UI_CMD_NUMBER_1 0x21
+#define CEC_OP_UI_CMD_NUMBER_2 0x22
+#define CEC_OP_UI_CMD_NUMBER_3 0x23
+#define CEC_OP_UI_CMD_NUMBER_4 0x24
+#define CEC_OP_UI_CMD_NUMBER_5 0x25
+#define CEC_OP_UI_CMD_NUMBER_6 0x26
+#define CEC_OP_UI_CMD_NUMBER_7 0x27
+#define CEC_OP_UI_CMD_NUMBER_8 0x28
+#define CEC_OP_UI_CMD_NUMBER_9 0x29
+#define CEC_OP_UI_CMD_DOT 0x2a
+#define CEC_OP_UI_CMD_ENTER 0x2b
+#define CEC_OP_UI_CMD_CLEAR 0x2c
+#define CEC_OP_UI_CMD_NEXT_FAVORITE 0x2f
+#define CEC_OP_UI_CMD_CHANNEL_UP 0x30
+#define CEC_OP_UI_CMD_CHANNEL_DOWN 0x31
+#define CEC_OP_UI_CMD_PREVIOUS_CHANNEL 0x32
+#define CEC_OP_UI_CMD_SOUND_SELECT 0x33
+#define CEC_OP_UI_CMD_INPUT_SELECT 0x34
+#define CEC_OP_UI_CMD_DISPLAY_INFORMATION 0x35
+#define CEC_OP_UI_CMD_HELP 0x36
+#define CEC_OP_UI_CMD_PAGE_UP 0x37
+#define CEC_OP_UI_CMD_PAGE_DOWN 0x38
+#define CEC_OP_UI_CMD_POWER 0x40
+#define CEC_OP_UI_CMD_VOLUME_UP 0x41
+#define CEC_OP_UI_CMD_VOLUME_DOWN 0x42
+#define CEC_OP_UI_CMD_MUTE 0x43
+#define CEC_OP_UI_CMD_PLAY 0x44
+#define CEC_OP_UI_CMD_STOP 0x45
+#define CEC_OP_UI_CMD_PAUSE 0x46
+#define CEC_OP_UI_CMD_RECORD 0x47
+#define CEC_OP_UI_CMD_REWIND 0x48
+#define CEC_OP_UI_CMD_FAST_FORWARD 0x49
+#define CEC_OP_UI_CMD_EJECT 0x4a
+#define CEC_OP_UI_CMD_SKIP_FORWARD 0x4b
+#define CEC_OP_UI_CMD_SKIP_BACKWARD 0x4c
+#define CEC_OP_UI_CMD_STOP_RECORD 0x4d
+#define CEC_OP_UI_CMD_PAUSE_RECORD 0x4e
+#define CEC_OP_UI_CMD_ANGLE 0x50
+#define CEC_OP_UI_CMD_SUB_PICTURE 0x51
+#define CEC_OP_UI_CMD_VIDEO_ON_DEMAND 0x52
+#define CEC_OP_UI_CMD_ELECTRONIC_PROGRAM_GUIDE 0x53
+#define CEC_OP_UI_CMD_TIMER_PROGRAMMING 0x54
+#define CEC_OP_UI_CMD_INITIAL_CONFIGURATION 0x55
+#define CEC_OP_UI_CMD_SELECT_BROADCAST_TYPE 0x56
+#define CEC_OP_UI_CMD_SELECT_SOUND_PRESENTATION 0x57
+#define CEC_OP_UI_CMD_AUDIO_DESCRIPTION 0x58
+#define CEC_OP_UI_CMD_INTERNET 0x59
+#define CEC_OP_UI_CMD_3D_MODE 0x5a
+#define CEC_OP_UI_CMD_PLAY_FUNCTION 0x60
+#define CEC_OP_UI_CMD_PAUSE_PLAY_FUNCTION 0x61
+#define CEC_OP_UI_CMD_RECORD_FUNCTION 0x62
+#define CEC_OP_UI_CMD_PAUSE_RECORD_FUNCTION 0x63
+#define CEC_OP_UI_CMD_STOP_FUNCTION 0x64
+#define CEC_OP_UI_CMD_MUTE_FUNCTION 0x65
+#define CEC_OP_UI_CMD_RESTORE_VOLUME_FUNCTION 0x66
+#define CEC_OP_UI_CMD_TUNE_FUNCTION 0x67
+#define CEC_OP_UI_CMD_SELECT_MEDIA_FUNCTION 0x68
+#define CEC_OP_UI_CMD_SELECT_AV_INPUT_FUNCTION 0x69
+#define CEC_OP_UI_CMD_SELECT_AUDIO_INPUT_FUNCTION 0x6a
+#define CEC_OP_UI_CMD_POWER_TOGGLE_FUNCTION 0x6b
+#define CEC_OP_UI_CMD_POWER_OFF_FUNCTION 0x6c
+#define CEC_OP_UI_CMD_POWER_ON_FUNCTION 0x6d
+#define CEC_OP_UI_CMD_F1_BLUE 0x71
+#define CEC_OP_UI_CMD_F2_RED 0x72
+#define CEC_OP_UI_CMD_F3_GREEN 0x73
+#define CEC_OP_UI_CMD_F4_YELLOW 0x74
+#define CEC_OP_UI_CMD_F5 0x75
+#define CEC_OP_UI_CMD_DATA 0x76
/* UI Broadcast Type Operand (ui_bcast_type) */
#define CEC_OP_UI_BCAST_TYPE_TOGGLE_ALL 0x00
#define CEC_OP_UI_BCAST_TYPE_TOGGLE_DIG_ANA 0x01
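The CEC_CAP_CONNECTOR_INFO capability and CEC_ADAP_G_CONNECTOR_INFO ioctl added above let user space map a CEC adapter back to its DRM connector. A hedged sketch; /dev/cec0 is an assumed device node:

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <linux/cec.h>

    int main(void)
    {
        struct cec_caps caps;
        struct cec_connector_info conn;
        int fd = open("/dev/cec0", O_RDWR);

        if (fd < 0)
            return 1;
        /* Only issue the new ioctl if the adapter advertises the capability. */
        if (ioctl(fd, CEC_ADAP_G_CAPS, &caps) == 0 &&
            (caps.capabilities & CEC_CAP_CONNECTOR_INFO) &&
            ioctl(fd, CEC_ADAP_G_CONNECTOR_INFO, &conn) == 0 &&
            conn.type == CEC_CONNECTOR_TYPE_DRM)
                printf("drm card %u, connector %u\n",
                       conn.drm.card_no, conn.drm.connector_id);
        close(fd);
        return 0;
    }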
diff --git a/include/uapi/linux/chio.h b/include/uapi/linux/chio.h
index 689fc93fafda..e1cad4c319ee 100644
--- a/include/uapi/linux/chio.h
+++ b/include/uapi/linux/chio.h
@@ -3,6 +3,9 @@
* ioctl interface for the scsi media changer driver
*/
+#ifndef _UAPI_LINUX_CHIO_H
+#define _UAPI_LINUX_CHIO_H
+
/* changer element types */
#define CHET_MT 0 /* media transport element (robot) */
#define CHET_ST 1 /* storage element (media slots) */
@@ -160,10 +163,4 @@ struct changer_set_voltag {
#define CHIOSVOLTAG _IOW('c',18,struct changer_set_voltag)
#define CHIOGVPARAMS _IOR('c',19,struct changer_vendor_params)
-/* ---------------------------------------------------------------------- */
-
-/*
- * Local variables:
- * c-basic-offset: 8
- * End:
- */
+#endif /* _UAPI_LINUX_CHIO_H */
diff --git a/include/uapi/linux/cyclades.h b/include/uapi/linux/cyclades.h
index 8279bc3d60ca..fc0add2194a9 100644
--- a/include/uapi/linux/cyclades.h
+++ b/include/uapi/linux/cyclades.h
@@ -83,9 +83,9 @@ struct cyclades_monitor {
* open)
*/
struct cyclades_idle_stats {
- __kernel_time_t in_use; /* Time device has been in use (secs) */
- __kernel_time_t recv_idle; /* Time since last char received (secs) */
- __kernel_time_t xmit_idle; /* Time since last char transmitted (secs) */
+ __kernel_old_time_t in_use; /* Time device has been in use (secs) */
+ __kernel_old_time_t recv_idle; /* Time since last char received (secs) */
+ __kernel_old_time_t xmit_idle; /* Time since last char transmitted (secs) */
unsigned long recv_bytes; /* Bytes received */
unsigned long xmit_bytes; /* Bytes transmitted */
unsigned long overruns; /* Input overruns */
diff --git a/include/uapi/linux/dcbnl.h b/include/uapi/linux/dcbnl.h
index 69df19aa8e72..a791a94013a6 100644
--- a/include/uapi/linux/dcbnl.h
+++ b/include/uapi/linux/dcbnl.h
@@ -286,7 +286,7 @@ struct dcbmsg {
* @DCB_CMD_GNUMTCS: get the number of traffic classes currently supported
* @DCB_CMD_SNUMTCS: set the number of traffic classes
* @DCB_CMD_GBCN: set backward congestion notification configuration
- * @DCB_CMD_SBCN: get backward congestion notification configration.
+ * @DCB_CMD_SBCN: get backward congestion notification configuration.
* @DCB_CMD_GAPP: get application protocol configuration
* @DCB_CMD_SAPP: set application protocol configuration
* @DCB_CMD_IEEE_SET: set IEEE 802.1Qaz configuration
diff --git a/include/uapi/linux/devlink.h b/include/uapi/linux/devlink.h
index a8a2174db030..ae37fd4d194a 100644
--- a/include/uapi/linux/devlink.h
+++ b/include/uapi/linux/devlink.h
@@ -422,6 +422,10 @@ enum devlink_attr {
DEVLINK_ATTR_RELOAD_FAILED, /* u8 0 or 1 */
DEVLINK_ATTR_HEALTH_REPORTER_DUMP_TS_NS, /* u64 */
+
+ DEVLINK_ATTR_NETNS_FD, /* u32 */
+ DEVLINK_ATTR_NETNS_PID, /* u32 */
+ DEVLINK_ATTR_NETNS_ID, /* u32 */
/* add new attributes above here, update the policy in devlink.c */
__DEVLINK_ATTR_MAX,
diff --git a/include/uapi/linux/dm-ioctl.h b/include/uapi/linux/dm-ioctl.h
index 2df8ceca1f9b..6622912c2342 100644
--- a/include/uapi/linux/dm-ioctl.h
+++ b/include/uapi/linux/dm-ioctl.h
@@ -272,9 +272,9 @@ enum {
#define DM_DEV_SET_GEOMETRY _IOWR(DM_IOCTL, DM_DEV_SET_GEOMETRY_CMD, struct dm_ioctl)
#define DM_VERSION_MAJOR 4
-#define DM_VERSION_MINOR 41
+#define DM_VERSION_MINOR 42
#define DM_VERSION_PATCHLEVEL 0
-#define DM_VERSION_EXTRA "-ioctl (2019-09-16)"
+#define DM_VERSION_EXTRA "-ioctl (2020-02-27)"
/* Status bits */
#define DM_READONLY_FLAG (1 << 0) /* In/Out */
diff --git a/include/uapi/linux/dma-heap.h b/include/uapi/linux/dma-heap.h
new file mode 100644
index 000000000000..6f84fa08e074
--- /dev/null
+++ b/include/uapi/linux/dma-heap.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * DMABUF Heaps Userspace API
+ *
+ * Copyright (C) 2011 Google, Inc.
+ * Copyright (C) 2019 Linaro Ltd.
+ */
+#ifndef _UAPI_LINUX_DMABUF_POOL_H
+#define _UAPI_LINUX_DMABUF_POOL_H
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+/**
+ * DOC: DMABUF Heaps Userspace API
+ */
+
+/* Valid FD_FLAGS are O_CLOEXEC, O_RDONLY, O_WRONLY, O_RDWR */
+#define DMA_HEAP_VALID_FD_FLAGS (O_CLOEXEC | O_ACCMODE)
+
+/* Currently no heap flags */
+#define DMA_HEAP_VALID_HEAP_FLAGS (0)
+
+/**
+ * struct dma_heap_allocation_data - metadata passed from userspace for
+ * allocations
+ * @len: size of the allocation
+ * @fd: will be populated with a fd which provides the
+ * handle to the allocated dma-buf
+ * @fd_flags: file descriptor flags used when allocating
+ * @heap_flags: flags passed to heap
+ *
+ * Provided by userspace as an argument to the ioctl
+ */
+struct dma_heap_allocation_data {
+ __u64 len;
+ __u32 fd;
+ __u32 fd_flags;
+ __u64 heap_flags;
+};
+
+#define DMA_HEAP_IOC_MAGIC 'H'
+
+/**
+ * DOC: DMA_HEAP_IOCTL_ALLOC - allocate memory from pool
+ *
+ * Takes a dma_heap_allocation_data struct and returns it with the fd field
+ * populated with the dmabuf handle of the allocation.
+ */
+#define DMA_HEAP_IOCTL_ALLOC _IOWR(DMA_HEAP_IOC_MAGIC, 0x0,\
+ struct dma_heap_allocation_data)
+
+#endif /* _UAPI_LINUX_DMABUF_POOL_H */
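The allocation flow for the new dma-heap character devices is: open the heap node, fill a struct dma_heap_allocation_data, and call DMA_HEAP_IOCTL_ALLOC to receive a dma-buf fd. A hedged sketch; the heap name "system" is an example and depends on which heaps the kernel exposes under /dev/dma_heap/:

    #include <fcntl.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <linux/dma-heap.h>

    int main(void)
    {
        struct dma_heap_allocation_data data;
        int heap_fd = open("/dev/dma_heap/system", O_RDWR);  /* example heap */

        if (heap_fd < 0)
            return 1;
        memset(&data, 0, sizeof(data));
        data.len = 4096;                      /* one page */
        data.fd_flags = O_RDWR | O_CLOEXEC;   /* must stay within DMA_HEAP_VALID_FD_FLAGS */
        data.heap_flags = 0;                  /* no heap flags defined yet */
        if (ioctl(heap_fd, DMA_HEAP_IOCTL_ALLOC, &data) == 0)
            close(data.fd);                   /* data.fd is the dma-buf handle */
        close(heap_fd);
        return 0;
    }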
diff --git a/include/uapi/linux/elfcore.h b/include/uapi/linux/elfcore.h
index 0b2c9e16e345..baf03562306d 100644
--- a/include/uapi/linux/elfcore.h
+++ b/include/uapi/linux/elfcore.h
@@ -53,10 +53,10 @@ struct elf_prstatus
pid_t pr_ppid;
pid_t pr_pgrp;
pid_t pr_sid;
- struct timeval pr_utime; /* User time */
- struct timeval pr_stime; /* System time */
- struct timeval pr_cutime; /* Cumulative user time */
- struct timeval pr_cstime; /* Cumulative system time */
+ struct __kernel_old_timeval pr_utime; /* User time */
+ struct __kernel_old_timeval pr_stime; /* System time */
+ struct __kernel_old_timeval pr_cutime; /* Cumulative user time */
+ struct __kernel_old_timeval pr_cstime; /* Cumulative system time */
#if 0
long pr_instr; /* Current instruction */
#endif
diff --git a/include/uapi/linux/errqueue.h b/include/uapi/linux/errqueue.h
index 28491dac074b..0cca19670fd2 100644
--- a/include/uapi/linux/errqueue.h
+++ b/include/uapi/linux/errqueue.h
@@ -37,9 +37,16 @@ struct sock_extended_err {
* The timestamping interfaces SO_TIMESTAMPING, MSG_TSTAMP_*
* communicate network timestamps by passing this struct in a cmsg with
* recvmsg(). See Documentation/networking/timestamping.txt for details.
+ * User space sees a timespec definition that matches either
+ * __kernel_timespec or __kernel_old_timespec; in the kernel we
+ * require two structure definitions to provide both.
*/
struct scm_timestamping {
+#ifdef __KERNEL__
+ struct __kernel_old_timespec ts[3];
+#else
struct timespec ts[3];
+#endif
};
struct scm_timestamping64 {
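For orientation, user space consumes struct scm_timestamping as SCM_TIMESTAMPING control data returned by recvmsg(). A hedged sketch, assuming SO_TIMESTAMPING has already been enabled on the socket:

    #include <stdio.h>
    #include <string.h>
    #include <sys/socket.h>
    #include <linux/errqueue.h>

    #ifndef SCM_TIMESTAMPING
    #define SCM_TIMESTAMPING SO_TIMESTAMPING
    #endif

    static void print_timestamps(struct msghdr *msg)
    {
        struct scm_timestamping tss;
        struct cmsghdr *cmsg;

        for (cmsg = CMSG_FIRSTHDR(msg); cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) {
            if (cmsg->cmsg_level != SOL_SOCKET ||
                cmsg->cmsg_type != SCM_TIMESTAMPING)
                continue;
            memcpy(&tss, CMSG_DATA(cmsg), sizeof(tss));
            /* ts[0] is the software timestamp, ts[2] the hardware one */
            printf("sw %lld.%09ld\n",
                   (long long)tss.ts[0].tv_sec, tss.ts[0].tv_nsec);
        }
    }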
diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h
index 8938b76c4ee3..4295ebfa2f91 100644
--- a/include/uapi/linux/ethtool.h
+++ b/include/uapi/linux/ethtool.h
@@ -593,6 +593,9 @@ struct ethtool_pauseparam {
* @ETH_SS_RSS_HASH_FUNCS: RSS hash function names
* @ETH_SS_PHY_STATS: Statistic names, for use with %ETHTOOL_GPHYSTATS
* @ETH_SS_PHY_TUNABLES: PHY tunable names
+ * @ETH_SS_LINK_MODES: link mode names
+ * @ETH_SS_MSG_CLASSES: debug message class names
+ * @ETH_SS_WOL_MODES: wake-on-lan modes
*/
enum ethtool_stringset {
ETH_SS_TEST = 0,
@@ -604,6 +607,12 @@ enum ethtool_stringset {
ETH_SS_TUNABLES,
ETH_SS_PHY_STATS,
ETH_SS_PHY_TUNABLES,
+ ETH_SS_LINK_MODES,
+ ETH_SS_MSG_CLASSES,
+ ETH_SS_WOL_MODES,
+
+ /* add new constants above here */
+ ETH_SS_COUNT
};
/**
@@ -1507,6 +1516,11 @@ enum ethtool_link_mode_bit_indices {
ETHTOOL_LINK_MODE_200000baseCR4_Full_BIT = 66,
ETHTOOL_LINK_MODE_100baseT1_Full_BIT = 67,
ETHTOOL_LINK_MODE_1000baseT1_Full_BIT = 68,
+ ETHTOOL_LINK_MODE_400000baseKR8_Full_BIT = 69,
+ ETHTOOL_LINK_MODE_400000baseSR8_Full_BIT = 70,
+ ETHTOOL_LINK_MODE_400000baseLR8_ER8_FR8_Full_BIT = 71,
+ ETHTOOL_LINK_MODE_400000baseDR8_Full_BIT = 72,
+ ETHTOOL_LINK_MODE_400000baseCR8_Full_BIT = 73,
/* must be last entry */
__ETHTOOL_LINK_MODE_MASK_NBITS
@@ -1618,6 +1632,7 @@ enum ethtool_link_mode_bit_indices {
#define SPEED_56000 56000
#define SPEED_100000 100000
#define SPEED_200000 200000
+#define SPEED_400000 400000
#define SPEED_UNKNOWN -1
@@ -1682,6 +1697,8 @@ static inline int ethtool_validate_duplex(__u8 duplex)
#define WAKE_MAGICSECURE (1 << 6) /* only meaningful if WAKE_MAGIC */
#define WAKE_FILTER (1 << 7)
+#define WOL_MODE_COUNT 8
+
/* L2-L4 network traffic flow types */
#define TCP_V4_FLOW 0x01 /* hash or spec (tcp_ip4_spec) */
#define UDP_V4_FLOW 0x02 /* hash or spec (udp_ip4_spec) */
diff --git a/include/uapi/linux/ethtool_netlink.h b/include/uapi/linux/ethtool_netlink.h
new file mode 100644
index 000000000000..7e0b460f872c
--- /dev/null
+++ b/include/uapi/linux/ethtool_netlink.h
@@ -0,0 +1,237 @@
+/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
+/*
+ * include/uapi/linux/ethtool_netlink.h - netlink interface for ethtool
+ *
+ * See Documentation/networking/ethtool-netlink.txt in kernel source tree for
+ * documentation of the interface.
+ */
+
+#ifndef _UAPI_LINUX_ETHTOOL_NETLINK_H_
+#define _UAPI_LINUX_ETHTOOL_NETLINK_H_
+
+#include <linux/ethtool.h>
+
+/* message types - userspace to kernel */
+enum {
+ ETHTOOL_MSG_USER_NONE,
+ ETHTOOL_MSG_STRSET_GET,
+ ETHTOOL_MSG_LINKINFO_GET,
+ ETHTOOL_MSG_LINKINFO_SET,
+ ETHTOOL_MSG_LINKMODES_GET,
+ ETHTOOL_MSG_LINKMODES_SET,
+ ETHTOOL_MSG_LINKSTATE_GET,
+ ETHTOOL_MSG_DEBUG_GET,
+ ETHTOOL_MSG_DEBUG_SET,
+ ETHTOOL_MSG_WOL_GET,
+ ETHTOOL_MSG_WOL_SET,
+
+ /* add new constants above here */
+ __ETHTOOL_MSG_USER_CNT,
+ ETHTOOL_MSG_USER_MAX = __ETHTOOL_MSG_USER_CNT - 1
+};
+
+/* message types - kernel to userspace */
+enum {
+ ETHTOOL_MSG_KERNEL_NONE,
+ ETHTOOL_MSG_STRSET_GET_REPLY,
+ ETHTOOL_MSG_LINKINFO_GET_REPLY,
+ ETHTOOL_MSG_LINKINFO_NTF,
+ ETHTOOL_MSG_LINKMODES_GET_REPLY,
+ ETHTOOL_MSG_LINKMODES_NTF,
+ ETHTOOL_MSG_LINKSTATE_GET_REPLY,
+ ETHTOOL_MSG_DEBUG_GET_REPLY,
+ ETHTOOL_MSG_DEBUG_NTF,
+ ETHTOOL_MSG_WOL_GET_REPLY,
+ ETHTOOL_MSG_WOL_NTF,
+
+ /* add new constants above here */
+ __ETHTOOL_MSG_KERNEL_CNT,
+ ETHTOOL_MSG_KERNEL_MAX = __ETHTOOL_MSG_KERNEL_CNT - 1
+};
+
+/* request header */
+
+/* use compact bitsets in reply */
+#define ETHTOOL_FLAG_COMPACT_BITSETS (1 << 0)
+/* provide optional reply for SET or ACT requests */
+#define ETHTOOL_FLAG_OMIT_REPLY (1 << 1)
+
+#define ETHTOOL_FLAG_ALL (ETHTOOL_FLAG_COMPACT_BITSETS | \
+ ETHTOOL_FLAG_OMIT_REPLY)
+
+enum {
+ ETHTOOL_A_HEADER_UNSPEC,
+ ETHTOOL_A_HEADER_DEV_INDEX, /* u32 */
+ ETHTOOL_A_HEADER_DEV_NAME, /* string */
+ ETHTOOL_A_HEADER_FLAGS, /* u32 - ETHTOOL_FLAG_* */
+
+ /* add new constants above here */
+ __ETHTOOL_A_HEADER_CNT,
+ ETHTOOL_A_HEADER_MAX = __ETHTOOL_A_HEADER_CNT - 1
+};
+
+/* bit sets */
+
+enum {
+ ETHTOOL_A_BITSET_BIT_UNSPEC,
+ ETHTOOL_A_BITSET_BIT_INDEX, /* u32 */
+ ETHTOOL_A_BITSET_BIT_NAME, /* string */
+ ETHTOOL_A_BITSET_BIT_VALUE, /* flag */
+
+ /* add new constants above here */
+ __ETHTOOL_A_BITSET_BIT_CNT,
+ ETHTOOL_A_BITSET_BIT_MAX = __ETHTOOL_A_BITSET_BIT_CNT - 1
+};
+
+enum {
+ ETHTOOL_A_BITSET_BITS_UNSPEC,
+ ETHTOOL_A_BITSET_BITS_BIT, /* nest - _A_BITSET_BIT_* */
+
+ /* add new constants above here */
+ __ETHTOOL_A_BITSET_BITS_CNT,
+ ETHTOOL_A_BITSET_BITS_MAX = __ETHTOOL_A_BITSET_BITS_CNT - 1
+};
+
+enum {
+ ETHTOOL_A_BITSET_UNSPEC,
+ ETHTOOL_A_BITSET_NOMASK, /* flag */
+ ETHTOOL_A_BITSET_SIZE, /* u32 */
+ ETHTOOL_A_BITSET_BITS, /* nest - _A_BITSET_BITS_* */
+ ETHTOOL_A_BITSET_VALUE, /* binary */
+ ETHTOOL_A_BITSET_MASK, /* binary */
+
+ /* add new constants above here */
+ __ETHTOOL_A_BITSET_CNT,
+ ETHTOOL_A_BITSET_MAX = __ETHTOOL_A_BITSET_CNT - 1
+};
+
+/* string sets */
+
+enum {
+ ETHTOOL_A_STRING_UNSPEC,
+ ETHTOOL_A_STRING_INDEX, /* u32 */
+ ETHTOOL_A_STRING_VALUE, /* string */
+
+ /* add new constants above here */
+ __ETHTOOL_A_STRING_CNT,
+ ETHTOOL_A_STRING_MAX = __ETHTOOL_A_STRING_CNT - 1
+};
+
+enum {
+ ETHTOOL_A_STRINGS_UNSPEC,
+ ETHTOOL_A_STRINGS_STRING, /* nest - _A_STRINGS_* */
+
+ /* add new constants above here */
+ __ETHTOOL_A_STRINGS_CNT,
+ ETHTOOL_A_STRINGS_MAX = __ETHTOOL_A_STRINGS_CNT - 1
+};
+
+enum {
+ ETHTOOL_A_STRINGSET_UNSPEC,
+ ETHTOOL_A_STRINGSET_ID, /* u32 */
+ ETHTOOL_A_STRINGSET_COUNT, /* u32 */
+ ETHTOOL_A_STRINGSET_STRINGS, /* nest - _A_STRINGS_* */
+
+ /* add new constants above here */
+ __ETHTOOL_A_STRINGSET_CNT,
+ ETHTOOL_A_STRINGSET_MAX = __ETHTOOL_A_STRINGSET_CNT - 1
+};
+
+enum {
+ ETHTOOL_A_STRINGSETS_UNSPEC,
+ ETHTOOL_A_STRINGSETS_STRINGSET, /* nest - _A_STRINGSET_* */
+
+ /* add new constants above here */
+ __ETHTOOL_A_STRINGSETS_CNT,
+ ETHTOOL_A_STRINGSETS_MAX = __ETHTOOL_A_STRINGSETS_CNT - 1
+};
+
+/* STRSET */
+
+enum {
+ ETHTOOL_A_STRSET_UNSPEC,
+ ETHTOOL_A_STRSET_HEADER, /* nest - _A_HEADER_* */
+ ETHTOOL_A_STRSET_STRINGSETS, /* nest - _A_STRINGSETS_* */
+ ETHTOOL_A_STRSET_COUNTS_ONLY, /* flag */
+
+ /* add new constants above here */
+ __ETHTOOL_A_STRSET_CNT,
+ ETHTOOL_A_STRSET_MAX = __ETHTOOL_A_STRSET_CNT - 1
+};
+
+/* LINKINFO */
+
+enum {
+ ETHTOOL_A_LINKINFO_UNSPEC,
+ ETHTOOL_A_LINKINFO_HEADER, /* nest - _A_HEADER_* */
+ ETHTOOL_A_LINKINFO_PORT, /* u8 */
+ ETHTOOL_A_LINKINFO_PHYADDR, /* u8 */
+ ETHTOOL_A_LINKINFO_TP_MDIX, /* u8 */
+ ETHTOOL_A_LINKINFO_TP_MDIX_CTRL, /* u8 */
+ ETHTOOL_A_LINKINFO_TRANSCEIVER, /* u8 */
+
+ /* add new constants above here */
+ __ETHTOOL_A_LINKINFO_CNT,
+ ETHTOOL_A_LINKINFO_MAX = __ETHTOOL_A_LINKINFO_CNT - 1
+};
+
+/* LINKMODES */
+
+enum {
+ ETHTOOL_A_LINKMODES_UNSPEC,
+ ETHTOOL_A_LINKMODES_HEADER, /* nest - _A_HEADER_* */
+ ETHTOOL_A_LINKMODES_AUTONEG, /* u8 */
+ ETHTOOL_A_LINKMODES_OURS, /* bitset */
+ ETHTOOL_A_LINKMODES_PEER, /* bitset */
+ ETHTOOL_A_LINKMODES_SPEED, /* u32 */
+ ETHTOOL_A_LINKMODES_DUPLEX, /* u8 */
+
+ /* add new constants above here */
+ __ETHTOOL_A_LINKMODES_CNT,
+ ETHTOOL_A_LINKMODES_MAX = __ETHTOOL_A_LINKMODES_CNT - 1
+};
+
+/* LINKSTATE */
+
+enum {
+ ETHTOOL_A_LINKSTATE_UNSPEC,
+ ETHTOOL_A_LINKSTATE_HEADER, /* nest - _A_HEADER_* */
+ ETHTOOL_A_LINKSTATE_LINK, /* u8 */
+
+ /* add new constants above here */
+ __ETHTOOL_A_LINKSTATE_CNT,
+ ETHTOOL_A_LINKSTATE_MAX = __ETHTOOL_A_LINKSTATE_CNT - 1
+};
+
+/* DEBUG */
+
+enum {
+ ETHTOOL_A_DEBUG_UNSPEC,
+ ETHTOOL_A_DEBUG_HEADER, /* nest - _A_HEADER_* */
+ ETHTOOL_A_DEBUG_MSGMASK, /* bitset */
+
+ /* add new constants above here */
+ __ETHTOOL_A_DEBUG_CNT,
+ ETHTOOL_A_DEBUG_MAX = __ETHTOOL_A_DEBUG_CNT - 1
+};
+
+/* WOL */
+
+enum {
+ ETHTOOL_A_WOL_UNSPEC,
+ ETHTOOL_A_WOL_HEADER, /* nest - _A_HEADER_* */
+ ETHTOOL_A_WOL_MODES, /* bitset */
+ ETHTOOL_A_WOL_SOPASS, /* binary */
+
+ /* add new constants above here */
+ __ETHTOOL_A_WOL_CNT,
+ ETHTOOL_A_WOL_MAX = __ETHTOOL_A_WOL_CNT - 1
+};
+
+/* generic netlink info */
+#define ETHTOOL_GENL_NAME "ethtool"
+#define ETHTOOL_GENL_VERSION 1
+
+#define ETHTOOL_MCGRP_MONITOR_NAME "monitor"
+
+#endif /* _UAPI_LINUX_ETHTOOL_NETLINK_H_ */
diff --git a/include/uapi/linux/fcntl.h b/include/uapi/linux/fcntl.h
index 1d338357df8a..ca88b7bce553 100644
--- a/include/uapi/linux/fcntl.h
+++ b/include/uapi/linux/fcntl.h
@@ -3,6 +3,7 @@
#define _UAPI_LINUX_FCNTL_H
#include <asm/fcntl.h>
+#include <linux/openat2.h>
#define F_SETLEASE (F_LINUX_SPECIFIC_BASE + 0)
#define F_GETLEASE (F_LINUX_SPECIFIC_BASE + 1)
@@ -58,7 +59,7 @@
* Valid hint values for F_{GET,SET}_RW_HINT. 0 is "not set", or can be
* used to clear any hints previously set.
*/
-#define RWF_WRITE_LIFE_NOT_SET 0
+#define RWH_WRITE_LIFE_NOT_SET 0
#define RWH_WRITE_LIFE_NONE 1
#define RWH_WRITE_LIFE_SHORT 2
#define RWH_WRITE_LIFE_MEDIUM 3
@@ -66,6 +67,13 @@
#define RWH_WRITE_LIFE_EXTREME 5
/*
+ * The originally introduced spelling is retained from the first
+ * versions of the patch set that introduced the feature, see commit
+ * v4.13-rc1~212^2~51.
+ */
+#define RWF_WRITE_LIFE_NOT_SET RWH_WRITE_LIFE_NOT_SET
+
+/*
* Types of directory notifications that may be requested.
*/
#define DN_ACCESS 0x00000001 /* File accessed */
@@ -93,5 +101,4 @@
#define AT_RECURSIVE 0x8000 /* Apply to the entire subtree */
-
#endif /* _UAPI_LINUX_FCNTL_H */
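The RWH_* lifetime values are applied through the F_SET_RW_HINT / F_GET_RW_HINT fcntl commands, whose argument is a pointer to a 64-bit hint. A hedged sketch; the fallback defines only restate values from this header in case the libc headers predate them:

    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>

    #ifndef F_SET_RW_HINT
    #define F_SET_RW_HINT (1024 + 12)    /* F_LINUX_SPECIFIC_BASE + 12 */
    #endif
    #ifndef RWH_WRITE_LIFE_SHORT
    #define RWH_WRITE_LIFE_SHORT 2
    #endif

    /* Tag a file's data as short-lived, e.g. for flash-friendly placement. */
    int set_short_lived(int fd)
    {
        uint64_t hint = RWH_WRITE_LIFE_SHORT;

        if (fcntl(fd, F_SET_RW_HINT, &hint) < 0) {
            perror("F_SET_RW_HINT");
            return -1;
        }
        return 0;
    }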
diff --git a/include/uapi/linux/fdreg.h b/include/uapi/linux/fdreg.h
index 5e2981d5c523..1318881954e1 100644
--- a/include/uapi/linux/fdreg.h
+++ b/include/uapi/linux/fdreg.h
@@ -7,26 +7,18 @@
* Handbook", Sanches and Canton.
*/
-#ifdef FDPATCHES
-#define FD_IOPORT fdc_state[fdc].address
-#else
-/* It would be a lot saner just to force fdc_state[fdc].address to always
- be set ! FIXME */
-#define FD_IOPORT 0x3f0
-#endif
-
/* Fd controller regs. S&C, about page 340 */
-#define FD_STATUS (4 + FD_IOPORT )
-#define FD_DATA (5 + FD_IOPORT )
+#define FD_STATUS 4
+#define FD_DATA 5
/* Digital Output Register */
-#define FD_DOR (2 + FD_IOPORT )
+#define FD_DOR 2
/* Digital Input Register (read) */
-#define FD_DIR (7 + FD_IOPORT )
+#define FD_DIR 7
/* Diskette Control Register (write)*/
-#define FD_DCR (7 + FD_IOPORT )
+#define FD_DCR 7
/* Bits of main status register */
#define STATUS_BUSYMASK 0x0F /* drive busy mask */
diff --git a/include/uapi/linux/fscrypt.h b/include/uapi/linux/fscrypt.h
index 39ccfe9311c3..a10e3cdc2839 100644
--- a/include/uapi/linux/fscrypt.h
+++ b/include/uapi/linux/fscrypt.h
@@ -8,6 +8,7 @@
#ifndef _UAPI_LINUX_FSCRYPT_H
#define _UAPI_LINUX_FSCRYPT_H
+#include <linux/ioctl.h>
#include <linux/types.h>
/* Encryption policy flags */
@@ -17,7 +18,8 @@
#define FSCRYPT_POLICY_FLAGS_PAD_32 0x03
#define FSCRYPT_POLICY_FLAGS_PAD_MASK 0x03
#define FSCRYPT_POLICY_FLAG_DIRECT_KEY 0x04
-#define FSCRYPT_POLICY_FLAGS_VALID 0x07
+#define FSCRYPT_POLICY_FLAG_IV_INO_LBLK_64 0x08
+#define FSCRYPT_POLICY_FLAGS_VALID 0x0F
/* Encryption algorithms */
#define FSCRYPT_MODE_AES_256_XTS 1
@@ -108,11 +110,22 @@ struct fscrypt_key_specifier {
} u;
};
+/*
+ * Payload of Linux keyring key of type "fscrypt-provisioning", referenced by
+ * fscrypt_add_key_arg::key_id as an alternative to fscrypt_add_key_arg::raw.
+ */
+struct fscrypt_provisioning_key_payload {
+ __u32 type;
+ __u32 __reserved;
+ __u8 raw[];
+};
+
/* Struct passed to FS_IOC_ADD_ENCRYPTION_KEY */
struct fscrypt_add_key_arg {
struct fscrypt_key_specifier key_spec;
__u32 raw_size;
- __u32 __reserved[9];
+ __u32 key_id;
+ __u32 __reserved[8];
__u8 raw[];
};
@@ -150,6 +163,7 @@ struct fscrypt_get_key_status_arg {
#define FS_IOC_REMOVE_ENCRYPTION_KEY _IOWR('f', 24, struct fscrypt_remove_key_arg)
#define FS_IOC_REMOVE_ENCRYPTION_KEY_ALL_USERS _IOWR('f', 25, struct fscrypt_remove_key_arg)
#define FS_IOC_GET_ENCRYPTION_KEY_STATUS _IOWR('f', 26, struct fscrypt_get_key_status_arg)
+#define FS_IOC_GET_ENCRYPTION_NONCE _IOR('f', 27, __u8[16])
/**********************************************************************/
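The new FS_IOC_GET_ENCRYPTION_NONCE ioctl returns the 16-byte per-file nonce and is primarily intended for tests that verify on-disk ciphertext. A hedged sketch:

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <linux/fscrypt.h>

    int main(int argc, char **argv)
    {
        __u8 nonce[16];
        int fd, i;

        if (argc != 2 || (fd = open(argv[1], O_RDONLY)) < 0)
            return 1;
        if (ioctl(fd, FS_IOC_GET_ENCRYPTION_NONCE, nonce) == 0) {
            for (i = 0; i < 16; i++)
                printf("%02x", nonce[i]);
            printf("\n");
        }
        close(fd);
        return 0;
    }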
diff --git a/include/uapi/linux/gen_stats.h b/include/uapi/linux/gen_stats.h
index 065408e16a80..852f234f1fd6 100644
--- a/include/uapi/linux/gen_stats.h
+++ b/include/uapi/linux/gen_stats.h
@@ -13,6 +13,7 @@ enum {
TCA_STATS_RATE_EST64,
TCA_STATS_PAD,
TCA_STATS_BASIC_HW,
+ TCA_STATS_PKT64,
__TCA_STATS_MAX,
};
#define TCA_STATS_MAX (__TCA_STATS_MAX - 1)
@@ -26,10 +27,6 @@ struct gnet_stats_basic {
__u64 bytes;
__u32 packets;
};
-struct gnet_stats_basic_packed {
- __u64 bytes;
- __u32 packets;
-} __attribute__ ((packed));
/**
* struct gnet_stats_rate_est - rate estimator
diff --git a/include/uapi/linux/gigaset_dev.h b/include/uapi/linux/gigaset_dev.h
deleted file mode 100644
index 279551af8ebf..000000000000
--- a/include/uapi/linux/gigaset_dev.h
+++ /dev/null
@@ -1,39 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0+ WITH Linux-syscall-note */
-/*
- * interface to user space for the gigaset driver
- *
- * Copyright (c) 2004 by Hansjoerg Lipp <hjlipp@web.de>
- *
- * =====================================================================
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation; either version 2 of
- * the License, or (at your option) any later version.
- * =====================================================================
- */
-
-#ifndef GIGASET_INTERFACE_H
-#define GIGASET_INTERFACE_H
-
-#include <linux/ioctl.h>
-
-/* The magic IOCTL value for this interface. */
-#define GIGASET_IOCTL 0x47
-
-/* enable/disable device control via character device (lock out ISDN subsys) */
-#define GIGASET_REDIR _IOWR(GIGASET_IOCTL, 0, int)
-
-/* enable adapter configuration mode (M10x only) */
-#define GIGASET_CONFIG _IOWR(GIGASET_IOCTL, 1, int)
-
-/* set break characters (M105 only) */
-#define GIGASET_BRKCHARS _IOW(GIGASET_IOCTL, 2, unsigned char[6])
-
-/* get version information selected by arg[0] */
-#define GIGASET_VERSION _IOWR(GIGASET_IOCTL, 3, unsigned[4])
-/* values for GIGASET_VERSION arg[0] */
-#define GIGVER_DRIVER 0 /* get driver version */
-#define GIGVER_COMPAT 1 /* get interface compatibility version */
-#define GIGVER_FWBASE 2 /* get base station firmware version */
-
-#endif
diff --git a/include/uapi/linux/gpio.h b/include/uapi/linux/gpio.h
index 4ebfe0ac6c5b..799cf823d493 100644
--- a/include/uapi/linux/gpio.h
+++ b/include/uapi/linux/gpio.h
@@ -33,6 +33,9 @@ struct gpiochip_info {
#define GPIOLINE_FLAG_ACTIVE_LOW (1UL << 2)
#define GPIOLINE_FLAG_OPEN_DRAIN (1UL << 3)
#define GPIOLINE_FLAG_OPEN_SOURCE (1UL << 4)
+#define GPIOLINE_FLAG_BIAS_PULL_UP (1UL << 5)
+#define GPIOLINE_FLAG_BIAS_PULL_DOWN (1UL << 6)
+#define GPIOLINE_FLAG_BIAS_DISABLE (1UL << 7)
/**
* struct gpioline_info - Information about a certain GPIO line
@@ -62,6 +65,9 @@ struct gpioline_info {
#define GPIOHANDLE_REQUEST_ACTIVE_LOW (1UL << 2)
#define GPIOHANDLE_REQUEST_OPEN_DRAIN (1UL << 3)
#define GPIOHANDLE_REQUEST_OPEN_SOURCE (1UL << 4)
+#define GPIOHANDLE_REQUEST_BIAS_PULL_UP (1UL << 5)
+#define GPIOHANDLE_REQUEST_BIAS_PULL_DOWN (1UL << 6)
+#define GPIOHANDLE_REQUEST_BIAS_DISABLE (1UL << 7)
/**
* struct gpiohandle_request - Information about a GPIO handle request
@@ -95,6 +101,24 @@ struct gpiohandle_request {
};
/**
+ * struct gpiohandle_config - Configuration for a GPIO handle request
+ * @flags: updated flags for the requested GPIO lines, such as
+ * GPIOHANDLE_REQUEST_OUTPUT, GPIOHANDLE_REQUEST_ACTIVE_LOW etc., ORed
+ * together
+ * @default_values: if GPIOHANDLE_REQUEST_OUTPUT is set in flags,
+ * this specifies the default output value; it should be 0 (low) or
+ * 1 (high), and anything other than 0 or 1 will be interpreted as 1 (high)
+ * @padding: reserved for future use and should be zero filled
+ */
+struct gpiohandle_config {
+ __u32 flags;
+ __u8 default_values[GPIOHANDLES_MAX];
+ __u32 padding[4]; /* padding for future use */
+};
+
+#define GPIOHANDLE_SET_CONFIG_IOCTL _IOWR(0xB4, 0x0a, struct gpiohandle_config)
+
+/**
* struct gpiohandle_data - Information of values on a GPIO handle
* @values: when getting the state of lines this contains the current
* state of a line, when setting the state of lines these should contain
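The new bias flags and GPIOHANDLE_SET_CONFIG_IOCTL allow changing the configuration of an already requested line handle without releasing it. A hedged sketch; handle_fd is assumed to be the fd returned by a prior GPIO_GET_LINEHANDLE_IOCTL request for a single line:

    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/gpio.h>

    /* Reconfigure an existing line handle as an input with pull-up bias. */
    int set_input_pull_up(int handle_fd)
    {
        struct gpiohandle_config config;

        memset(&config, 0, sizeof(config));
        config.flags = GPIOHANDLE_REQUEST_INPUT |
                       GPIOHANDLE_REQUEST_BIAS_PULL_UP;

        return ioctl(handle_fd, GPIOHANDLE_SET_CONFIG_IOCTL, &config);
    }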
diff --git a/include/uapi/linux/hdlc/ioctl.h b/include/uapi/linux/hdlc/ioctl.h
index 0fe4238e8246..b06341acab5e 100644
--- a/include/uapi/linux/hdlc/ioctl.h
+++ b/include/uapi/linux/hdlc/ioctl.h
@@ -79,6 +79,15 @@ typedef struct {
unsigned int timeout;
} cisco_proto;
+typedef struct {
+ unsigned short dce; /* 1 for DCE (network side) operation */
+ unsigned int modulo; /* modulo (8 = basic / 128 = extended) */
+ unsigned int window; /* frame window size */
+ unsigned int t1; /* timeout t1 */
+ unsigned int t2; /* timeout t2 */
+ unsigned int n2; /* frame retry counter */
+} x25_hdlc_proto;
+
/* PPP doesn't need any info now - supply length = 0 to ioctl */
#endif /* __ASSEMBLY__ */
diff --git a/include/uapi/linux/hidraw.h b/include/uapi/linux/hidraw.h
index 98e2c493de85..4913539e5bcc 100644
--- a/include/uapi/linux/hidraw.h
+++ b/include/uapi/linux/hidraw.h
@@ -39,6 +39,7 @@ struct hidraw_devinfo {
/* The first byte of SFEATURE and GFEATURE is the report number */
#define HIDIOCSFEATURE(len) _IOC(_IOC_WRITE|_IOC_READ, 'H', 0x06, len)
#define HIDIOCGFEATURE(len) _IOC(_IOC_WRITE|_IOC_READ, 'H', 0x07, len)
+#define HIDIOCGRAWUNIQ(len) _IOC(_IOC_READ, 'H', 0x08, len)
#define HIDRAW_FIRST_MINOR 0
#define HIDRAW_MAX_DEVICES 64
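HIDIOCGRAWUNIQ follows the same pattern as the existing HIDIOCGRAWNAME/HIDIOCGRAWPHYS string ioctls: the caller passes a buffer length via the macro and receives a NUL-terminated string. A hedged sketch; /dev/hidraw0 is an example node:

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <unistd.h>
    #include <linux/hidraw.h>

    int main(void)
    {
        char uniq[64] = "";
        int fd = open("/dev/hidraw0", O_RDONLY);

        if (fd < 0)
            return 1;
        if (ioctl(fd, HIDIOCGRAWUNIQ(sizeof(uniq)), uniq) >= 0)
            printf("uniq: %s\n", uniq);
        close(fd);
        return 0;
    }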
diff --git a/include/uapi/linux/hysdn_if.h b/include/uapi/linux/hysdn_if.h
deleted file mode 100644
index 99f77c517e6d..000000000000
--- a/include/uapi/linux/hysdn_if.h
+++ /dev/null
@@ -1,34 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
-/* $Id: hysdn_if.h,v 1.1.8.3 2001/09/23 22:25:05 kai Exp $
- *
- * Linux driver for HYSDN cards
- * ioctl definitions shared by hynetmgr and driver.
- *
- * Author Werner Cornelius (werner@titro.de) for Hypercope GmbH
- * Copyright 1999 by Werner Cornelius (werner@titro.de)
- *
- * This software may be used and distributed according to the terms
- * of the GNU General Public License, incorporated herein by reference.
- *
- */
-
-/****************/
-/* error values */
-/****************/
-#define ERR_NONE 0 /* no error occurred */
-#define ERR_ALREADY_BOOT 1000 /* we are already booting */
-#define EPOF_BAD_MAGIC 1001 /* bad magic in POF header */
-#define ERR_BOARD_DPRAM 1002 /* board DPRAM failed */
-#define EPOF_INTERNAL 1003 /* internal POF handler error */
-#define EPOF_BAD_IMG_SIZE 1004 /* POF boot image size invalid */
-#define ERR_BOOTIMG_FAIL 1005 /* 1. stage boot image did not start */
-#define ERR_BOOTSEQ_FAIL 1006 /* 2. stage boot seq handshake timeout */
-#define ERR_POF_TIMEOUT 1007 /* timeout waiting for card pof ready */
-#define ERR_NOT_BOOTED 1008 /* operation only allowed when booted */
-#define ERR_CONF_LONG 1009 /* conf line is too long */
-#define ERR_INV_CHAN 1010 /* invalid channel number */
-#define ERR_ASYNC_TIME 1011 /* timeout sending async data */
-
-
-
-
diff --git a/include/uapi/linux/idxd.h b/include/uapi/linux/idxd.h
new file mode 100644
index 000000000000..849ef1515d04
--- /dev/null
+++ b/include/uapi/linux/idxd.h
@@ -0,0 +1,228 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
+#ifndef _USR_IDXD_H_
+#define _USR_IDXD_H_
+
+#ifdef __KERNEL__
+#include <linux/types.h>
+#else
+#include <stdint.h>
+#endif
+
+/* Descriptor flags */
+#define IDXD_OP_FLAG_FENCE 0x0001
+#define IDXD_OP_FLAG_BOF 0x0002
+#define IDXD_OP_FLAG_CRAV 0x0004
+#define IDXD_OP_FLAG_RCR 0x0008
+#define IDXD_OP_FLAG_RCI 0x0010
+#define IDXD_OP_FLAG_CRSTS 0x0020
+#define IDXD_OP_FLAG_CR 0x0080
+#define IDXD_OP_FLAG_CC 0x0100
+#define IDXD_OP_FLAG_ADDR1_TCS 0x0200
+#define IDXD_OP_FLAG_ADDR2_TCS 0x0400
+#define IDXD_OP_FLAG_ADDR3_TCS 0x0800
+#define IDXD_OP_FLAG_CR_TCS 0x1000
+#define IDXD_OP_FLAG_STORD 0x2000
+#define IDXD_OP_FLAG_DRDBK 0x4000
+#define IDXD_OP_FLAG_DSTS 0x8000
+
+/* Opcode */
+enum dsa_opcode {
+ DSA_OPCODE_NOOP = 0,
+ DSA_OPCODE_BATCH,
+ DSA_OPCODE_DRAIN,
+ DSA_OPCODE_MEMMOVE,
+ DSA_OPCODE_MEMFILL,
+ DSA_OPCODE_COMPARE,
+ DSA_OPCODE_COMPVAL,
+ DSA_OPCODE_CR_DELTA,
+ DSA_OPCODE_AP_DELTA,
+ DSA_OPCODE_DUALCAST,
+ DSA_OPCODE_CRCGEN = 0x10,
+ DSA_OPCODE_COPY_CRC,
+ DSA_OPCODE_DIF_CHECK,
+ DSA_OPCODE_DIF_INS,
+ DSA_OPCODE_DIF_STRP,
+ DSA_OPCODE_DIF_UPDT,
+ DSA_OPCODE_CFLUSH = 0x20,
+};
+
+/* Completion record status */
+enum dsa_completion_status {
+ DSA_COMP_NONE = 0,
+ DSA_COMP_SUCCESS,
+ DSA_COMP_SUCCESS_PRED,
+ DSA_COMP_PAGE_FAULT_NOBOF,
+ DSA_COMP_PAGE_FAULT_IR,
+ DSA_COMP_BATCH_FAIL,
+ DSA_COMP_BATCH_PAGE_FAULT,
+ DSA_COMP_DR_OFFSET_NOINC,
+ DSA_COMP_DR_OFFSET_ERANGE,
+ DSA_COMP_DIF_ERR,
+ DSA_COMP_BAD_OPCODE = 0x10,
+ DSA_COMP_INVALID_FLAGS,
+ DSA_COMP_NOZERO_RESERVE,
+ DSA_COMP_XFER_ERANGE,
+ DSA_COMP_DESC_CNT_ERANGE,
+ DSA_COMP_DR_ERANGE,
+ DSA_COMP_OVERLAP_BUFFERS,
+ DSA_COMP_DCAST_ERR,
+ DSA_COMP_DESCLIST_ALIGN,
+ DSA_COMP_INT_HANDLE_INVAL,
+ DSA_COMP_CRA_XLAT,
+ DSA_COMP_CRA_ALIGN,
+ DSA_COMP_ADDR_ALIGN,
+ DSA_COMP_PRIV_BAD,
+ DSA_COMP_TRAFFIC_CLASS_CONF,
+ DSA_COMP_PFAULT_RDBA,
+ DSA_COMP_HW_ERR1,
+ DSA_COMP_HW_ERR_DRB,
+ DSA_COMP_TRANSLATION_FAIL,
+};
+
+#define DSA_COMP_STATUS_MASK 0x7f
+#define DSA_COMP_STATUS_WRITE 0x80
+
+struct dsa_batch_desc {
+ uint32_t pasid:20;
+ uint32_t rsvd:11;
+ uint32_t priv:1;
+ uint32_t flags:24;
+ uint32_t opcode:8;
+ uint64_t completion_addr;
+ uint64_t desc_list_addr;
+ uint64_t rsvd1;
+ uint32_t desc_count;
+ uint16_t interrupt_handle;
+ uint16_t rsvd2;
+ uint8_t rsvd3[24];
+} __attribute__((packed));
+
+struct dsa_hw_desc {
+ uint32_t pasid:20;
+ uint32_t rsvd:11;
+ uint32_t priv:1;
+ uint32_t flags:24;
+ uint32_t opcode:8;
+ uint64_t completion_addr;
+ union {
+ uint64_t src_addr;
+ uint64_t rdback_addr;
+ uint64_t pattern;
+ };
+ union {
+ uint64_t dst_addr;
+ uint64_t rdback_addr2;
+ uint64_t src2_addr;
+ uint64_t comp_pattern;
+ };
+ uint32_t xfer_size;
+ uint16_t int_handle;
+ uint16_t rsvd1;
+ union {
+ uint8_t expected_res;
+ struct {
+ uint64_t delta_addr;
+ uint32_t max_delta_size;
+ };
+ uint32_t delta_rec_size;
+ uint64_t dest2;
+ /* CRC */
+ struct {
+ uint32_t crc_seed;
+ uint32_t crc_rsvd;
+ uint64_t seed_addr;
+ };
+ /* DIF check or strip */
+ struct {
+ uint8_t src_dif_flags;
+ uint8_t dif_chk_res;
+ uint8_t dif_chk_flags;
+ uint8_t dif_chk_res2[5];
+ uint32_t chk_ref_tag_seed;
+ uint16_t chk_app_tag_mask;
+ uint16_t chk_app_tag_seed;
+ };
+ /* DIF insert */
+ struct {
+ uint8_t dif_ins_res;
+ uint8_t dest_dif_flag;
+ uint8_t dif_ins_flags;
+ uint8_t dif_ins_res2[13];
+ uint32_t ins_ref_tag_seed;
+ uint16_t ins_app_tag_mask;
+ uint16_t ins_app_tag_seed;
+ };
+ /* DIF update */
+ struct {
+ uint8_t src_upd_flags;
+ uint8_t upd_dest_flags;
+ uint8_t dif_upd_flags;
+ uint8_t dif_upd_res[5];
+ uint32_t src_ref_tag_seed;
+ uint16_t src_app_tag_mask;
+ uint16_t src_app_tag_seed;
+ uint32_t dest_ref_tag_seed;
+ uint16_t dest_app_tag_mask;
+ uint16_t dest_app_tag_seed;
+ };
+
+ uint8_t op_specific[24];
+ };
+} __attribute__((packed));
+
+struct dsa_raw_desc {
+ uint64_t field[8];
+} __attribute__((packed));
+
+/*
+ * The status field will be modified by hardware, therefore it should be
+ * volatile, to prevent the compiler from optimizing the read.
+ */
+struct dsa_completion_record {
+ volatile uint8_t status;
+ union {
+ uint8_t result;
+ uint8_t dif_status;
+ };
+ uint16_t rsvd;
+ uint32_t bytes_completed;
+ uint64_t fault_addr;
+ union {
+ uint16_t delta_rec_size;
+ uint16_t crc_val;
+
+ /* DIF check & strip */
+ struct {
+ uint32_t dif_chk_ref_tag;
+ uint16_t dif_chk_app_tag_mask;
+ uint16_t dif_chk_app_tag;
+ };
+
+ /* DIF insert */
+ struct {
+ uint64_t dif_ins_res;
+ uint32_t dif_ins_ref_tag;
+ uint16_t dif_ins_app_tag_mask;
+ uint16_t dif_ins_app_tag;
+ };
+
+ /* DIF update */
+ struct {
+ uint32_t dif_upd_src_ref_tag;
+ uint16_t dif_upd_src_app_tag_mask;
+ uint16_t dif_upd_src_app_tag;
+ uint32_t dif_upd_dest_ref_tag;
+ uint16_t dif_upd_dest_app_tag_mask;
+ uint16_t dif_upd_dest_app_tag;
+ };
+
+ uint8_t op_specific[16];
+ };
+} __attribute__((packed));
+
+struct dsa_raw_completion_record {
+ uint64_t field[4];
+} __attribute__((packed));
+
+#endif
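For orientation, a request is built by filling struct dsa_hw_desc and pointing completion_addr at a struct dsa_completion_record, which the device later updates with a status. A heavily hedged sketch of a memory-move descriptor; submission to the device portal (ENQCMD/MOVDIR64B), PASID setup and the completion record's alignment requirements are all omitted and the flag choice is illustrative:

    #include <stdint.h>
    #include <string.h>
    #include <linux/idxd.h>

    /* Illustrative only: fill a DSA memmove descriptor. */
    static void prep_memmove(struct dsa_hw_desc *desc,
                             struct dsa_completion_record *comp,
                             void *dst, const void *src, uint32_t len)
    {
        memset(desc, 0, sizeof(*desc));
        comp->status = DSA_COMP_NONE;

        desc->opcode = DSA_OPCODE_MEMMOVE;
        /* request a completion record and block on page faults */
        desc->flags = IDXD_OP_FLAG_CRAV | IDXD_OP_FLAG_RCR | IDXD_OP_FLAG_BOF;
        desc->completion_addr = (uint64_t)(uintptr_t)comp;
        desc->src_addr = (uint64_t)(uintptr_t)src;
        desc->dst_addr = (uint64_t)(uintptr_t)dst;
        desc->xfer_size = len;
    }

After submission the caller polls comp->status until (status & DSA_COMP_STATUS_MASK) is no longer DSA_COMP_NONE, then compares it against DSA_COMP_SUCCESS.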
diff --git a/include/uapi/linux/if.h b/include/uapi/linux/if.h
index 7fea0fd7d6f5..be714cd8c826 100644
--- a/include/uapi/linux/if.h
+++ b/include/uapi/linux/if.h
@@ -33,6 +33,7 @@
#define IFNAMSIZ 16
#endif /* __UAPI_DEF_IF_IFNAMSIZ */
#define IFALIASZ 256
+#define ALTIFNAMSIZ 128
#include <linux/hdlc/ioctl.h>
/* For glibc compatibility. An empty enum does not compile. */
@@ -212,6 +213,7 @@ struct if_settings {
fr_proto __user *fr;
fr_proto_pvc __user *fr_pvc;
fr_proto_pvc_info __user *fr_pvc_info;
+ x25_hdlc_proto __user *x25;
/* interface settings */
sync_serial_settings __user *sync;
diff --git a/include/uapi/linux/if_bonding.h b/include/uapi/linux/if_bonding.h
index 790585f0e61b..45f3750aa861 100644
--- a/include/uapi/linux/if_bonding.h
+++ b/include/uapi/linux/if_bonding.h
@@ -95,6 +95,16 @@
#define BOND_XMIT_POLICY_ENCAP23 3 /* encapsulated layer 2+3 */
#define BOND_XMIT_POLICY_ENCAP34 4 /* encapsulated layer 3+4 */
+/* 802.3ad port state definitions (43.4.2.2 in the 802.3ad standard) */
+#define LACP_STATE_LACP_ACTIVITY 0x1
+#define LACP_STATE_LACP_TIMEOUT 0x2
+#define LACP_STATE_AGGREGATION 0x4
+#define LACP_STATE_SYNCHRONIZATION 0x8
+#define LACP_STATE_COLLECTING 0x10
+#define LACP_STATE_DISTRIBUTING 0x20
+#define LACP_STATE_DEFAULTED 0x40
+#define LACP_STATE_EXPIRED 0x80
+
typedef struct ifbond {
__s32 bond_mode;
__s32 num_slaves;
diff --git a/include/uapi/linux/if_bridge.h b/include/uapi/linux/if_bridge.h
index 1b3c2b643a02..42f7ca38ad80 100644
--- a/include/uapi/linux/if_bridge.h
+++ b/include/uapi/linux/if_bridge.h
@@ -130,6 +130,7 @@ enum {
#define BRIDGE_VLAN_INFO_RANGE_BEGIN (1<<3) /* VLAN is start of vlan range */
#define BRIDGE_VLAN_INFO_RANGE_END (1<<4) /* VLAN is end of vlan range */
#define BRIDGE_VLAN_INFO_BRENTRY (1<<5) /* Global bridge VLAN entry */
+#define BRIDGE_VLAN_INFO_ONLY_OPTS (1<<6) /* Skip create/delete/flags */
struct bridge_vlan_info {
__u16 flags;
@@ -156,6 +157,45 @@ struct bridge_vlan_xstats {
__u32 pad2;
};
+struct bridge_stp_xstats {
+ __u64 transition_blk;
+ __u64 transition_fwd;
+ __u64 rx_bpdu;
+ __u64 tx_bpdu;
+ __u64 rx_tcn;
+ __u64 tx_tcn;
+};
+
+/* Bridge vlan RTM header */
+struct br_vlan_msg {
+ __u8 family;
+ __u8 reserved1;
+ __u16 reserved2;
+ __u32 ifindex;
+};
+
+/* Bridge vlan RTM attributes
+ * [BRIDGE_VLANDB_ENTRY] = {
+ * [BRIDGE_VLANDB_ENTRY_INFO]
+ * ...
+ * }
+ */
+enum {
+ BRIDGE_VLANDB_UNSPEC,
+ BRIDGE_VLANDB_ENTRY,
+ __BRIDGE_VLANDB_MAX,
+};
+#define BRIDGE_VLANDB_MAX (__BRIDGE_VLANDB_MAX - 1)
+
+enum {
+ BRIDGE_VLANDB_ENTRY_UNSPEC,
+ BRIDGE_VLANDB_ENTRY_INFO,
+ BRIDGE_VLANDB_ENTRY_RANGE,
+ BRIDGE_VLANDB_ENTRY_STATE,
+ __BRIDGE_VLANDB_ENTRY_MAX,
+};
+#define BRIDGE_VLANDB_ENTRY_MAX (__BRIDGE_VLANDB_ENTRY_MAX - 1)
+
/* Bridge multicast database attributes
* [MDBA_MDB] = {
* [MDBA_MDB_ENTRY] = {
@@ -262,6 +302,7 @@ enum {
BRIDGE_XSTATS_VLAN,
BRIDGE_XSTATS_MCAST,
BRIDGE_XSTATS_PAD,
+ BRIDGE_XSTATS_STP,
__BRIDGE_XSTATS_MAX
};
#define BRIDGE_XSTATS_MAX (__BRIDGE_XSTATS_MAX - 1)
diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h
index 4a8c02cafa9a..024af2d1d0af 100644
--- a/include/uapi/linux/if_link.h
+++ b/include/uapi/linux/if_link.h
@@ -167,6 +167,9 @@ enum {
IFLA_NEW_IFINDEX,
IFLA_MIN_MTU,
IFLA_MAX_MTU,
+ IFLA_PROP_LIST,
+ IFLA_ALT_IFNAME, /* Alternative ifname */
+ IFLA_PERM_ADDRESS,
__IFLA_MAX
};
@@ -483,6 +486,13 @@ enum macsec_validation_type {
MACSEC_VALIDATE_MAX = __MACSEC_VALIDATE_END - 1,
};
+enum macsec_offload {
+ MACSEC_OFFLOAD_OFF = 0,
+ MACSEC_OFFLOAD_PHY = 1,
+ __MACSEC_OFFLOAD_END,
+ MACSEC_OFFLOAD_MAX = __MACSEC_OFFLOAD_END - 1,
+};
+
/* IPVLAN section */
enum {
IFLA_IPVLAN_UNSPEC,
diff --git a/include/uapi/linux/if_macsec.h b/include/uapi/linux/if_macsec.h
index 98e4d5d7c45c..1d63c43c38cc 100644
--- a/include/uapi/linux/if_macsec.h
+++ b/include/uapi/linux/if_macsec.h
@@ -45,6 +45,7 @@ enum macsec_attrs {
MACSEC_ATTR_RXSC_LIST, /* dump, nested, macsec_rxsc_attrs for each RXSC */
MACSEC_ATTR_TXSC_STATS, /* dump, nested, macsec_txsc_stats_attr */
MACSEC_ATTR_SECY_STATS, /* dump, nested, macsec_secy_stats_attr */
+ MACSEC_ATTR_OFFLOAD, /* config, nested, macsec_offload_attrs */
__MACSEC_ATTR_END,
NUM_MACSEC_ATTR = __MACSEC_ATTR_END,
MACSEC_ATTR_MAX = __MACSEC_ATTR_END - 1,
@@ -97,6 +98,15 @@ enum macsec_sa_attrs {
MACSEC_SA_ATTR_MAX = __MACSEC_SA_ATTR_END - 1,
};
+enum macsec_offload_attrs {
+ MACSEC_OFFLOAD_ATTR_UNSPEC,
+ MACSEC_OFFLOAD_ATTR_TYPE, /* config/dump, u8 0..2 */
+ MACSEC_OFFLOAD_ATTR_PAD,
+ __MACSEC_OFFLOAD_ATTR_END,
+ NUM_MACSEC_OFFLOAD_ATTR = __MACSEC_OFFLOAD_ATTR_END,
+ MACSEC_OFFLOAD_ATTR_MAX = __MACSEC_OFFLOAD_ATTR_END - 1,
+};
+
enum macsec_nl_commands {
MACSEC_CMD_GET_TXSC,
MACSEC_CMD_ADD_RXSC,
@@ -108,6 +118,7 @@ enum macsec_nl_commands {
MACSEC_CMD_ADD_RXSA,
MACSEC_CMD_DEL_RXSA,
MACSEC_CMD_UPD_RXSA,
+ MACSEC_CMD_UPD_OFFLOAD,
};
/* u64 per-RXSC stats */
diff --git a/include/uapi/linux/in.h b/include/uapi/linux/in.h
index e7ad9d350a28..8533bf07450f 100644
--- a/include/uapi/linux/in.h
+++ b/include/uapi/linux/in.h
@@ -74,8 +74,12 @@ enum {
#define IPPROTO_UDPLITE IPPROTO_UDPLITE
IPPROTO_MPLS = 137, /* MPLS in IP (RFC 4023) */
#define IPPROTO_MPLS IPPROTO_MPLS
+ IPPROTO_ETHERNET = 143, /* Ethernet-within-IPv6 Encapsulation */
+#define IPPROTO_ETHERNET IPPROTO_ETHERNET
IPPROTO_RAW = 255, /* Raw IP packets */
#define IPPROTO_RAW IPPROTO_RAW
+ IPPROTO_MPTCP = 262, /* Multipath TCP connection */
+#define IPPROTO_MPTCP IPPROTO_MPTCP
IPPROTO_MAX
};
#endif
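With IPPROTO_MPTCP defined, a multipath TCP socket is requested directly through socket(2). A hedged sketch that falls back to plain TCP on kernels without MPTCP support:

    #include <netinet/in.h>
    #include <sys/socket.h>

    #ifndef IPPROTO_MPTCP
    #define IPPROTO_MPTCP 262
    #endif

    int open_stream_socket(void)
    {
        int fd = socket(AF_INET, SOCK_STREAM, IPPROTO_MPTCP);

        if (fd < 0)    /* e.g. EPROTONOSUPPORT on older kernels */
            fd = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
        return fd;
    }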
diff --git a/include/uapi/linux/input-event-codes.h b/include/uapi/linux/input-event-codes.h
index 85387c76c24f..6923dc7e0298 100644
--- a/include/uapi/linux/input-event-codes.h
+++ b/include/uapi/linux/input-event-codes.h
@@ -649,6 +649,86 @@
*/
#define KEY_DATA 0x277
#define KEY_ONSCREEN_KEYBOARD 0x278
+/* Electronic privacy screen control */
+#define KEY_PRIVACY_SCREEN_TOGGLE 0x279
+
+/* Select an area of screen to be copied */
+#define KEY_SELECTIVE_SCREENSHOT 0x27a
+
+/*
+ * Some keyboards have keys which do not have a defined meaning; these keys
+ * are intended to be programmed / bound to macros by the user. For most
+ * keyboards with these macro-keys the key-sequence to inject, or action to
+ * take, is all handled by software on the host side. So from the kernel's
+ * point of view these are just normal keys.
+ *
+ * The KEY_MACRO# codes below are intended for such keys, which may be labeled
+ * e.g. G1-G18, or S1 - S30. The KEY_MACRO# codes MUST NOT be used for keys
+ * where the marking on the key does indicate a defined meaning / purpose.
+ *
+ * The KEY_MACRO# codes MUST also NOT be used as fallback for when no existing
+ * KEY_FOO define matches the marking / purpose. In this case a new KEY_FOO
+ * define MUST be added.
+ */
+#define KEY_MACRO1 0x290
+#define KEY_MACRO2 0x291
+#define KEY_MACRO3 0x292
+#define KEY_MACRO4 0x293
+#define KEY_MACRO5 0x294
+#define KEY_MACRO6 0x295
+#define KEY_MACRO7 0x296
+#define KEY_MACRO8 0x297
+#define KEY_MACRO9 0x298
+#define KEY_MACRO10 0x299
+#define KEY_MACRO11 0x29a
+#define KEY_MACRO12 0x29b
+#define KEY_MACRO13 0x29c
+#define KEY_MACRO14 0x29d
+#define KEY_MACRO15 0x29e
+#define KEY_MACRO16 0x29f
+#define KEY_MACRO17 0x2a0
+#define KEY_MACRO18 0x2a1
+#define KEY_MACRO19 0x2a2
+#define KEY_MACRO20 0x2a3
+#define KEY_MACRO21 0x2a4
+#define KEY_MACRO22 0x2a5
+#define KEY_MACRO23 0x2a6
+#define KEY_MACRO24 0x2a7
+#define KEY_MACRO25 0x2a8
+#define KEY_MACRO26 0x2a9
+#define KEY_MACRO27 0x2aa
+#define KEY_MACRO28 0x2ab
+#define KEY_MACRO29 0x2ac
+#define KEY_MACRO30 0x2ad
+
+/*
+ * Some keyboards with the macro-keys described above have some extra keys
+ * for controlling the host-side software responsible for the macro handling:
+ * -A macro recording start/stop key. Note that not all keyboards which emit
+ * KEY_MACRO_RECORD_START will also emit KEY_MACRO_RECORD_STOP; if
+ * KEY_MACRO_RECORD_STOP is not advertised, then KEY_MACRO_RECORD_START
+ * should be interpreted as a recording start/stop toggle;
+ * -Keys for switching between different macro (pre)sets, either a key for
+ * cycling through the configured presets or keys to directly select a preset.
+ */
+#define KEY_MACRO_RECORD_START 0x2b0
+#define KEY_MACRO_RECORD_STOP 0x2b1
+#define KEY_MACRO_PRESET_CYCLE 0x2b2
+#define KEY_MACRO_PRESET1 0x2b3
+#define KEY_MACRO_PRESET2 0x2b4
+#define KEY_MACRO_PRESET3 0x2b5
+
+/*
+ * Some keyboards have a built-in LCD panel where the contents are controlled
+ * by the host. Often these have a number of keys directly below the LCD
+ * intended for controlling a menu shown on the LCD. These keys often don't
+ * have any labeling, so we just name them KEY_KBD_LCD_MENU#
+ */
+#define KEY_KBD_LCD_MENU1 0x2b8
+#define KEY_KBD_LCD_MENU2 0x2b9
+#define KEY_KBD_LCD_MENU3 0x2ba
+#define KEY_KBD_LCD_MENU4 0x2bb
+#define KEY_KBD_LCD_MENU5 0x2bc
#define BTN_TRIGGER_HAPPY 0x2c0
#define BTN_TRIGGER_HAPPY1 0x2c0
diff --git a/include/uapi/linux/input.h b/include/uapi/linux/input.h
index f056b2a00d5c..9a61c28ed3ae 100644
--- a/include/uapi/linux/input.h
+++ b/include/uapi/linux/input.h
@@ -34,6 +34,7 @@ struct input_event {
__kernel_ulong_t __sec;
#if defined(__sparc__) && defined(__arch64__)
unsigned int __usec;
+ unsigned int __pad;
#else
__kernel_ulong_t __usec;
#endif
diff --git a/include/uapi/linux/io_uring.h b/include/uapi/linux/io_uring.h
index ea57526a5b89..e48d746b8e2a 100644
--- a/include/uapi/linux/io_uring.h
+++ b/include/uapi/linux/io_uring.h
@@ -1,4 +1,4 @@
-/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/* SPDX-License-Identifier: (GPL-2.0 WITH Linux-syscall-note) OR MIT */
/*
* Header file for the io_uring interface.
*
@@ -19,8 +19,14 @@ struct io_uring_sqe {
__u8 flags; /* IOSQE_ flags */
__u16 ioprio; /* ioprio for the request */
__s32 fd; /* file descriptor to do IO on */
- __u64 off; /* offset into file */
- __u64 addr; /* pointer to buffer or iovecs */
+ union {
+ __u64 off; /* offset into file */
+ __u64 addr2;
+ };
+ union {
+ __u64 addr; /* pointer to buffer or iovecs */
+ __u64 splice_off_in;
+ };
__u32 len; /* buffer size or number of iovecs */
union {
__kernel_rwf_t rw_flags;
@@ -29,20 +35,55 @@ struct io_uring_sqe {
__u32 sync_range_flags;
__u32 msg_flags;
__u32 timeout_flags;
+ __u32 accept_flags;
+ __u32 cancel_flags;
+ __u32 open_flags;
+ __u32 statx_flags;
+ __u32 fadvise_advice;
+ __u32 splice_flags;
};
__u64 user_data; /* data to be passed back at completion time */
union {
- __u16 buf_index; /* index into fixed buffers, if used */
+ struct {
+ /* pack this to avoid bogus arm OABI complaints */
+ union {
+ /* index into fixed buffers, if used */
+ __u16 buf_index;
+ /* for grouped buffer selection */
+ __u16 buf_group;
+ } __attribute__((packed));
+ /* personality to use, if used */
+ __u16 personality;
+ __s32 splice_fd_in;
+ };
__u64 __pad2[3];
};
};
+enum {
+ IOSQE_FIXED_FILE_BIT,
+ IOSQE_IO_DRAIN_BIT,
+ IOSQE_IO_LINK_BIT,
+ IOSQE_IO_HARDLINK_BIT,
+ IOSQE_ASYNC_BIT,
+ IOSQE_BUFFER_SELECT_BIT,
+};
+
/*
* sqe->flags
*/
-#define IOSQE_FIXED_FILE (1U << 0) /* use fixed fileset */
-#define IOSQE_IO_DRAIN (1U << 1) /* issue after inflight IO */
-#define IOSQE_IO_LINK (1U << 2) /* links next sqe */
+/* use fixed fileset */
+#define IOSQE_FIXED_FILE (1U << IOSQE_FIXED_FILE_BIT)
+/* issue after inflight IO */
+#define IOSQE_IO_DRAIN (1U << IOSQE_IO_DRAIN_BIT)
+/* links next sqe */
+#define IOSQE_IO_LINK (1U << IOSQE_IO_LINK_BIT)
+/* like LINK, but stronger */
+#define IOSQE_IO_HARDLINK (1U << IOSQE_IO_HARDLINK_BIT)
+/* always go async */
+#define IOSQE_ASYNC (1U << IOSQE_ASYNC_BIT)
+/* select buffer from sqe->buf_group */
+#define IOSQE_BUFFER_SELECT (1U << IOSQE_BUFFER_SELECT_BIT)
/*
* io_uring_setup() flags
@@ -50,19 +91,48 @@ struct io_uring_sqe {
#define IORING_SETUP_IOPOLL (1U << 0) /* io_context is polled */
#define IORING_SETUP_SQPOLL (1U << 1) /* SQ poll thread */
#define IORING_SETUP_SQ_AFF (1U << 2) /* sq_thread_cpu is valid */
+#define IORING_SETUP_CQSIZE (1U << 3) /* app defines CQ size */
+#define IORING_SETUP_CLAMP (1U << 4) /* clamp SQ/CQ ring sizes */
+#define IORING_SETUP_ATTACH_WQ (1U << 5) /* attach to existing wq */
+
+enum {
+ IORING_OP_NOP,
+ IORING_OP_READV,
+ IORING_OP_WRITEV,
+ IORING_OP_FSYNC,
+ IORING_OP_READ_FIXED,
+ IORING_OP_WRITE_FIXED,
+ IORING_OP_POLL_ADD,
+ IORING_OP_POLL_REMOVE,
+ IORING_OP_SYNC_FILE_RANGE,
+ IORING_OP_SENDMSG,
+ IORING_OP_RECVMSG,
+ IORING_OP_TIMEOUT,
+ IORING_OP_TIMEOUT_REMOVE,
+ IORING_OP_ACCEPT,
+ IORING_OP_ASYNC_CANCEL,
+ IORING_OP_LINK_TIMEOUT,
+ IORING_OP_CONNECT,
+ IORING_OP_FALLOCATE,
+ IORING_OP_OPENAT,
+ IORING_OP_CLOSE,
+ IORING_OP_FILES_UPDATE,
+ IORING_OP_STATX,
+ IORING_OP_READ,
+ IORING_OP_WRITE,
+ IORING_OP_FADVISE,
+ IORING_OP_MADVISE,
+ IORING_OP_SEND,
+ IORING_OP_RECV,
+ IORING_OP_OPENAT2,
+ IORING_OP_EPOLL_CTL,
+ IORING_OP_SPLICE,
+ IORING_OP_PROVIDE_BUFFERS,
+ IORING_OP_REMOVE_BUFFERS,
-#define IORING_OP_NOP 0
-#define IORING_OP_READV 1
-#define IORING_OP_WRITEV 2
-#define IORING_OP_FSYNC 3
-#define IORING_OP_READ_FIXED 4
-#define IORING_OP_WRITE_FIXED 5
-#define IORING_OP_POLL_ADD 6
-#define IORING_OP_POLL_REMOVE 7
-#define IORING_OP_SYNC_FILE_RANGE 8
-#define IORING_OP_SENDMSG 9
-#define IORING_OP_RECVMSG 10
-#define IORING_OP_TIMEOUT 11
+ /* this goes last, obviously */
+ IORING_OP_LAST,
+};
/*
* sqe->fsync_flags
@@ -70,6 +140,17 @@ struct io_uring_sqe {
#define IORING_FSYNC_DATASYNC (1U << 0)
/*
+ * sqe->timeout_flags
+ */
+#define IORING_TIMEOUT_ABS (1U << 0)
+
+/*
+ * sqe->splice_flags
+ * extends splice(2) flags
+ */
+#define SPLICE_F_FD_IN_FIXED (1U << 31) /* the last bit of __u32 */
+
+/*
* IO completion data structure (Completion Queue Entry)
*/
struct io_uring_cqe {
@@ -79,6 +160,17 @@ struct io_uring_cqe {
};
/*
+ * cqe->flags
+ *
+ * IORING_CQE_F_BUFFER If set, the upper 16 bits are the buffer ID
+ */
+#define IORING_CQE_F_BUFFER (1U << 0)
+
+enum {
+ IORING_CQE_BUFFER_SHIFT = 16,
+};
+
+/*
* Magic offsets for the application to mmap the data it needs
*/
#define IORING_OFF_SQ_RING 0ULL
@@ -131,7 +223,8 @@ struct io_uring_params {
__u32 sq_thread_cpu;
__u32 sq_thread_idle;
__u32 features;
- __u32 resv[4];
+ __u32 wq_fd;
+ __u32 resv[3];
struct io_sqring_offsets sq_off;
struct io_cqring_offsets cq_off;
};
@@ -140,6 +233,11 @@ struct io_uring_params {
* io_uring_params->features flags
*/
#define IORING_FEAT_SINGLE_MMAP (1U << 0)
+#define IORING_FEAT_NODROP (1U << 1)
+#define IORING_FEAT_SUBMIT_STABLE (1U << 2)
+#define IORING_FEAT_RW_CUR_POS (1U << 3)
+#define IORING_FEAT_CUR_PERSONALITY (1U << 4)
+#define IORING_FEAT_FAST_POLL (1U << 5)
/*
* io_uring_register(2) opcodes and arguments
@@ -150,5 +248,33 @@ struct io_uring_params {
#define IORING_UNREGISTER_FILES 3
#define IORING_REGISTER_EVENTFD 4
#define IORING_UNREGISTER_EVENTFD 5
+#define IORING_REGISTER_FILES_UPDATE 6
+#define IORING_REGISTER_EVENTFD_ASYNC 7
+#define IORING_REGISTER_PROBE 8
+#define IORING_REGISTER_PERSONALITY 9
+#define IORING_UNREGISTER_PERSONALITY 10
+
+struct io_uring_files_update {
+ __u32 offset;
+ __u32 resv;
+ __aligned_u64 /* __s32 * */ fds;
+};
+
+#define IO_URING_OP_SUPPORTED (1U << 0)
+
+struct io_uring_probe_op {
+ __u8 op;
+ __u8 resv;
+ __u16 flags; /* IO_URING_OP_* flags */
+ __u32 resv2;
+};
+
+struct io_uring_probe {
+ __u8 last_op; /* last opcode supported */
+ __u8 ops_len; /* length of ops[] array below */
+ __u16 resv;
+ __u32 resv2[3];
+ struct io_uring_probe_op ops[0];
+};
#endif
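A minimal userspace sketch of how the extended SQE might be filled using the new opcode enum and flag bits; queue setup, the earlier IORING_OP_PROVIDE_BUFFERS registration of buffer group 7, and submission are assumed and not shown, and real applications normally go through liburing rather than the raw structures.

#include <string.h>
#include <linux/io_uring.h>

/* Illustrative only: queue a buffered read that lets the kernel pick a
 * buffer from group 7 and always runs asynchronously. */
static void prep_read_select_buf(struct io_uring_sqe *sqe, int fd,
                                 unsigned int len, __u64 offset)
{
        memset(sqe, 0, sizeof(*sqe));
        sqe->opcode    = IORING_OP_READ;      /* from the new opcode enum */
        sqe->fd        = fd;
        sqe->off       = offset;
        sqe->addr      = 0;                   /* buffer chosen by the kernel */
        sqe->len       = len;
        sqe->flags     = IOSQE_ASYNC | IOSQE_BUFFER_SELECT;
        sqe->buf_group = 7;                   /* registered beforehand via
                                               * IORING_OP_PROVIDE_BUFFERS */
        sqe->user_data = 0xcafe;
}

On completion, the matching CQE sets IORING_CQE_F_BUFFER in cqe->flags and reports the chosen buffer ID in the upper 16 bits (IORING_CQE_BUFFER_SHIFT).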
diff --git a/include/uapi/linux/iommu.h b/include/uapi/linux/iommu.h
index fc00c5d4741b..4ad3496e5c43 100644
--- a/include/uapi/linux/iommu.h
+++ b/include/uapi/linux/iommu.h
@@ -152,4 +152,173 @@ struct iommu_page_response {
__u32 code;
};
+/* defines the granularity of the invalidation */
+enum iommu_inv_granularity {
+ IOMMU_INV_GRANU_DOMAIN, /* domain-selective invalidation */
+ IOMMU_INV_GRANU_PASID, /* PASID-selective invalidation */
+ IOMMU_INV_GRANU_ADDR, /* page-selective invalidation */
+ IOMMU_INV_GRANU_NR, /* number of invalidation granularities */
+};
+
+/**
+ * struct iommu_inv_addr_info - Address Selective Invalidation Structure
+ *
+ * @flags: indicates the granularity of the address-selective invalidation
+ * - If the PASID bit is set, the @pasid field is populated and the invalidation
+ * relates to cache entries tagged with this PASID and matching the address
+ * range.
+ * - If ARCHID bit is set, @archid is populated and the invalidation relates
+ * to cache entries tagged with this architecture specific ID and matching
+ * the address range.
+ * - Both PASID and ARCHID can be set as they may tag different caches.
+ * - If neither PASID nor ARCHID is set, global addr invalidation applies.
+ * - The LEAF flag indicates whether only the leaf PTE caching needs to be
+ * invalidated and other paging structure caches can be preserved.
+ * @pasid: process address space ID
+ * @archid: architecture-specific ID
+ * @addr: first stage/level input address
+ * @granule_size: page/block size of the mapping in bytes
+ * @nb_granules: number of contiguous granules to be invalidated
+ */
+struct iommu_inv_addr_info {
+#define IOMMU_INV_ADDR_FLAGS_PASID (1 << 0)
+#define IOMMU_INV_ADDR_FLAGS_ARCHID (1 << 1)
+#define IOMMU_INV_ADDR_FLAGS_LEAF (1 << 2)
+ __u32 flags;
+ __u32 archid;
+ __u64 pasid;
+ __u64 addr;
+ __u64 granule_size;
+ __u64 nb_granules;
+};
+
+/**
+ * struct iommu_inv_pasid_info - PASID Selective Invalidation Structure
+ *
+ * @flags: indicates the granularity of the PASID-selective invalidation
+ * - If the PASID bit is set, the @pasid field is populated and the invalidation
+ * relates to cache entries tagged with this PASID.
+ * - If the ARCHID bit is set, @archid is populated and the invalidation
+ * relates to cache entries tagged with this architecture-specific ID.
+ * - Both PASID and ARCHID can be set as they may tag different caches.
+ * - At least one of PASID or ARCHID must be set.
+ * @pasid: process address space ID
+ * @archid: architecture-specific ID
+ */
+struct iommu_inv_pasid_info {
+#define IOMMU_INV_PASID_FLAGS_PASID (1 << 0)
+#define IOMMU_INV_PASID_FLAGS_ARCHID (1 << 1)
+ __u32 flags;
+ __u32 archid;
+ __u64 pasid;
+};
+
+/**
+ * struct iommu_cache_invalidate_info - First level/stage invalidation
+ * information
+ * @version: API version of this structure
+ * @cache: bitfield that allows to select which caches to invalidate
+ * @granularity: defines the lowest granularity used for the invalidation:
+ * domain > PASID > addr
+ * @padding: reserved for future use (should be zero)
+ * @pasid_info: invalidation data when @granularity is %IOMMU_INV_GRANU_PASID
+ * @addr_info: invalidation data when @granularity is %IOMMU_INV_GRANU_ADDR
+ *
+ * Not all the combinations of cache/granularity are valid:
+ *
+ * +--------------+---------------+---------------+---------------+
+ * | type / | DEV_IOTLB | IOTLB | PASID |
+ * | granularity | | | cache |
+ * +==============+===============+===============+===============+
+ * | DOMAIN | N/A | Y | Y |
+ * +--------------+---------------+---------------+---------------+
+ * | PASID | Y | Y | Y |
+ * +--------------+---------------+---------------+---------------+
+ * | ADDR | Y | Y | N/A |
+ * +--------------+---------------+---------------+---------------+
+ *
+ * Invalidations by %IOMMU_INV_GRANU_DOMAIN don't take any argument other than
+ * @version and @cache.
+ *
+ * If multiple cache types are invalidated simultaneously, they all
+ * must support the used granularity.
+ */
+struct iommu_cache_invalidate_info {
+#define IOMMU_CACHE_INVALIDATE_INFO_VERSION_1 1
+ __u32 version;
+/* IOMMU paging structure cache */
+#define IOMMU_CACHE_INV_TYPE_IOTLB (1 << 0) /* IOMMU IOTLB */
+#define IOMMU_CACHE_INV_TYPE_DEV_IOTLB (1 << 1) /* Device IOTLB */
+#define IOMMU_CACHE_INV_TYPE_PASID (1 << 2) /* PASID cache */
+#define IOMMU_CACHE_INV_TYPE_NR (3)
+ __u8 cache;
+ __u8 granularity;
+ __u8 padding[2];
+ union {
+ struct iommu_inv_pasid_info pasid_info;
+ struct iommu_inv_addr_info addr_info;
+ };
+};
+
+/**
+ * struct iommu_gpasid_bind_data_vtd - Intel VT-d specific data on device and guest
+ * SVA binding.
+ *
+ * @flags: VT-d PASID table entry attributes
+ * @pat: Page attribute table data to compute effective memory type
+ * @emt: Extended memory type
+ *
+ * Only guest vIOMMU selectable and effective options are passed down to
+ * the host IOMMU.
+ */
+struct iommu_gpasid_bind_data_vtd {
+#define IOMMU_SVA_VTD_GPASID_SRE (1 << 0) /* supervisor request */
+#define IOMMU_SVA_VTD_GPASID_EAFE (1 << 1) /* extended access enable */
+#define IOMMU_SVA_VTD_GPASID_PCD (1 << 2) /* page-level cache disable */
+#define IOMMU_SVA_VTD_GPASID_PWT (1 << 3) /* page-level write through */
+#define IOMMU_SVA_VTD_GPASID_EMTE (1 << 4) /* extended mem type enable */
+#define IOMMU_SVA_VTD_GPASID_CD (1 << 5) /* PASID-level cache disable */
+ __u64 flags;
+ __u32 pat;
+ __u32 emt;
+};
+
+/**
+ * struct iommu_gpasid_bind_data - Information about device and guest PASID binding
+ * @version: Version of this data structure
+ * @format: PASID table entry format
+ * @flags: Additional information on guest bind request
+ * @gpgd: Guest page directory base of the guest mm to bind
+ * @hpasid: Process address space ID used for the guest mm in host IOMMU
+ * @gpasid: Process address space ID used for the guest mm in guest IOMMU
+ * @addr_width: Guest virtual address width
+ * @padding: Reserved for future use (should be zero)
+ * @vtd: Intel VT-d specific data
+ *
+ * The guest to host PASID mapping can be identity or non-identity, where the
+ * guest has its own PASID space. For non-identity mappings, a guest to host
+ * PASID lookup is needed when the VM programs a guest PASID into an assigned
+ * device. The VMM may trap such PASID programming, then request the host IOMMU
+ * driver to convert the guest PASID to a host PASID based on this bind data.
+ */
+struct iommu_gpasid_bind_data {
+#define IOMMU_GPASID_BIND_VERSION_1 1
+ __u32 version;
+#define IOMMU_PASID_FORMAT_INTEL_VTD 1
+ __u32 format;
+#define IOMMU_SVA_GPASID_VAL (1 << 0) /* guest PASID valid */
+ __u64 flags;
+ __u64 gpgd;
+ __u64 hpasid;
+ __u64 gpasid;
+ __u32 addr_width;
+ __u8 padding[12];
+ /* Vendor specific data */
+ union {
+ struct iommu_gpasid_bind_data_vtd vtd;
+ };
+};
+
#endif /* _UAPI_IOMMU_H */
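As an illustration of the invalidation descriptor documented above, a VMM-side sketch that fills struct iommu_cache_invalidate_info for a page-selective IOTLB invalidation; how the structure is then passed to the kernel (e.g. through a VFIO ioctl) is outside this header and not shown.

#include <string.h>
#include <linux/iommu.h>

/* Illustrative only: invalidate IOTLB entries for 16 contiguous 4 KiB
 * pages starting at iova, restricted to entries tagged with PASID 5. */
static void fill_addr_inval(struct iommu_cache_invalidate_info *inv, __u64 iova)
{
        memset(inv, 0, sizeof(*inv));
        inv->version     = IOMMU_CACHE_INVALIDATE_INFO_VERSION_1;
        inv->cache       = IOMMU_CACHE_INV_TYPE_IOTLB;
        inv->granularity = IOMMU_INV_GRANU_ADDR;

        inv->addr_info.flags        = IOMMU_INV_ADDR_FLAGS_PASID;
        inv->addr_info.pasid        = 5;
        inv->addr_info.addr         = iova;
        inv->addr_info.granule_size = 4096;
        inv->addr_info.nb_granules  = 16;
}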
diff --git a/include/uapi/linux/kcov.h b/include/uapi/linux/kcov.h
index 9529867717a8..1d0350e44ae3 100644
--- a/include/uapi/linux/kcov.h
+++ b/include/uapi/linux/kcov.h
@@ -4,9 +4,24 @@
#include <linux/types.h>
+/*
+ * Argument for KCOV_REMOTE_ENABLE ioctl, see Documentation/dev-tools/kcov.rst
+ * and the comment before kcov_remote_start() for usage details.
+ */
+struct kcov_remote_arg {
+ __u32 trace_mode; /* KCOV_TRACE_PC or KCOV_TRACE_CMP */
+ __u32 area_size; /* Length of coverage buffer in words */
+ __u32 num_handles; /* Size of handles array */
+ __aligned_u64 common_handle;
+ __aligned_u64 handles[0];
+};
+
+#define KCOV_REMOTE_MAX_HANDLES 0x100
+
#define KCOV_INIT_TRACE _IOR('c', 1, unsigned long)
#define KCOV_ENABLE _IO('c', 100)
#define KCOV_DISABLE _IO('c', 101)
+#define KCOV_REMOTE_ENABLE _IOW('c', 102, struct kcov_remote_arg)
enum {
/*
@@ -32,4 +47,17 @@ enum {
#define KCOV_CMP_SIZE(n) ((n) << 1)
#define KCOV_CMP_MASK KCOV_CMP_SIZE(3)
+#define KCOV_SUBSYSTEM_COMMON (0x00ull << 56)
+#define KCOV_SUBSYSTEM_USB (0x01ull << 56)
+
+#define KCOV_SUBSYSTEM_MASK (0xffull << 56)
+#define KCOV_INSTANCE_MASK (0xffffffffull)
+
+static inline __u64 kcov_remote_handle(__u64 subsys, __u64 inst)
+{
+ if (subsys & ~KCOV_SUBSYSTEM_MASK || inst & ~KCOV_INSTANCE_MASK)
+ return 0;
+ return subsys | inst;
+}
+
#endif /* _LINUX_KCOV_IOCTLS_H */
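A rough sketch of the remote-coverage flow the new ioctl enables, following the pattern described in Documentation/dev-tools/kcov.rst; COVER_SIZE and the single USB handle are illustrative and error handling is minimal.

#include <fcntl.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/kcov.h>
#include <linux/types.h>

#define COVER_SIZE (64 << 10)   /* coverage buffer size in words, illustrative */

int main(void)
{
        int fd = open("/sys/kernel/debug/kcov", O_RDWR);
        unsigned long *cover;
        struct kcov_remote_arg *arg;

        if (fd < 0 || ioctl(fd, KCOV_INIT_TRACE, COVER_SIZE))
                return 1;
        cover = mmap(NULL, COVER_SIZE * sizeof(unsigned long),
                     PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
        if (cover == MAP_FAILED)
                return 1;

        /* One common handle plus one per-instance handle for USB bus 1. */
        arg = calloc(1, sizeof(*arg) + sizeof(__u64));
        arg->trace_mode    = KCOV_TRACE_PC;
        arg->area_size     = COVER_SIZE;
        arg->num_handles   = 1;
        arg->common_handle = kcov_remote_handle(KCOV_SUBSYSTEM_COMMON, 1);
        arg->handles[0]    = kcov_remote_handle(KCOV_SUBSYSTEM_USB, 1);
        if (ioctl(fd, KCOV_REMOTE_ENABLE, arg))
                return 1;

        /* ... trigger the remote activity of interest, then read cover[] ... */
        return 0;
}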
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index 52641d8ca9e8..4b95f9a31a2f 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -235,6 +235,7 @@ struct kvm_hyperv_exit {
#define KVM_EXIT_S390_STSI 25
#define KVM_EXIT_IOAPIC_EOI 26
#define KVM_EXIT_HYPERV 27
+#define KVM_EXIT_ARM_NISV 28
/* For KVM_EXIT_INTERNAL_ERROR */
/* Emulate instruction failed. */
@@ -394,6 +395,11 @@ struct kvm_run {
} eoi;
/* KVM_EXIT_HYPERV */
struct kvm_hyperv_exit hyperv;
+ /* KVM_EXIT_ARM_NISV */
+ struct {
+ __u64 esr_iss;
+ __u64 fault_ipa;
+ } arm_nisv;
/* Fix the size of the union. */
char padding[256];
};
@@ -1000,6 +1006,10 @@ struct kvm_ppc_resize_hpt {
#define KVM_CAP_PMU_EVENT_FILTER 173
#define KVM_CAP_ARM_IRQ_LINE_LAYOUT_2 174
#define KVM_CAP_HYPERV_DIRECT_TLBFLUSH 175
+#define KVM_CAP_PPC_GUEST_DEBUG_SSTEP 176
+#define KVM_CAP_ARM_NISV_TO_USER 177
+#define KVM_CAP_ARM_INJECT_EXT_DABT 178
+#define KVM_CAP_S390_VCPU_RESETS 179
#ifdef KVM_CAP_IRQ_ROUTING
@@ -1227,6 +1237,8 @@ enum kvm_device_type {
#define KVM_DEV_TYPE_ARM_VGIC_ITS KVM_DEV_TYPE_ARM_VGIC_ITS
KVM_DEV_TYPE_XIVE,
#define KVM_DEV_TYPE_XIVE KVM_DEV_TYPE_XIVE
+ KVM_DEV_TYPE_ARM_PV_TIME,
+#define KVM_DEV_TYPE_ARM_PV_TIME KVM_DEV_TYPE_ARM_PV_TIME
KVM_DEV_TYPE_MAX,
};
@@ -1337,6 +1349,7 @@ struct kvm_s390_ucas_mapping {
#define KVM_PPC_GET_CPU_CHAR _IOR(KVMIO, 0xb1, struct kvm_ppc_cpu_char)
/* Available with KVM_CAP_PMU_EVENT_FILTER */
#define KVM_SET_PMU_EVENT_FILTER _IOW(KVMIO, 0xb2, struct kvm_pmu_event_filter)
+#define KVM_PPC_SVM_OFF _IO(KVMIO, 0xb3)
/* ioctl for vm fd */
#define KVM_CREATE_DEVICE _IOWR(KVMIO, 0xe0, struct kvm_create_device)
@@ -1461,6 +1474,10 @@ struct kvm_enc_region {
/* Available with KVM_CAP_ARM_SVE */
#define KVM_ARM_VCPU_FINALIZE _IOW(KVMIO, 0xc2, int)
+/* Available with KVM_CAP_S390_VCPU_RESETS */
+#define KVM_S390_NORMAL_RESET _IO(KVMIO, 0xc3)
+#define KVM_S390_CLEAR_RESET _IO(KVMIO, 0xc4)
+
/* Secure Encrypted Virtualization command */
enum sev_cmd_id {
/* Guest initialization commands */
diff --git a/include/uapi/linux/lwtunnel.h b/include/uapi/linux/lwtunnel.h
index de696ca12f2c..f6035f737193 100644
--- a/include/uapi/linux/lwtunnel.h
+++ b/include/uapi/linux/lwtunnel.h
@@ -27,6 +27,7 @@ enum lwtunnel_ip_t {
LWTUNNEL_IP_TOS,
LWTUNNEL_IP_FLAGS,
LWTUNNEL_IP_PAD,
+ LWTUNNEL_IP_OPTS,
__LWTUNNEL_IP_MAX,
};
@@ -41,12 +42,52 @@ enum lwtunnel_ip6_t {
LWTUNNEL_IP6_TC,
LWTUNNEL_IP6_FLAGS,
LWTUNNEL_IP6_PAD,
+ LWTUNNEL_IP6_OPTS,
__LWTUNNEL_IP6_MAX,
};
#define LWTUNNEL_IP6_MAX (__LWTUNNEL_IP6_MAX - 1)
enum {
+ LWTUNNEL_IP_OPTS_UNSPEC,
+ LWTUNNEL_IP_OPTS_GENEVE,
+ LWTUNNEL_IP_OPTS_VXLAN,
+ LWTUNNEL_IP_OPTS_ERSPAN,
+ __LWTUNNEL_IP_OPTS_MAX,
+};
+
+#define LWTUNNEL_IP_OPTS_MAX (__LWTUNNEL_IP_OPTS_MAX - 1)
+
+enum {
+ LWTUNNEL_IP_OPT_GENEVE_UNSPEC,
+ LWTUNNEL_IP_OPT_GENEVE_CLASS,
+ LWTUNNEL_IP_OPT_GENEVE_TYPE,
+ LWTUNNEL_IP_OPT_GENEVE_DATA,
+ __LWTUNNEL_IP_OPT_GENEVE_MAX,
+};
+
+#define LWTUNNEL_IP_OPT_GENEVE_MAX (__LWTUNNEL_IP_OPT_GENEVE_MAX - 1)
+
+enum {
+ LWTUNNEL_IP_OPT_VXLAN_UNSPEC,
+ LWTUNNEL_IP_OPT_VXLAN_GBP,
+ __LWTUNNEL_IP_OPT_VXLAN_MAX,
+};
+
+#define LWTUNNEL_IP_OPT_VXLAN_MAX (__LWTUNNEL_IP_OPT_VXLAN_MAX - 1)
+
+enum {
+ LWTUNNEL_IP_OPT_ERSPAN_UNSPEC,
+ LWTUNNEL_IP_OPT_ERSPAN_VER,
+ LWTUNNEL_IP_OPT_ERSPAN_INDEX,
+ LWTUNNEL_IP_OPT_ERSPAN_DIR,
+ LWTUNNEL_IP_OPT_ERSPAN_HWID,
+ __LWTUNNEL_IP_OPT_ERSPAN_MAX,
+};
+
+#define LWTUNNEL_IP_OPT_ERSPAN_MAX (__LWTUNNEL_IP_OPT_ERSPAN_MAX - 1)
+
+enum {
LWT_BPF_PROG_UNSPEC,
LWT_BPF_PROG_FD,
LWT_BPF_PROG_NAME,
diff --git a/include/uapi/linux/magic.h b/include/uapi/linux/magic.h
index 903cc2d2750b..d78064007b17 100644
--- a/include/uapi/linux/magic.h
+++ b/include/uapi/linux/magic.h
@@ -87,6 +87,7 @@
#define NSFS_MAGIC 0x6e736673
#define BPF_FS_MAGIC 0xcafe4a11
#define AAFS_MAGIC 0x5a3c69f0
+#define ZONEFS_MAGIC 0x5a4f4653
/* Since UDF 2.01 is ISO 13346 based... */
#define UDF_SUPER_MAGIC 0x15013346
@@ -94,5 +95,6 @@
#define ZSMALLOC_MAGIC 0x58295829
#define DMA_BUF_MAGIC 0x444d4142 /* "DMAB" */
#define Z3FOLD_MAGIC 0x33
+#define PPC_CMM_MAGIC 0xc7571590
#endif /* __LINUX_MAGIC_H__ */
diff --git a/include/uapi/linux/media-bus-format.h b/include/uapi/linux/media-bus-format.h
index 16c1fa2d89a4..84fa53ffb13f 100644
--- a/include/uapi/linux/media-bus-format.h
+++ b/include/uapi/linux/media-bus-format.h
@@ -64,7 +64,7 @@
#define MEDIA_BUS_FMT_RGB121212_1X36 0x1019
#define MEDIA_BUS_FMT_RGB161616_1X48 0x101a
-/* YUV (including grey) - next is 0x202d */
+/* YUV (including grey) - next is 0x202e */
#define MEDIA_BUS_FMT_Y8_1X8 0x2001
#define MEDIA_BUS_FMT_UV8_1X8 0x2015
#define MEDIA_BUS_FMT_UYVY8_1_5X8 0x2002
@@ -86,6 +86,7 @@
#define MEDIA_BUS_FMT_VYUY12_2X12 0x201d
#define MEDIA_BUS_FMT_YUYV12_2X12 0x201e
#define MEDIA_BUS_FMT_YVYU12_2X12 0x201f
+#define MEDIA_BUS_FMT_Y14_1X14 0x202d
#define MEDIA_BUS_FMT_UYVY8_1X16 0x200f
#define MEDIA_BUS_FMT_VYUY8_1X16 0x2010
#define MEDIA_BUS_FMT_YUYV8_1X16 0x2011
diff --git a/include/uapi/linux/mii.h b/include/uapi/linux/mii.h
index 51b48e4be1f2..0b9c3beda345 100644
--- a/include/uapi/linux/mii.h
+++ b/include/uapi/linux/mii.h
@@ -131,6 +131,18 @@
#define NWAYTEST_LOOPBACK 0x0100 /* Enable loopback for N-way */
#define NWAYTEST_RESV2 0xfe00 /* Unused... */
+/* MAC and PHY tx_config_Reg[15:0] for SGMII in-band auto-negotiation. */
+#define ADVERTISE_SGMII 0x0001 /* MAC can do SGMII */
+#define LPA_SGMII 0x0001 /* PHY can do SGMII */
+#define LPA_SGMII_DPX_SPD_MASK 0x1C00 /* SGMII duplex and speed bits */
+#define LPA_SGMII_10HALF 0x0000 /* Can do 10mbps half-duplex */
+#define LPA_SGMII_10FULL 0x1000 /* Can do 10mbps full-duplex */
+#define LPA_SGMII_100HALF 0x0400 /* Can do 100mbps half-duplex */
+#define LPA_SGMII_100FULL 0x1400 /* Can do 100mbps full-duplex */
+#define LPA_SGMII_1000HALF 0x0800 /* Can do 1000mbps half-duplex */
+#define LPA_SGMII_1000FULL 0x1800 /* Can do 1000mbps full-duplex */
+#define LPA_SGMII_LINK 0x8000 /* PHY link with copper-side partner */
+
/* 1000BASE-T Control register */
#define ADVERTISE_1000FULL 0x0200 /* Advertise 1000BASE-T full duplex */
#define ADVERTISE_1000HALF 0x0100 /* Advertise 1000BASE-T half duplex */
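A small illustrative helper decoding the new SGMII in-band bits; the word would normally come from the MAC's or PHY's auto-negotiation link-partner register, which is not read here.

#include <stdio.h>
#include <linux/mii.h>

/* Illustrative only: print the speed/duplex advertised by the SGMII
 * link partner, as encoded in the in-band tx_config word. */
static void print_sgmii_lpa(unsigned int lpa)
{
        if (!(lpa & LPA_SGMII_LINK)) {
                printf("link down\n");
                return;
        }
        switch (lpa & LPA_SGMII_DPX_SPD_MASK) {
        case LPA_SGMII_10HALF:   printf("10 Mb/s half duplex\n");   break;
        case LPA_SGMII_10FULL:   printf("10 Mb/s full duplex\n");   break;
        case LPA_SGMII_100HALF:  printf("100 Mb/s half duplex\n");  break;
        case LPA_SGMII_100FULL:  printf("100 Mb/s full duplex\n");  break;
        case LPA_SGMII_1000HALF: printf("1000 Mb/s half duplex\n"); break;
        case LPA_SGMII_1000FULL: printf("1000 Mb/s full duplex\n"); break;
        }
}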
diff --git a/include/uapi/linux/msg.h b/include/uapi/linux/msg.h
index e4a0d9a9a9e8..01ee8d54c1c8 100644
--- a/include/uapi/linux/msg.h
+++ b/include/uapi/linux/msg.h
@@ -19,9 +19,9 @@ struct msqid_ds {
struct ipc_perm msg_perm;
struct msg *msg_first; /* first message on queue,unused */
struct msg *msg_last; /* last message in queue,unused */
- __kernel_time_t msg_stime; /* last msgsnd time */
- __kernel_time_t msg_rtime; /* last msgrcv time */
- __kernel_time_t msg_ctime; /* last change time */
+ __kernel_old_time_t msg_stime; /* last msgsnd time */
+ __kernel_old_time_t msg_rtime; /* last msgrcv time */
+ __kernel_old_time_t msg_ctime; /* last change time */
unsigned long msg_lcbytes; /* Reuse junk fields for 32 bit */
unsigned long msg_lqbytes; /* ditto */
unsigned short msg_cbytes; /* current number of bytes on queue */
diff --git a/include/uapi/linux/net_tstamp.h b/include/uapi/linux/net_tstamp.h
index e5b39721c6e4..f96e650d0af9 100644
--- a/include/uapi/linux/net_tstamp.h
+++ b/include/uapi/linux/net_tstamp.h
@@ -90,6 +90,14 @@ enum hwtstamp_tx_types {
* queue.
*/
HWTSTAMP_TX_ONESTEP_SYNC,
+
+ /*
+ * Same as HWTSTAMP_TX_ONESTEP_SYNC, but also enables time
+ * stamp insertion directly into PDelay_Resp packets. In this
+ * case, neither transmitted Sync nor PDelay_Resp packets will
+ * receive a time stamp via the socket error queue.
+ */
+ HWTSTAMP_TX_ONESTEP_P2P,
};
/* possible values for hwtstamp_config->rx_filter */
diff --git a/include/uapi/linux/netfilter/ipset/ip_set.h b/include/uapi/linux/netfilter/ipset/ip_set.h
index eea166c52c36..11a72a938eb1 100644
--- a/include/uapi/linux/netfilter/ipset/ip_set.h
+++ b/include/uapi/linux/netfilter/ipset/ip_set.h
@@ -205,6 +205,8 @@ enum ipset_cadt_flags {
IPSET_FLAG_WITH_FORCEADD = (1 << IPSET_FLAG_BIT_WITH_FORCEADD),
IPSET_FLAG_BIT_WITH_SKBINFO = 6,
IPSET_FLAG_WITH_SKBINFO = (1 << IPSET_FLAG_BIT_WITH_SKBINFO),
+ IPSET_FLAG_BIT_IFACE_WILDCARD = 7,
+ IPSET_FLAG_IFACE_WILDCARD = (1 << IPSET_FLAG_BIT_IFACE_WILDCARD),
IPSET_FLAG_CADT_MAX = 15,
};
diff --git a/include/uapi/linux/netfilter/nf_conntrack_common.h b/include/uapi/linux/netfilter/nf_conntrack_common.h
index 336014bf8868..b6f0bb1dc799 100644
--- a/include/uapi/linux/netfilter/nf_conntrack_common.h
+++ b/include/uapi/linux/netfilter/nf_conntrack_common.h
@@ -97,6 +97,15 @@ enum ip_conntrack_status {
IPS_UNTRACKED_BIT = 12,
IPS_UNTRACKED = (1 << IPS_UNTRACKED_BIT),
+#ifdef __KERNEL__
+ /* Re-purposed for in-kernel use:
+ * Tags a conntrack entry that clashed with an existing entry
+ * on insert.
+ */
+ IPS_NAT_CLASH_BIT = IPS_UNTRACKED_BIT,
+ IPS_NAT_CLASH = IPS_UNTRACKED,
+#endif
+
/* Conntrack got a helper explicitly attached via CT target. */
IPS_HELPER_BIT = 13,
IPS_HELPER = (1 << IPS_HELPER_BIT),
@@ -110,7 +119,8 @@ enum ip_conntrack_status {
*/
IPS_UNCHANGEABLE_MASK = (IPS_NAT_DONE_MASK | IPS_NAT_MASK |
IPS_EXPECTED | IPS_CONFIRMED | IPS_DYING |
- IPS_SEQ_ADJUST | IPS_TEMPLATE | IPS_OFFLOAD),
+ IPS_SEQ_ADJUST | IPS_TEMPLATE | IPS_UNTRACKED |
+ IPS_OFFLOAD),
__IPS_MAX_BIT = 15,
};
diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h
index ed8881ad18ed..065218a20bb7 100644
--- a/include/uapi/linux/netfilter/nf_tables.h
+++ b/include/uapi/linux/netfilter/nf_tables.h
@@ -48,6 +48,7 @@ enum nft_registers {
#define NFT_REG_SIZE 16
#define NFT_REG32_SIZE 4
+#define NFT_REG32_COUNT (NFT_REG32_15 - NFT_REG32_00 + 1)
/**
* enum nft_verdicts - nf_tables internal verdicts
@@ -144,12 +145,14 @@ enum nft_list_attributes {
* @NFTA_HOOK_HOOKNUM: netfilter hook number (NLA_U32)
* @NFTA_HOOK_PRIORITY: netfilter hook priority (NLA_U32)
* @NFTA_HOOK_DEV: netdevice name (NLA_STRING)
+ * @NFTA_HOOK_DEVS: list of netdevices (NLA_NESTED)
*/
enum nft_hook_attributes {
NFTA_HOOK_UNSPEC,
NFTA_HOOK_HOOKNUM,
NFTA_HOOK_PRIORITY,
NFTA_HOOK_DEV,
+ NFTA_HOOK_DEVS,
__NFTA_HOOK_MAX
};
#define NFTA_HOOK_MAX (__NFTA_HOOK_MAX - 1)
@@ -299,15 +302,29 @@ enum nft_set_policies {
* enum nft_set_desc_attributes - set element description
*
* @NFTA_SET_DESC_SIZE: number of elements in set (NLA_U32)
+ * @NFTA_SET_DESC_CONCAT: description of field concatenation (NLA_NESTED)
*/
enum nft_set_desc_attributes {
NFTA_SET_DESC_UNSPEC,
NFTA_SET_DESC_SIZE,
+ NFTA_SET_DESC_CONCAT,
__NFTA_SET_DESC_MAX
};
#define NFTA_SET_DESC_MAX (__NFTA_SET_DESC_MAX - 1)
/**
+ * enum nft_set_field_attributes - attributes of concatenated fields
+ *
+ * @NFTA_SET_FIELD_LEN: length of single field, in bits (NLA_U32)
+ */
+enum nft_set_field_attributes {
+ NFTA_SET_FIELD_UNSPEC,
+ NFTA_SET_FIELD_LEN,
+ __NFTA_SET_FIELD_MAX
+};
+#define NFTA_SET_FIELD_MAX (__NFTA_SET_FIELD_MAX - 1)
+
+/**
* enum nft_set_attributes - nf_tables set netlink attributes
*
* @NFTA_SET_TABLE: table name (NLA_STRING)
@@ -368,6 +385,7 @@ enum nft_set_elem_flags {
* @NFTA_SET_ELEM_USERDATA: user data (NLA_BINARY)
* @NFTA_SET_ELEM_EXPR: expression (NLA_NESTED: nft_expr_attributes)
* @NFTA_SET_ELEM_OBJREF: stateful object reference (NLA_STRING)
+ * @NFTA_SET_ELEM_KEY_END: closing key value (NLA_NESTED: nft_data)
*/
enum nft_set_elem_attributes {
NFTA_SET_ELEM_UNSPEC,
@@ -380,6 +398,7 @@ enum nft_set_elem_attributes {
NFTA_SET_ELEM_EXPR,
NFTA_SET_ELEM_PAD,
NFTA_SET_ELEM_OBJREF,
+ NFTA_SET_ELEM_KEY_END,
__NFTA_SET_ELEM_MAX
};
#define NFTA_SET_ELEM_MAX (__NFTA_SET_ELEM_MAX - 1)
@@ -483,6 +502,20 @@ enum nft_immediate_attributes {
#define NFTA_IMMEDIATE_MAX (__NFTA_IMMEDIATE_MAX - 1)
/**
+ * enum nft_bitwise_ops - nf_tables bitwise operations
+ *
+ * @NFT_BITWISE_BOOL: mask-and-xor operation used to implement NOT, AND, OR and
+ * XOR boolean operations
+ * @NFT_BITWISE_LSHIFT: left-shift operation
+ * @NFT_BITWISE_RSHIFT: right-shift operation
+ */
+enum nft_bitwise_ops {
+ NFT_BITWISE_BOOL,
+ NFT_BITWISE_LSHIFT,
+ NFT_BITWISE_RSHIFT,
+};
+
+/**
* enum nft_bitwise_attributes - nf_tables bitwise expression netlink attributes
*
* @NFTA_BITWISE_SREG: source register (NLA_U32: nft_registers)
@@ -490,16 +523,20 @@ enum nft_immediate_attributes {
* @NFTA_BITWISE_LEN: length of operands (NLA_U32)
* @NFTA_BITWISE_MASK: mask value (NLA_NESTED: nft_data_attributes)
* @NFTA_BITWISE_XOR: xor value (NLA_NESTED: nft_data_attributes)
+ * @NFTA_BITWISE_OP: type of operation (NLA_U32: nft_bitwise_ops)
+ * @NFTA_BITWISE_DATA: argument for non-boolean operations
+ * (NLA_NESTED: nft_data_attributes)
*
- * The bitwise expression performs the following operation:
+ * The bitwise expression supports boolean and shift operations. It implements
+ * the boolean operations by performing the following operation:
*
* dreg = (sreg & mask) ^ xor
*
- * which allow to express all bitwise operations:
+ * with these mask and xor values:
*
* mask xor
* NOT: 1 1
- * OR: 0 x
+ * OR: ~x x
* XOR: 1 x
* AND: x 0
*/
@@ -510,6 +547,8 @@ enum nft_bitwise_attributes {
NFTA_BITWISE_LEN,
NFTA_BITWISE_MASK,
NFTA_BITWISE_XOR,
+ NFTA_BITWISE_OP,
+ NFTA_BITWISE_DATA,
__NFTA_BITWISE_MAX
};
#define NFTA_BITWISE_MAX (__NFTA_BITWISE_MAX - 1)
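The mask/xor encodings in the comment above can be checked mechanically; a tiny sketch, with s standing for the source register value and x for an arbitrary constant operand:

#include <assert.h>
#include <stdint.h>

/* Verify the documented encodings of dreg = (sreg & mask) ^ xor. */
int main(void)
{
        uint32_t s = 0x12345678, x = 0x0000ff00;

        assert(((s & ~UINT32_C(0)) ^ ~UINT32_C(0)) == ~s);       /* NOT: mask=~0, xor=~0 */
        assert(((s & ~x)           ^ x)            == (s | x));  /* OR:  mask=~x, xor=x  */
        assert(((s & ~UINT32_C(0)) ^ x)            == (s ^ x));  /* XOR: mask=~0, xor=x  */
        assert(((s & x)            ^ 0)            == (s & x));  /* AND: mask=x,  xor=0  */
        return 0;
}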
@@ -803,6 +842,8 @@ enum nft_exthdr_attributes {
* @NFT_META_TIME_NS: time since epoch (in nanoseconds)
* @NFT_META_TIME_DAY: day of week (from 0 = Sunday to 6 = Saturday)
* @NFT_META_TIME_HOUR: hour of day (in seconds)
+ * @NFT_META_SDIF: slave device interface index
+ * @NFT_META_SDIFNAME: slave device interface name
*/
enum nft_meta_keys {
NFT_META_LEN,
@@ -838,6 +879,8 @@ enum nft_meta_keys {
NFT_META_TIME_NS,
NFT_META_TIME_DAY,
NFT_META_TIME_HOUR,
+ NFT_META_SDIF,
+ NFT_META_SDIFNAME,
};
/**
@@ -1516,6 +1559,7 @@ enum nft_object_attributes {
* @NFTA_FLOWTABLE_HOOK: netfilter hook configuration(NLA_U32)
* @NFTA_FLOWTABLE_USE: number of references to this flow table (NLA_U32)
* @NFTA_FLOWTABLE_HANDLE: object handle (NLA_U64)
+ * @NFTA_FLOWTABLE_FLAGS: flags (NLA_U32)
*/
enum nft_flowtable_attributes {
NFTA_FLOWTABLE_UNSPEC,
@@ -1525,6 +1569,7 @@ enum nft_flowtable_attributes {
NFTA_FLOWTABLE_USE,
NFTA_FLOWTABLE_HANDLE,
NFTA_FLOWTABLE_PAD,
+ NFTA_FLOWTABLE_FLAGS,
__NFTA_FLOWTABLE_MAX
};
#define NFTA_FLOWTABLE_MAX (__NFTA_FLOWTABLE_MAX - 1)
diff --git a/include/uapi/linux/netfilter/xt_sctp.h b/include/uapi/linux/netfilter/xt_sctp.h
index 4bc6d1a08781..b4d804a9fccb 100644
--- a/include/uapi/linux/netfilter/xt_sctp.h
+++ b/include/uapi/linux/netfilter/xt_sctp.h
@@ -41,19 +41,19 @@ struct xt_sctp_info {
#define SCTP_CHUNKMAP_SET(chunkmap, type) \
do { \
(chunkmap)[type / bytes(__u32)] |= \
- 1 << (type % bytes(__u32)); \
+ 1u << (type % bytes(__u32)); \
} while (0)
#define SCTP_CHUNKMAP_CLEAR(chunkmap, type) \
do { \
(chunkmap)[type / bytes(__u32)] &= \
- ~(1 << (type % bytes(__u32))); \
+ ~(1u << (type % bytes(__u32))); \
} while (0)
#define SCTP_CHUNKMAP_IS_SET(chunkmap, type) \
({ \
((chunkmap)[type / bytes (__u32)] & \
- (1 << (type % bytes (__u32)))) ? 1: 0; \
+ (1u << (type % bytes (__u32)))) ? 1: 0; \
})
#define SCTP_CHUNKMAP_RESET(chunkmap) \
diff --git a/include/uapi/linux/netfilter_arp/arp_tables.h b/include/uapi/linux/netfilter_arp/arp_tables.h
index a2a0927d9bd6..bbf5af2b67a8 100644
--- a/include/uapi/linux/netfilter_arp/arp_tables.h
+++ b/include/uapi/linux/netfilter_arp/arp_tables.h
@@ -199,7 +199,7 @@ struct arpt_get_entries {
/* Helper functions */
static __inline__ struct xt_entry_target *arpt_get_target(struct arpt_entry *e)
{
- return (void *)e + e->target_offset;
+ return (struct xt_entry_target *)((char *)e + e->target_offset);
}
/*
diff --git a/include/uapi/linux/netfilter_bridge/ebtables.h b/include/uapi/linux/netfilter_bridge/ebtables.h
index 8076c940ffeb..a494cf43a755 100644
--- a/include/uapi/linux/netfilter_bridge/ebtables.h
+++ b/include/uapi/linux/netfilter_bridge/ebtables.h
@@ -194,7 +194,7 @@ struct ebt_entry {
static __inline__ struct ebt_entry_target *
ebt_get_target(struct ebt_entry *e)
{
- return (void *)e + e->target_offset;
+ return (struct ebt_entry_target *)((char *)e + e->target_offset);
}
/* {g,s}etsockopt numbers */
diff --git a/include/uapi/linux/netfilter_ipv4/ip_tables.h b/include/uapi/linux/netfilter_ipv4/ip_tables.h
index 6aaeb14bfce1..50c7fee625ae 100644
--- a/include/uapi/linux/netfilter_ipv4/ip_tables.h
+++ b/include/uapi/linux/netfilter_ipv4/ip_tables.h
@@ -222,7 +222,7 @@ struct ipt_get_entries {
static __inline__ struct xt_entry_target *
ipt_get_target(struct ipt_entry *e)
{
- return (void *)e + e->target_offset;
+ return (struct xt_entry_target *)((char *)e + e->target_offset);
}
/*
diff --git a/include/uapi/linux/netfilter_ipv6/ip6_tables.h b/include/uapi/linux/netfilter_ipv6/ip6_tables.h
index 031d0a43bed2..d9e364f96a5c 100644
--- a/include/uapi/linux/netfilter_ipv6/ip6_tables.h
+++ b/include/uapi/linux/netfilter_ipv6/ip6_tables.h
@@ -262,7 +262,7 @@ struct ip6t_get_entries {
static __inline__ struct xt_entry_target *
ip6t_get_target(struct ip6t_entry *e)
{
- return (void *)e + e->target_offset;
+ return (struct xt_entry_target *)((char *)e + e->target_offset);
}
/*
diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h
index beee59c831a7..5eab191607f8 100644
--- a/include/uapi/linux/nl80211.h
+++ b/include/uapi/linux/nl80211.h
@@ -249,6 +249,22 @@
*/
/**
+ * DOC: VLAN offload support for setting group keys and binding STAs to VLANs
+ *
+ * By setting @NL80211_EXT_FEATURE_VLAN_OFFLOAD flag drivers can indicate they
+ * support offloading VLAN functionality in a manner where the driver exposes a
+ * single netdev that uses VLAN tagged frames and separate VLAN-specific netdevs
+ * can then be added using RTM_NEWLINK/IFLA_VLAN_ID similarly to the Ethernet
+ * case. Frames received from stations that are not assigned to any VLAN are
+ * delivered on the main netdev and frames to such stations can be sent through
+ * that main netdev.
+ *
+ * %NL80211_CMD_NEW_KEY (for group keys), %NL80211_CMD_NEW_STATION, and
+ * %NL80211_CMD_SET_STATION will optionally specify vlan_id using
+ * %NL80211_ATTR_VLAN_ID.
+ */
+
+/**
* enum nl80211_commands - supported nl80211 commands
*
* @NL80211_CMD_UNSPEC: unspecified command to catch errors
@@ -571,6 +587,14 @@
* set of BSSID,frequency parameters is used (i.e., either the enforcing
* %NL80211_ATTR_MAC,%NL80211_ATTR_WIPHY_FREQ or the less strict
* %NL80211_ATTR_MAC_HINT and %NL80211_ATTR_WIPHY_FREQ_HINT).
+ * Driver shall not modify the IEs specified through %NL80211_ATTR_IE if
+ * %NL80211_ATTR_MAC is included. However, if %NL80211_ATTR_MAC_HINT is
+ * included, the IEs in %NL80211_ATTR_IE are provided by user space based on
+ * the best BSS it selected. Thus, if the driver ends up selecting a
+ * different BSS, it can modify these IEs accordingly (e.g. if userspace asks
+ * the driver to perform PMKSA caching with BSS1 and the driver ends up
+ * selecting BSS2 with a different PMKSA cache entry, the RSNIE has to be
+ * updated with the appropriate PMKID).
* %NL80211_ATTR_PREV_BSSID can be used to request a reassociation within
* the ESS in case the device is already associated and an association with
* a different BSS is desired.
@@ -2373,6 +2397,9 @@ enum nl80211_commands {
* the allowed channel bandwidth configurations. (u8 attribute)
* Defined by IEEE P802.11ay/D4.0 section 9.4.2.251, Table 13.
*
+ * @NL80211_ATTR_VLAN_ID: VLAN ID (1..4094) for the station and VLAN group key
+ * (u16).
+ *
* @NUM_NL80211_ATTR: total number of nl80211_attrs available
* @NL80211_ATTR_MAX: highest attribute number currently defined
* @__NL80211_ATTR_AFTER_LAST: internal use
@@ -2835,6 +2862,8 @@ enum nl80211_attrs {
NL80211_ATTR_WIPHY_EDMG_CHANNELS,
NL80211_ATTR_WIPHY_EDMG_BW_CONFIG,
+ NL80211_ATTR_VLAN_ID,
+
/* add attributes here, update the policy in nl80211.c */
__NL80211_ATTR_AFTER_LAST,
@@ -5484,6 +5513,14 @@ enum nl80211_feature_flags {
* @NL80211_EXT_FEATURE_SAE_OFFLOAD: Device wants to do SAE authentication in
* station mode (SAE password is passed as part of the connect command).
*
+ * @NL80211_EXT_FEATURE_VLAN_OFFLOAD: The driver supports a single netdev
+ * with VLAN tagged frames and separate VLAN-specific netdevs added using
+ * vconfig similarly to the Ethernet case.
+ *
+ * @NL80211_EXT_FEATURE_AQL: The driver supports the Airtime Queue Limit (AQL)
+ * feature, which prevents bufferbloat by using the expected transmission
+ * time to limit the amount of data buffered in the hardware.
+ *
* @NUM_NL80211_EXT_FEATURES: number of extended features.
* @MAX_NL80211_EXT_FEATURES: highest extended feature index.
*/
@@ -5529,6 +5566,8 @@ enum nl80211_ext_feature_index {
NL80211_EXT_FEATURE_EXT_KEY_ID,
NL80211_EXT_FEATURE_STA_TX_PWR,
NL80211_EXT_FEATURE_SAE_OFFLOAD,
+ NL80211_EXT_FEATURE_VLAN_OFFLOAD,
+ NL80211_EXT_FEATURE_AQL,
/* add new features before the definition below */
NUM_NL80211_EXT_FEATURES,
diff --git a/include/uapi/linux/openat2.h b/include/uapi/linux/openat2.h
new file mode 100644
index 000000000000..58b1eb711360
--- /dev/null
+++ b/include/uapi/linux/openat2.h
@@ -0,0 +1,39 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _UAPI_LINUX_OPENAT2_H
+#define _UAPI_LINUX_OPENAT2_H
+
+#include <linux/types.h>
+
+/*
+ * Arguments for how openat2(2) should open the target path. If only @flags and
+ * @mode are non-zero, then openat2(2) operates very similarly to openat(2).
+ *
+ * However, unlike openat(2), unknown or invalid bits in @flags result in
+ * -EINVAL rather than being silently ignored. @mode must be zero unless one of
+ * {O_CREAT, O_TMPFILE} are set.
+ *
+ * @flags: O_* flags.
+ * @mode: O_CREAT/O_TMPFILE file mode.
+ * @resolve: RESOLVE_* flags.
+ */
+struct open_how {
+ __u64 flags;
+ __u64 mode;
+ __u64 resolve;
+};
+
+/* how->resolve flags for openat2(2). */
+#define RESOLVE_NO_XDEV 0x01 /* Block mount-point crossings
+ (includes bind-mounts). */
+#define RESOLVE_NO_MAGICLINKS 0x02 /* Block traversal through procfs-style
+ "magic-links". */
+#define RESOLVE_NO_SYMLINKS 0x04 /* Block traversal through all symlinks
+ (implies RESOLVE_NO_MAGICLINKS) */
+#define RESOLVE_BENEATH 0x08 /* Block "lexical" trickery like
+ "..", symlinks, and absolute
+ paths which escape the dirfd. */
+#define RESOLVE_IN_ROOT 0x10 /* Make all jumps to "/" and ".."
+ be scoped inside the dirfd
+ (similar to chroot(2)). */
+
+#endif /* _UAPI_LINUX_OPENAT2_H */
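A hedged sketch of invoking the new syscall directly; the fallback __NR_openat2 value assumes the asm-generic number (437), and since glibc provides no wrapper at this point, syscall(2) is used.

#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/openat2.h>

#ifndef __NR_openat2
#define __NR_openat2 437        /* asm-generic value; assumption */
#endif

/* Illustrative only: open path with every lookup confined to dirfd,
 * roughly like a chroot(dirfd) scoped to this one open. */
static int open_in_root(int dirfd, const char *path)
{
        struct open_how how;

        memset(&how, 0, sizeof(how));
        how.flags   = O_RDONLY;
        how.resolve = RESOLVE_IN_ROOT | RESOLVE_NO_MAGICLINKS;

        return syscall(__NR_openat2, dirfd, path, &how, sizeof(how));
}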
diff --git a/include/uapi/linux/openvswitch.h b/include/uapi/linux/openvswitch.h
index 1887a451c388..ae2bff14e7e1 100644
--- a/include/uapi/linux/openvswitch.h
+++ b/include/uapi/linux/openvswitch.h
@@ -173,6 +173,7 @@ enum ovs_packet_cmd {
* @OVS_PACKET_ATTR_LEN: Packet size before truncation.
* %OVS_PACKET_ATTR_USERSPACE action specify the Maximum received fragment
* size.
+ * @OVS_PACKET_ATTR_HASH: Packet hash info (e.g. hash, sw_hash and l4_hash in skb).
*
* These attributes follow the &struct ovs_header within the Generic Netlink
* payload for %OVS_PACKET_* commands.
@@ -190,7 +191,8 @@ enum ovs_packet_attr {
OVS_PACKET_ATTR_PROBE, /* Packet operation is a feature probe,
error logging should be suppressed. */
OVS_PACKET_ATTR_MRU, /* Maximum received IP fragment size. */
- OVS_PACKET_ATTR_LEN, /* Packet size before truncation. */
+ OVS_PACKET_ATTR_LEN, /* Packet size before truncation. */
+ OVS_PACKET_ATTR_HASH, /* Packet hash. */
__OVS_PACKET_ATTR_MAX
};
@@ -671,6 +673,32 @@ struct ovs_action_push_mpls {
};
/**
+ * struct ovs_action_add_mpls - %OVS_ACTION_ATTR_ADD_MPLS action
+ * argument.
+ * @mpls_lse: MPLS label stack entry to push.
+ * @mpls_ethertype: Ethertype to set in the encapsulating ethernet frame.
+ * @tun_flags: MPLS tunnel attributes.
+ *
+ * The only values @mpls_ethertype should ever be given are %ETH_P_MPLS_UC and
+ * %ETH_P_MPLS_MC, indicating MPLS unicast or multicast. Others are rejected.
+ */
+struct ovs_action_add_mpls {
+ __be32 mpls_lse;
+ __be16 mpls_ethertype; /* Either %ETH_P_MPLS_UC or %ETH_P_MPLS_MC */
+ __u16 tun_flags;
+};
+
+#define OVS_MPLS_L3_TUNNEL_FLAG_MASK (1 << 0) /* Flag to specify the place of
+ * insertion of MPLS header.
+ * When false, the MPLS header
+ * will be inserted at the start
+ * of the packet.
+ * When true, the MPLS header
+ * will be inserted at the start
+ * of the l3 header.
+ */
+
+/**
* struct ovs_action_push_vlan - %OVS_ACTION_ATTR_PUSH_VLAN action argument.
* @vlan_tpid: Tag protocol identifier (TPID) to push.
* @vlan_tci: Tag control identifier (TCI) to push. The CFI bit must be set
@@ -890,6 +918,10 @@ struct check_pkt_len_arg {
* @OVS_ACTION_ATTR_CHECK_PKT_LEN: Check the packet length and execute a set
* of actions if greater than the specified packet length, else execute
* another set of actions.
+ * @OVS_ACTION_ATTR_ADD_MPLS: Push a new MPLS label stack entry at the
+ * start of the packet or at the start of the l3 header, depending on the value
+ * of the l3 tunnel flag in the tun_flags field of the OVS_ACTION_ATTR_ADD_MPLS
+ * argument.
*
* Only a single header can be set with a single %OVS_ACTION_ATTR_SET. Not all
* fields within a header are modifiable, e.g. the IPv4 protocol and fragment
@@ -925,6 +957,7 @@ enum ovs_action_attr {
OVS_ACTION_ATTR_METER, /* u32 meter ID. */
OVS_ACTION_ATTR_CLONE, /* Nested OVS_CLONE_ATTR_*. */
OVS_ACTION_ATTR_CHECK_PKT_LEN, /* Nested OVS_CHECK_PKT_LEN_ATTR_*. */
+ OVS_ACTION_ATTR_ADD_MPLS, /* struct ovs_action_add_mpls. */
__OVS_ACTION_ATTR_MAX, /* Nothing past this will be accepted
* from userspace. */
diff --git a/include/uapi/linux/pci_regs.h b/include/uapi/linux/pci_regs.h
index 29d6e93fd15e..5437690483cd 100644
--- a/include/uapi/linux/pci_regs.h
+++ b/include/uapi/linux/pci_regs.h
@@ -34,6 +34,7 @@
* of which the first 64 bytes are standardized as follows:
*/
#define PCI_STD_HEADER_SIZEOF 64
+#define PCI_STD_NUM_BARS 6 /* Number of standard BARs */
#define PCI_VENDOR_ID 0x00 /* 16 bits */
#define PCI_DEVICE_ID 0x02 /* 16 bits */
#define PCI_COMMAND 0x04 /* 16 bits */
@@ -673,6 +674,9 @@
#define PCI_EXP_LNKCTL2_TLS_8_0GT 0x0003 /* Supported Speed 8GT/s */
#define PCI_EXP_LNKCTL2_TLS_16_0GT 0x0004 /* Supported Speed 16GT/s */
#define PCI_EXP_LNKCTL2_TLS_32_0GT 0x0005 /* Supported Speed 32GT/s */
+#define PCI_EXP_LNKCTL2_ENTER_COMP 0x0010 /* Enter Compliance */
+#define PCI_EXP_LNKCTL2_TX_MARGIN 0x0380 /* Transmit Margin */
+#define PCI_EXP_LNKCTL2_HASD 0x0020 /* HW Autonomous Speed Disable */
#define PCI_EXP_LNKSTA2 50 /* Link Status 2 */
#define PCI_CAP_EXP_ENDPOINT_SIZEOF_V2 52 /* v2 endpoints with link end here */
#define PCI_EXP_SLTCAP2 52 /* Slot Capabilities 2 */
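PCI_STD_NUM_BARS gives userspace a symbolic bound when walking the six standard BARs; a sketch reading the raw 32-bit BAR registers from a device's config space through sysfs (the device path is illustrative, and 64-bit BARs, which span two consecutive registers, are not decoded here).

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>
#include <linux/pci_regs.h>

int main(void)
{
        /* Illustrative path; substitute a real device from /sys/bus/pci/devices. */
        int fd = open("/sys/bus/pci/devices/0000:00:1f.2/config", O_RDONLY);
        uint32_t bar;
        int i;

        if (fd < 0)
                return 1;
        for (i = 0; i < PCI_STD_NUM_BARS; i++) {
                if (pread(fd, &bar, sizeof(bar),
                          PCI_BASE_ADDRESS_0 + 4 * i) != sizeof(bar))
                        break;
                printf("BAR%d: 0x%08x\n", i, bar);
        }
        close(fd);
        return 0;
}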
diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h
index bb7b271397a6..397cfd65b3fe 100644
--- a/include/uapi/linux/perf_event.h
+++ b/include/uapi/linux/perf_event.h
@@ -141,8 +141,9 @@ enum perf_event_sample_format {
PERF_SAMPLE_TRANSACTION = 1U << 17,
PERF_SAMPLE_REGS_INTR = 1U << 18,
PERF_SAMPLE_PHYS_ADDR = 1U << 19,
+ PERF_SAMPLE_AUX = 1U << 20,
- PERF_SAMPLE_MAX = 1U << 20, /* non-ABI */
+ PERF_SAMPLE_MAX = 1U << 21, /* non-ABI */
__PERF_SAMPLE_CALLCHAIN_EARLY = 1ULL << 63, /* non-ABI; internal use */
};
@@ -180,6 +181,8 @@ enum perf_branch_sample_type_shift {
PERF_SAMPLE_BRANCH_TYPE_SAVE_SHIFT = 16, /* save branch type */
+ PERF_SAMPLE_BRANCH_HW_INDEX_SHIFT = 17, /* save low level index of raw branch records */
+
PERF_SAMPLE_BRANCH_MAX_SHIFT /* non-ABI */
};
@@ -207,6 +210,8 @@ enum perf_branch_sample_type {
PERF_SAMPLE_BRANCH_TYPE_SAVE =
1U << PERF_SAMPLE_BRANCH_TYPE_SAVE_SHIFT,
+ PERF_SAMPLE_BRANCH_HW_INDEX = 1U << PERF_SAMPLE_BRANCH_HW_INDEX_SHIFT,
+
PERF_SAMPLE_BRANCH_MAX = 1U << PERF_SAMPLE_BRANCH_MAX_SHIFT,
};
@@ -300,6 +305,7 @@ enum perf_event_read_format {
/* add: sample_stack_user */
#define PERF_ATTR_SIZE_VER4 104 /* add: sample_regs_intr */
#define PERF_ATTR_SIZE_VER5 112 /* add: aux_watermark */
+#define PERF_ATTR_SIZE_VER6 120 /* add: aux_sample_size */
/*
* Hardware event_id to monitor via a performance monitoring event:
@@ -424,7 +430,9 @@ struct perf_event_attr {
*/
__u32 aux_watermark;
__u16 sample_max_stack;
- __u16 __reserved_2; /* align to __u64 */
+ __u16 __reserved_2;
+ __u32 aux_sample_size;
+ __u32 __reserved_3;
};
/*
@@ -849,7 +857,9 @@ enum perf_event_type {
* char data[size];}&& PERF_SAMPLE_RAW
*
* { u64 nr;
- * { u64 from, to, flags } lbr[nr];} && PERF_SAMPLE_BRANCH_STACK
+ * { u64 hw_idx; } && PERF_SAMPLE_BRANCH_HW_INDEX
+ * { u64 from, to, flags } lbr[nr];
+ * } && PERF_SAMPLE_BRANCH_STACK
*
* { u64 abi; # enum perf_sample_regs_abi
* u64 regs[weight(mask)]; } && PERF_SAMPLE_REGS_USER
@@ -864,6 +874,8 @@ enum perf_event_type {
* { u64 abi; # enum perf_sample_regs_abi
* u64 regs[weight(mask)]; } && PERF_SAMPLE_REGS_INTR
* { u64 phys_addr;} && PERF_SAMPLE_PHYS_ADDR
+ * { u64 size;
+ * char data[size]; } && PERF_SAMPLE_AUX
* };
*/
PERF_RECORD_SAMPLE = 9,
diff --git a/include/uapi/linux/pkt_cls.h b/include/uapi/linux/pkt_cls.h
index a6aa466fac9e..449a63971451 100644
--- a/include/uapi/linux/pkt_cls.h
+++ b/include/uapi/linux/pkt_cls.h
@@ -16,9 +16,14 @@ enum {
TCA_ACT_STATS,
TCA_ACT_PAD,
TCA_ACT_COOKIE,
+ TCA_ACT_FLAGS,
__TCA_ACT_MAX
};
+#define TCA_ACT_FLAGS_NO_PERCPU_STATS 1 /* Don't use percpu allocator for
+ * actions stats.
+ */
+
#define TCA_ACT_MAX __TCA_ACT_MAX
#define TCA_OLD_COMPAT (TCA_ACT_MAX+1)
#define TCA_ACT_MAX_PRIO 32
@@ -566,6 +571,14 @@ enum {
* TCA_FLOWER_KEY_ENC_OPT_GENEVE_
* attributes
*/
+ TCA_FLOWER_KEY_ENC_OPTS_VXLAN, /* Nested
+ * TCA_FLOWER_KEY_ENC_OPT_VXLAN_
+ * attributes
+ */
+ TCA_FLOWER_KEY_ENC_OPTS_ERSPAN, /* Nested
+ * TCA_FLOWER_KEY_ENC_OPT_ERSPAN_
+ * attributes
+ */
__TCA_FLOWER_KEY_ENC_OPTS_MAX,
};
@@ -584,6 +597,27 @@ enum {
(__TCA_FLOWER_KEY_ENC_OPT_GENEVE_MAX - 1)
enum {
+ TCA_FLOWER_KEY_ENC_OPT_VXLAN_UNSPEC,
+ TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP, /* u32 */
+ __TCA_FLOWER_KEY_ENC_OPT_VXLAN_MAX,
+};
+
+#define TCA_FLOWER_KEY_ENC_OPT_VXLAN_MAX \
+ (__TCA_FLOWER_KEY_ENC_OPT_VXLAN_MAX - 1)
+
+enum {
+ TCA_FLOWER_KEY_ENC_OPT_ERSPAN_UNSPEC,
+ TCA_FLOWER_KEY_ENC_OPT_ERSPAN_VER, /* u8 */
+ TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX, /* be32 */
+ TCA_FLOWER_KEY_ENC_OPT_ERSPAN_DIR, /* u8 */
+ TCA_FLOWER_KEY_ENC_OPT_ERSPAN_HWID, /* u8 */
+ __TCA_FLOWER_KEY_ENC_OPT_ERSPAN_MAX,
+};
+
+#define TCA_FLOWER_KEY_ENC_OPT_ERSPAN_MAX \
+ (__TCA_FLOWER_KEY_ENC_OPT_ERSPAN_MAX - 1)
+
+enum {
TCA_FLOWER_KEY_FLAGS_IS_FRAGMENT = (1 << 0),
TCA_FLOWER_KEY_FLAGS_FRAG_IS_FIRST = (1 << 1),
};
diff --git a/include/uapi/linux/pkt_sched.h b/include/uapi/linux/pkt_sched.h
index 5011259b8f67..bbe791b24168 100644
--- a/include/uapi/linux/pkt_sched.h
+++ b/include/uapi/linux/pkt_sched.h
@@ -950,19 +950,56 @@ enum {
TCA_PIE_BETA,
TCA_PIE_ECN,
TCA_PIE_BYTEMODE,
+ TCA_PIE_DQ_RATE_ESTIMATOR,
__TCA_PIE_MAX
};
#define TCA_PIE_MAX (__TCA_PIE_MAX - 1)
struct tc_pie_xstats {
- __u64 prob; /* current probability */
- __u32 delay; /* current delay in ms */
- __u32 avg_dq_rate; /* current average dq_rate in bits/pie_time */
- __u32 packets_in; /* total number of packets enqueued */
- __u32 dropped; /* packets dropped due to pie_action */
- __u32 overlimit; /* dropped due to lack of space in queue */
- __u32 maxq; /* maximum queue size */
- __u32 ecn_mark; /* packets marked with ecn*/
+ __u64 prob; /* current probability */
+ __u32 delay; /* current delay in ms */
+ __u32 avg_dq_rate; /* current average dq_rate in
+ * bits/pie_time
+ */
+ __u32 dq_rate_estimating; /* is avg_dq_rate being calculated? */
+ __u32 packets_in; /* total number of packets enqueued */
+ __u32 dropped; /* packets dropped due to pie_action */
+ __u32 overlimit; /* dropped due to lack of space
+ * in queue
+ */
+ __u32 maxq; /* maximum queue size */
+ __u32 ecn_mark; /* packets marked with ecn*/
+};
+
+/* FQ PIE */
+enum {
+ TCA_FQ_PIE_UNSPEC,
+ TCA_FQ_PIE_LIMIT,
+ TCA_FQ_PIE_FLOWS,
+ TCA_FQ_PIE_TARGET,
+ TCA_FQ_PIE_TUPDATE,
+ TCA_FQ_PIE_ALPHA,
+ TCA_FQ_PIE_BETA,
+ TCA_FQ_PIE_QUANTUM,
+ TCA_FQ_PIE_MEMORY_LIMIT,
+ TCA_FQ_PIE_ECN_PROB,
+ TCA_FQ_PIE_ECN,
+ TCA_FQ_PIE_BYTEMODE,
+ TCA_FQ_PIE_DQ_RATE_ESTIMATOR,
+ __TCA_FQ_PIE_MAX
+};
+#define TCA_FQ_PIE_MAX (__TCA_FQ_PIE_MAX - 1)
+
+struct tc_fq_pie_xstats {
+ __u32 packets_in; /* total number of packets enqueued */
+ __u32 dropped; /* packets dropped due to fq_pie_action */
+ __u32 overlimit; /* dropped due to lack of space in queue */
+ __u32 overmemory; /* dropped due to lack of memory in queue */
+ __u32 ecn_mark; /* packets marked with ecn */
+ __u32 new_flow_count; /* count of new flows created by packets */
+ __u32 new_flows_len; /* count of flows in new list */
+ __u32 old_flows_len; /* count of flows in old list */
+ __u32 memory_usage; /* total memory across all queues */
};
/* CBS */
@@ -1181,4 +1218,21 @@ enum {
#define TCA_TAPRIO_ATTR_MAX (__TCA_TAPRIO_ATTR_MAX - 1)
+/* ETS */
+
+#define TCQ_ETS_MAX_BANDS 16
+
+enum {
+ TCA_ETS_UNSPEC,
+ TCA_ETS_NBANDS, /* u8 */
+ TCA_ETS_NSTRICT, /* u8 */
+ TCA_ETS_QUANTA, /* nested TCA_ETS_QUANTA_BAND */
+ TCA_ETS_QUANTA_BAND, /* u32 */
+ TCA_ETS_PRIOMAP, /* nested TCA_ETS_PRIOMAP_BAND */
+ TCA_ETS_PRIOMAP_BAND, /* u8 */
+ __TCA_ETS_MAX,
+};
+
+#define TCA_ETS_MAX (__TCA_ETS_MAX - 1)
+
#endif
diff --git a/include/uapi/linux/ppp-ioctl.h b/include/uapi/linux/ppp-ioctl.h
index 88b5f9990320..7bd2a5a75348 100644
--- a/include/uapi/linux/ppp-ioctl.h
+++ b/include/uapi/linux/ppp-ioctl.h
@@ -104,6 +104,8 @@ struct pppol2tp_ioc_stats {
#define PPPIOCGDEBUG _IOR('t', 65, int) /* Read debug level */
#define PPPIOCSDEBUG _IOW('t', 64, int) /* Set debug level */
#define PPPIOCGIDLE _IOR('t', 63, struct ppp_idle) /* get idle time */
+#define PPPIOCGIDLE32 _IOR('t', 63, struct ppp_idle32) /* 32-bit times */
+#define PPPIOCGIDLE64 _IOR('t', 63, struct ppp_idle64) /* 64-bit times */
#define PPPIOCNEWUNIT _IOWR('t', 62, int) /* create new ppp unit */
#define PPPIOCATTACH _IOW('t', 61, int) /* attach to ppp unit */
#define PPPIOCDETACH _IOW('t', 60, int) /* obsolete, do not use */
diff --git a/include/uapi/linux/ppp_defs.h b/include/uapi/linux/ppp_defs.h
index fff51b91b409..20286bd90ab5 100644
--- a/include/uapi/linux/ppp_defs.h
+++ b/include/uapi/linux/ppp_defs.h
@@ -142,10 +142,24 @@ struct ppp_comp_stats {
/*
* The following structure records the time in seconds since
* the last NP packet was sent or received.
+ *
+ * Linux implements both 32-bit and 64-bit time_t versions
+ * for compatibility with user space that defines ppp_idle
+ * based on the libc time_t.
*/
struct ppp_idle {
- __kernel_time_t xmit_idle; /* time since last NP packet sent */
- __kernel_time_t recv_idle; /* time since last NP packet received */
+ __kernel_old_time_t xmit_idle; /* time since last NP packet sent */
+ __kernel_old_time_t recv_idle; /* time since last NP packet received */
+};
+
+struct ppp_idle32 {
+ __s32 xmit_idle; /* time since last NP packet sent */
+ __s32 recv_idle; /* time since last NP packet received */
+};
+
+struct ppp_idle64 {
+ __s64 xmit_idle; /* time since last NP packet sent */
+ __s64 recv_idle; /* time since last NP packet received */
};
#endif /* _UAPI_PPP_DEFS_H_ */
diff --git a/include/uapi/linux/prctl.h b/include/uapi/linux/prctl.h
index 7da1b37b27aa..07b4f8131e36 100644
--- a/include/uapi/linux/prctl.h
+++ b/include/uapi/linux/prctl.h
@@ -234,4 +234,8 @@ struct prctl_mm_map {
#define PR_GET_TAGGED_ADDR_CTRL 56
# define PR_TAGGED_ADDR_ENABLE (1UL << 0)
+/* Control reclaim behavior when allocating memory */
+#define PR_SET_IO_FLUSHER 57
+#define PR_GET_IO_FLUSHER 58
+
#endif /* _LINUX_PRCTL_H */
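A minimal sketch of the new prctl pair; PR_SET_IO_FLUSHER is intended for userspace processes sitting on the block or filesystem write-out path (FUSE daemons, iSCSI targets and the like), and setting it is assumed to require CAP_SYS_RESOURCE.

#include <stdio.h>
#include <sys/prctl.h>
#include <linux/prctl.h>

int main(void)
{
        /* The unused arguments must be zero. */
        if (prctl(PR_SET_IO_FLUSHER, 1, 0, 0, 0))
                perror("PR_SET_IO_FLUSHER");
        printf("IO_FLUSHER state: %d\n", prctl(PR_GET_IO_FLUSHER, 0, 0, 0, 0));
        return 0;
}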
diff --git a/include/uapi/linux/psp-sev.h b/include/uapi/linux/psp-sev.h
index 592a0c1b77c9..0549a5c622bf 100644
--- a/include/uapi/linux/psp-sev.h
+++ b/include/uapi/linux/psp-sev.h
@@ -58,6 +58,9 @@ typedef enum {
SEV_RET_HWSEV_RET_PLATFORM,
SEV_RET_HWSEV_RET_UNSAFE,
SEV_RET_UNSUPPORTED,
+ SEV_RET_INVALID_PARAM,
+ SEV_RET_RESOURCE_LIMIT,
+ SEV_RET_SECURE_DATA_INVALID,
SEV_RET_MAX,
} sev_ret_code;
diff --git a/include/uapi/linux/random.h b/include/uapi/linux/random.h
index 26ee91300e3e..dcc1b3e6106f 100644
--- a/include/uapi/linux/random.h
+++ b/include/uapi/linux/random.h
@@ -48,9 +48,11 @@ struct rand_pool_info {
* Flags for getrandom(2)
*
* GRND_NONBLOCK Don't block and return EAGAIN instead
- * GRND_RANDOM Use the /dev/random pool instead of /dev/urandom
+ * GRND_RANDOM No effect
+ * GRND_INSECURE Return non-cryptographic random bytes
*/
#define GRND_NONBLOCK 0x0001
#define GRND_RANDOM 0x0002
+#define GRND_INSECURE 0x0004
#endif /* _UAPI_LINUX_RANDOM_H */
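A short sketch of requesting best-effort bytes with the new GRND_INSECURE flag via the raw syscall; the glibc getrandom() wrapper passes flags through as well, but <linux/random.h> is used here so the new define is visible.

#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/random.h>

int main(void)
{
        unsigned char buf[16];
        long n;

        /* Never blocks and may return bytes before the CRNG is ready;
         * do not use GRND_INSECURE for key material. */
        n = syscall(SYS_getrandom, buf, sizeof(buf), GRND_INSECURE);
        if (n < 0)
                return 1;
        printf("got %ld random-ish bytes\n", n);
        return 0;
}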
diff --git a/include/uapi/linux/resource.h b/include/uapi/linux/resource.h
index cc00fd079631..74ef57b38f9f 100644
--- a/include/uapi/linux/resource.h
+++ b/include/uapi/linux/resource.h
@@ -22,8 +22,8 @@
#define RUSAGE_THREAD 1 /* only the calling thread */
struct rusage {
- struct timeval ru_utime; /* user time used */
- struct timeval ru_stime; /* system time used */
+ struct __kernel_old_timeval ru_utime; /* user time used */
+ struct __kernel_old_timeval ru_stime; /* system time used */
__kernel_long_t ru_maxrss; /* maximum resident set size */
__kernel_long_t ru_ixrss; /* integral shared memory size */
__kernel_long_t ru_idrss; /* integral unshared data size */
diff --git a/include/uapi/linux/rtc.h b/include/uapi/linux/rtc.h
index 2ad1788968d0..095af360326a 100644
--- a/include/uapi/linux/rtc.h
+++ b/include/uapi/linux/rtc.h
@@ -92,7 +92,12 @@ struct rtc_pll_info {
#define RTC_PLL_GET _IOR('p', 0x11, struct rtc_pll_info) /* Get PLL correction */
#define RTC_PLL_SET _IOW('p', 0x12, struct rtc_pll_info) /* Set PLL correction */
-#define RTC_VL_READ _IOR('p', 0x13, int) /* Voltage low detector */
+#define RTC_VL_DATA_INVALID BIT(0) /* Voltage too low, RTC data is invalid */
+#define RTC_VL_BACKUP_LOW BIT(1) /* Backup voltage is low */
+#define RTC_VL_BACKUP_EMPTY BIT(2) /* Backup empty or not present */
+#define RTC_VL_ACCURACY_LOW BIT(3) /* Voltage is low, RTC accuracy is reduced */
+
+#define RTC_VL_READ _IOR('p', 0x13, unsigned int) /* Voltage low detection */
#define RTC_VL_CLR _IO('p', 0x14) /* Clear voltage low information */
/* interrupt flags */
diff --git a/include/uapi/linux/rtnetlink.h b/include/uapi/linux/rtnetlink.h
index ce2a623abb75..4a8c5b745157 100644
--- a/include/uapi/linux/rtnetlink.h
+++ b/include/uapi/linux/rtnetlink.h
@@ -164,6 +164,20 @@ enum {
RTM_GETNEXTHOP,
#define RTM_GETNEXTHOP RTM_GETNEXTHOP
+ RTM_NEWLINKPROP = 108,
+#define RTM_NEWLINKPROP RTM_NEWLINKPROP
+ RTM_DELLINKPROP,
+#define RTM_DELLINKPROP RTM_DELLINKPROP
+ RTM_GETLINKPROP,
+#define RTM_GETLINKPROP RTM_GETLINKPROP
+
+ RTM_NEWVLAN = 112,
+#define RTM_NEWNVLAN RTM_NEWVLAN
+ RTM_DELVLAN,
+#define RTM_DELVLAN RTM_DELVLAN
+ RTM_GETVLAN,
+#define RTM_GETVLAN RTM_GETVLAN
+
__RTM_MAX,
#define RTM_MAX (((__RTM_MAX + 3) & ~3) - 1)
};
@@ -302,6 +316,8 @@ enum rt_scope_t {
#define RTM_F_PREFIX 0x800 /* Prefix addresses */
#define RTM_F_LOOKUP_TABLE 0x1000 /* set rtm_table to FIB lookup result */
#define RTM_F_FIB_MATCH 0x2000 /* return full fib lookup match */
+#define RTM_F_OFFLOAD 0x4000 /* route is offloaded */
+#define RTM_F_TRAP 0x8000 /* route is trapping packets */
/* Reserved table identifiers */
@@ -714,6 +730,8 @@ enum rtnetlink_groups {
#define RTNLGRP_IPV6_MROUTE_R RTNLGRP_IPV6_MROUTE_R
RTNLGRP_NEXTHOP,
#define RTNLGRP_NEXTHOP RTNLGRP_NEXTHOP
+ RTNLGRP_BRVLAN,
+#define RTNLGRP_BRVLAN RTNLGRP_BRVLAN
__RTNLGRP_MAX
};
#define RTNLGRP_MAX (__RTNLGRP_MAX - 1)
diff --git a/include/uapi/linux/scc.h b/include/uapi/linux/scc.h
index c5bc7f747755..947edb17ce9d 100644
--- a/include/uapi/linux/scc.h
+++ b/include/uapi/linux/scc.h
@@ -4,6 +4,7 @@
#ifndef _UAPI_SCC_H
#define _UAPI_SCC_H
+#include <linux/sockios.h>
/* selection of hardware types */
diff --git a/include/uapi/linux/sched.h b/include/uapi/linux/sched.h
index 25b4fa00bad1..2e3bc22c6f20 100644
--- a/include/uapi/linux/sched.h
+++ b/include/uapi/linux/sched.h
@@ -33,31 +33,54 @@
#define CLONE_NEWNET 0x40000000 /* New network namespace */
#define CLONE_IO 0x80000000 /* Clone io context */
+/* Flags for the clone3() syscall. */
+#define CLONE_CLEAR_SIGHAND 0x100000000ULL /* Clear any signal handler and reset to SIG_DFL. */
+
+/*
+ * Cloning flags that intersect with CSIGNAL, so they can only be used with the
+ * unshare() and clone3() syscalls:
+ */
+#define CLONE_NEWTIME 0x00000080 /* New time namespace */
+
#ifndef __ASSEMBLY__
/**
* struct clone_args - arguments for the clone3 syscall
- * @flags: Flags for the new process as listed above.
- * All flags are valid except for CSIGNAL and
- * CLONE_DETACHED.
- * @pidfd: If CLONE_PIDFD is set, a pidfd will be
- * returned in this argument.
- * @child_tid: If CLONE_CHILD_SETTID is set, the TID of the
- * child process will be returned in the child's
- * memory.
- * @parent_tid: If CLONE_PARENT_SETTID is set, the TID of
- * the child process will be returned in the
- * parent's memory.
- * @exit_signal: The exit_signal the parent process will be
- * sent when the child exits.
- * @stack: Specify the location of the stack for the
- * child process.
- * Note, @stack is expected to point to the
- * lowest address. The stack direction will be
- * determined by the kernel and set up
- * appropriately based on @stack_size.
- * @stack_size: The size of the stack for the child process.
- * @tls: If CLONE_SETTLS is set, the tls descriptor
- * is set to tls.
+ * @flags: Flags for the new process as listed above.
+ * All flags are valid except for CSIGNAL and
+ * CLONE_DETACHED.
+ * @pidfd: If CLONE_PIDFD is set, a pidfd will be
+ * returned in this argument.
+ * @child_tid: If CLONE_CHILD_SETTID is set, the TID of the
+ * child process will be returned in the child's
+ * memory.
+ * @parent_tid: If CLONE_PARENT_SETTID is set, the TID of
+ * the child process will be returned in the
+ * parent's memory.
+ * @exit_signal: The exit_signal the parent process will be
+ * sent when the child exits.
+ * @stack: Specify the location of the stack for the
+ * child process.
+ * Note, @stack is expected to point to the
+ * lowest address. The stack direction will be
+ * determined by the kernel and set up
+ * appropriately based on @stack_size.
+ * @stack_size: The size of the stack for the child process.
+ * @tls: If CLONE_SETTLS is set, the tls descriptor
+ * is set to tls.
+ * @set_tid: Pointer to an array of type *pid_t. The size
+ * of the array is defined using @set_tid_size.
+ * This array is used to select PIDs/TIDs for
+ * newly created processes. The first element in
+ * this array defines the PID in the most nested PID
+ * namespace. Each additional element in the array
+ * defines the PID in the parent PID namespace of
+ * the original PID namespace. If the array has
+ * fewer entries than the number of currently
+ * nested PID namespaces, only the PIDs in the
+ * corresponding namespaces are set.
+ * @set_tid_size: This defines the size of the array referenced
+ * in @set_tid. This cannot be larger than the
+ * kernel's limit of nested PID namespaces.
*
* The structure is versioned by size and thus extensible.
* New struct members must go at the end of the struct and
@@ -72,10 +95,13 @@ struct clone_args {
__aligned_u64 stack;
__aligned_u64 stack_size;
__aligned_u64 tls;
+ __aligned_u64 set_tid;
+ __aligned_u64 set_tid_size;
};
#endif
#define CLONE_ARGS_SIZE_VER0 64 /* sizeof first published struct */
+#define CLONE_ARGS_SIZE_VER1 80 /* sizeof second published struct */
/*
* Scheduling policies
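A hedged sketch of the extended clone_args in use, requesting a specific PID for the child via set_tid; the __NR_clone3 fallback assumes the asm-generic number (435), and picking a PID is assumed to require CAP_SYS_ADMIN over the target PID namespace.

#include <signal.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <linux/sched.h>
#include <linux/types.h>

#ifndef __NR_clone3
#define __NR_clone3 435         /* asm-generic value; assumption */
#endif

int main(void)
{
        pid_t want = 1000;      /* illustrative PID in the current namespace */
        struct clone_args args;
        long pid;

        memset(&args, 0, sizeof(args));
        args.exit_signal  = SIGCHLD;
        args.set_tid      = (__u64)(uintptr_t)&want;
        args.set_tid_size = 1;

        pid = syscall(__NR_clone3, &args, sizeof(args));
        if (pid < 0) {
                perror("clone3");
                return 1;
        }
        if (pid == 0) {
                printf("child: pid=%d\n", (int)getpid());
                _exit(0);
        }
        printf("parent: child pid=%ld\n", pid);
        return 0;
}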
diff --git a/include/uapi/linux/sctp.h b/include/uapi/linux/sctp.h
index 6d5b164af55c..28ad40d9acba 100644
--- a/include/uapi/linux/sctp.h
+++ b/include/uapi/linux/sctp.h
@@ -105,6 +105,7 @@ typedef __s32 sctp_assoc_t;
#define SCTP_DEFAULT_SNDINFO 34
#define SCTP_AUTH_DEACTIVATE_KEY 35
#define SCTP_REUSE_PORT 36
+#define SCTP_PEER_ADDR_THLDS_V2 37
/* Internal Socket Options. Some of the sctp library functions are
* implemented using these socket options.
@@ -137,6 +138,8 @@ typedef __s32 sctp_assoc_t;
#define SCTP_ASCONF_SUPPORTED 128
#define SCTP_AUTH_SUPPORTED 129
#define SCTP_ECN_SUPPORTED 130
+#define SCTP_EXPOSE_POTENTIALLY_FAILED_STATE 131
+#define SCTP_EXPOSE_PF_STATE SCTP_EXPOSE_POTENTIALLY_FAILED_STATE
/* PR-SCTP policies */
#define SCTP_PR_SCTP_NONE 0x0000
@@ -410,6 +413,8 @@ enum sctp_spc_state {
SCTP_ADDR_ADDED,
SCTP_ADDR_MADE_PRIM,
SCTP_ADDR_CONFIRMED,
+ SCTP_ADDR_POTENTIALLY_FAILED,
+#define SCTP_ADDR_PF SCTP_ADDR_POTENTIALLY_FAILED
};
@@ -449,6 +454,16 @@ struct sctp_send_failed {
__u8 ssf_data[0];
};
+struct sctp_send_failed_event {
+ __u16 ssf_type;
+ __u16 ssf_flags;
+ __u32 ssf_length;
+ __u32 ssf_error;
+ struct sctp_sndinfo ssfe_info;
+ sctp_assoc_t ssf_assoc_id;
+ __u8 ssf_data[0];
+};
+
/*
* ssf_flags: 16 bits (unsigned integer)
*
@@ -605,6 +620,7 @@ struct sctp_event_subscribe {
__u8 sctp_stream_reset_event;
__u8 sctp_assoc_reset_event;
__u8 sctp_stream_change_event;
+ __u8 sctp_send_failure_event_event;
};
/*
@@ -632,6 +648,7 @@ union sctp_notification {
struct sctp_stream_reset_event sn_strreset_event;
struct sctp_assoc_reset_event sn_assocreset_event;
struct sctp_stream_change_event sn_strchange_event;
+ struct sctp_send_failed_event sn_send_failed_event;
};
/* Section 5.3.1
@@ -667,7 +684,9 @@ enum sctp_sn_type {
#define SCTP_ASSOC_RESET_EVENT SCTP_ASSOC_RESET_EVENT
SCTP_STREAM_CHANGE_EVENT,
#define SCTP_STREAM_CHANGE_EVENT SCTP_STREAM_CHANGE_EVENT
- SCTP_SN_TYPE_MAX = SCTP_STREAM_CHANGE_EVENT,
+ SCTP_SEND_FAILED_EVENT,
+#define SCTP_SEND_FAILED_EVENT SCTP_SEND_FAILED_EVENT
+ SCTP_SN_TYPE_MAX = SCTP_SEND_FAILED_EVENT,
#define SCTP_SN_TYPE_MAX SCTP_SN_TYPE_MAX
};
@@ -919,6 +938,7 @@ struct sctp_paddrinfo {
enum sctp_spinfo_state {
SCTP_INACTIVE,
SCTP_PF,
+#define SCTP_POTENTIALLY_FAILED SCTP_PF
SCTP_ACTIVE,
SCTP_UNCONFIRMED,
SCTP_UNKNOWN = 0xffff /* Value used for transport state unknown */
@@ -1068,6 +1088,15 @@ struct sctp_paddrthlds {
__u16 spt_pathpfthld;
};
+/* Use a new structure with spt_pathcpthld for backward compatibility */
+struct sctp_paddrthlds_v2 {
+ sctp_assoc_t spt_assoc_id;
+ struct sockaddr_storage spt_address;
+ __u16 spt_pathmaxrxt;
+ __u16 spt_pathpfthld;
+ __u16 spt_pathcpthld;
+};
+
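A rough illustration of the new option; the threshold values below are made up, and the uapi header is used directly here rather than a libc-provided sctp.h:

#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/sctp.h>

/* sd: an SCTP socket; addr: the peer address whose thresholds are tuned */
static int set_pf_thresholds(int sd, const struct sockaddr_storage *addr)
{
	struct sctp_paddrthlds_v2 thlds;

	memset(&thlds, 0, sizeof(thlds));
	thlds.spt_assoc_id = 0;          /* current association on a one-to-one socket */
	memcpy(&thlds.spt_address, addr, sizeof(*addr));
	thlds.spt_pathmaxrxt = 5;        /* retransmissions before the path is Failed */
	thlds.spt_pathpfthld = 2;        /* retransmissions before Potentially Failed */
	thlds.spt_pathcpthld = 0xffff;   /* new field; illustrative value only */

	return setsockopt(sd, IPPROTO_SCTP, SCTP_PEER_ADDR_THLDS_V2,
			  &thlds, sizeof(thlds));
}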
/*
* Socket Option for Getting the Association/Stream-Specific PR-SCTP Status
*/
diff --git a/include/uapi/linux/seccomp.h b/include/uapi/linux/seccomp.h
index 90734aa5aa36..c1735455bc53 100644
--- a/include/uapi/linux/seccomp.h
+++ b/include/uapi/linux/seccomp.h
@@ -22,6 +22,7 @@
#define SECCOMP_FILTER_FLAG_LOG (1UL << 1)
#define SECCOMP_FILTER_FLAG_SPEC_ALLOW (1UL << 2)
#define SECCOMP_FILTER_FLAG_NEW_LISTENER (1UL << 3)
+#define SECCOMP_FILTER_FLAG_TSYNC_ESRCH (1UL << 4)
/*
* All BPF programs must return a 32-bit value.
@@ -76,6 +77,35 @@ struct seccomp_notif {
struct seccomp_data data;
};
+/*
+ * Valid flags for struct seccomp_notif_resp
+ *
+ * Note, the SECCOMP_USER_NOTIF_FLAG_CONTINUE flag must be used with caution!
+ * If set by the process supervising the syscalls of another process the
+ * syscall will continue. This is problematic because of an inherent TOCTOU.
+ * An attacker can exploit the time while the supervised process is waiting on
+ * a response from the supervising process to rewrite syscall arguments which
+ * are passed as pointers of the intercepted syscall.
+ * It should be absolutely clear that this means that the seccomp notifier
+ * _cannot_ be used to implement a security policy! It should only ever be used
+ * in scenarios where a more privileged process supervises the syscalls of a
+ * lesser privileged process to get around kernel-enforced security
+ * restrictions when the privileged process deems this safe. In other words,
+ * in order to continue a syscall the supervising process should be sure that
+ * another security mechanism or the kernel itself will sufficiently block
+ * syscalls if arguments are rewritten to something unsafe.
+ *
+ * Similar precautions should be applied when stacking SECCOMP_RET_USER_NOTIF
+ * or SECCOMP_RET_TRACE. For SECCOMP_RET_USER_NOTIF filters acting on the
+ * same syscall, the most recently added filter takes precedence. This means
+ * that the new SECCOMP_RET_USER_NOTIF filter can override any
+ * SECCOMP_IOCTL_NOTIF_SEND from earlier filters, essentially allowing all
+ * such filtered syscalls to be executed by sending the response
+ * SECCOMP_USER_NOTIF_FLAG_CONTINUE. Note that SECCOMP_RET_TRACE can equally
+ * be overridden by SECCOMP_USER_NOTIF_FLAG_CONTINUE.
+ */
+#define SECCOMP_USER_NOTIF_FLAG_CONTINUE (1UL << 0)
+
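To make the intended flow concrete, a minimal supervisor sketch, assuming a listener fd obtained with SECCOMP_FILTER_FLAG_NEW_LISTENER and leaving out the policy decision of when continuing is actually safe:

#include <linux/seccomp.h>
#include <string.h>
#include <sys/ioctl.h>

/* notify_fd: listener fd from a filter installed with SECCOMP_FILTER_FLAG_NEW_LISTENER */
static int continue_next_syscall(int notify_fd)
{
	struct seccomp_notif req;
	struct seccomp_notif_resp resp;

	memset(&req, 0, sizeof(req));
	if (ioctl(notify_fd, SECCOMP_IOCTL_NOTIF_RECV, &req) < 0)
		return -1;

	memset(&resp, 0, sizeof(resp));
	resp.id = req.id;
	resp.flags = SECCOMP_USER_NOTIF_FLAG_CONTINUE;  /* let the kernel run the syscall */
	/* val and error are left at 0; they are ignored when CONTINUE is set */

	return ioctl(notify_fd, SECCOMP_IOCTL_NOTIF_SEND, &resp);
}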
struct seccomp_notif_resp {
__u64 id;
__s64 val;
diff --git a/include/uapi/linux/sed-opal.h b/include/uapi/linux/sed-opal.h
index c6d035fa1b6c..6f5af1a84213 100644
--- a/include/uapi/linux/sed-opal.h
+++ b/include/uapi/linux/sed-opal.h
@@ -113,6 +113,25 @@ struct opal_shadow_mbr {
__u64 size;
};
+/* Opal table operations */
+enum opal_table_ops {
+ OPAL_READ_TABLE,
+ OPAL_WRITE_TABLE,
+};
+
+#define OPAL_UID_LENGTH 8
+struct opal_read_write_table {
+ struct opal_key key;
+ const __u64 data;
+ const __u8 table_uid[OPAL_UID_LENGTH];
+ __u64 offset;
+ __u64 size;
+#define OPAL_TABLE_READ (1 << OPAL_READ_TABLE)
+#define OPAL_TABLE_WRITE (1 << OPAL_WRITE_TABLE)
+ __u64 flags;
+ __u64 priv;
+};
+
#define IOC_OPAL_SAVE _IOW('p', 220, struct opal_lock_unlock)
#define IOC_OPAL_LOCK_UNLOCK _IOW('p', 221, struct opal_lock_unlock)
#define IOC_OPAL_TAKE_OWNERSHIP _IOW('p', 222, struct opal_key)
@@ -128,5 +147,6 @@ struct opal_shadow_mbr {
#define IOC_OPAL_PSID_REVERT_TPR _IOW('p', 232, struct opal_key)
#define IOC_OPAL_MBR_DONE _IOW('p', 233, struct opal_mbr_done)
#define IOC_OPAL_WRITE_SHADOW_MBR _IOW('p', 234, struct opal_shadow_mbr)
+#define IOC_OPAL_GENERIC_TABLE_RW _IOW('p', 235, struct opal_read_write_table)
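A hedged sketch of driving the new ioctl from userspace; the credential in struct opal_key is left to the caller, and the table UID is simply whatever table is being accessed:

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/sed-opal.h>

/* fd: the open block device; uid: 8-byte UID of the target table */
static int opal_table_read(int fd, const __u8 uid[OPAL_UID_LENGTH],
			   void *buf, __u64 len, __u64 off)
{
	struct opal_read_write_table rw = {
		.data   = (__u64)(uintptr_t)buf,   /* destination buffer */
		.offset = off,
		.size   = len,
		.flags  = OPAL_TABLE_READ,
	};

	/* rw.key must carry a valid credential; see struct opal_key above. */
	memcpy((void *)rw.table_uid, uid, OPAL_UID_LENGTH);

	return ioctl(fd, IOC_OPAL_GENERIC_TABLE_RW, &rw);
}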
#endif /* _UAPI_SED_OPAL_H */
diff --git a/include/uapi/linux/sem.h b/include/uapi/linux/sem.h
index 39a1876f039e..75aa3b273cd9 100644
--- a/include/uapi/linux/sem.h
+++ b/include/uapi/linux/sem.h
@@ -24,8 +24,8 @@
/* Obsolete, used only for backwards compatibility and libc5 compiles */
struct semid_ds {
struct ipc_perm sem_perm; /* permissions .. see ipc.h */
- __kernel_time_t sem_otime; /* last semop time */
- __kernel_time_t sem_ctime; /* create/last semctl() time */
+ __kernel_old_time_t sem_otime; /* last semop time */
+ __kernel_old_time_t sem_ctime; /* create/last semctl() time */
struct sem *sem_base; /* ptr to first semaphore in array */
struct sem_queue *sem_pending; /* pending operations to be processed */
struct sem_queue **sem_pending_last; /* last pending operation */
diff --git a/include/uapi/linux/serial_core.h b/include/uapi/linux/serial_core.h
index e7fe550b6038..8ec3dd742ea4 100644
--- a/include/uapi/linux/serial_core.h
+++ b/include/uapi/linux/serial_core.h
@@ -290,7 +290,7 @@
/* Sunix UART */
#define PORT_SUNIX 121
-/* Freescale Linflex UART */
+/* Freescale LINFlexD UART */
#define PORT_LINFLEXUART 122
#endif /* _UAPILINUX_SERIAL_CORE_H */
diff --git a/include/uapi/linux/serio.h b/include/uapi/linux/serio.h
index 50e991952c97..ed2a96f43ce4 100644
--- a/include/uapi/linux/serio.h
+++ b/include/uapi/linux/serio.h
@@ -9,7 +9,7 @@
#ifndef _UAPI_SERIO_H
#define _UAPI_SERIO_H
-
+#include <linux/const.h>
#include <linux/ioctl.h>
#define SPIOCSTYPE _IOW('q', 0x01, unsigned long)
@@ -18,10 +18,10 @@
/*
* bit masks for use in "interrupt" flags (3rd argument)
*/
-#define SERIO_TIMEOUT BIT(0)
-#define SERIO_PARITY BIT(1)
-#define SERIO_FRAME BIT(2)
-#define SERIO_OOB_DATA BIT(3)
+#define SERIO_TIMEOUT _BITUL(0)
+#define SERIO_PARITY _BITUL(1)
+#define SERIO_FRAME _BITUL(2)
+#define SERIO_OOB_DATA _BITUL(3)
/*
* Serio types
diff --git a/include/uapi/linux/shm.h b/include/uapi/linux/shm.h
index 6507ad0afc81..8d1f17a4e08e 100644
--- a/include/uapi/linux/shm.h
+++ b/include/uapi/linux/shm.h
@@ -28,9 +28,9 @@
struct shmid_ds {
struct ipc_perm shm_perm; /* operation perms */
int shm_segsz; /* size of segment (bytes) */
- __kernel_time_t shm_atime; /* last attach time */
- __kernel_time_t shm_dtime; /* last detach time */
- __kernel_time_t shm_ctime; /* last change time */
+ __kernel_old_time_t shm_atime; /* last attach time */
+ __kernel_old_time_t shm_dtime; /* last detach time */
+ __kernel_old_time_t shm_ctime; /* last change time */
__kernel_ipc_pid_t shm_cpid; /* pid of creator */
__kernel_ipc_pid_t shm_lpid; /* pid of last operator */
unsigned short shm_nattch; /* no. of current attaches */
diff --git a/include/uapi/linux/snmp.h b/include/uapi/linux/snmp.h
index 549a31c29f7d..7d91f4debc48 100644
--- a/include/uapi/linux/snmp.h
+++ b/include/uapi/linux/snmp.h
@@ -285,6 +285,8 @@ enum
LINUX_MIB_TCPRCVQDROP, /* TCPRcvQDrop */
LINUX_MIB_TCPWQUEUETOOBIG, /* TCPWqueueTooBig */
LINUX_MIB_TCPFASTOPENPASSIVEALTKEY, /* TCPFastOpenPassiveAltKey */
+ LINUX_MIB_TCPTIMEOUTREHASH, /* TCPTimeoutRehash */
+ LINUX_MIB_TCPDUPLICATEDATAREHASH, /* TCPDuplicateDataRehash */
__LINUX_MIB_MAX
};
@@ -323,4 +325,21 @@ enum
__LINUX_MIB_XFRMMAX
};
+/* linux TLS mib definitions */
+enum
+{
+ LINUX_MIB_TLSNUM = 0,
+ LINUX_MIB_TLSCURRTXSW, /* TlsCurrTxSw */
+ LINUX_MIB_TLSCURRRXSW, /* TlsCurrRxSw */
+ LINUX_MIB_TLSCURRTXDEVICE, /* TlsCurrTxDevice */
+ LINUX_MIB_TLSCURRRXDEVICE, /* TlsCurrRxDevice */
+ LINUX_MIB_TLSTXSW, /* TlsTxSw */
+ LINUX_MIB_TLSRXSW, /* TlsRxSw */
+ LINUX_MIB_TLSTXDEVICE, /* TlsTxDevice */
+ LINUX_MIB_TLSRXDEVICE, /* TlsRxDevice */
+ LINUX_MIB_TLSDECRYPTERROR, /* TlsDecryptError */
+ LINUX_MIB_TLSRXDEVICERESYNC, /* TlsRxDeviceResync */
+ __LINUX_MIB_TLSMAX
+};
+
#endif /* _LINUX_SNMP_H */
diff --git a/include/uapi/linux/stat.h b/include/uapi/linux/stat.h
index 7b35e98d3c58..ad80a5c885d5 100644
--- a/include/uapi/linux/stat.h
+++ b/include/uapi/linux/stat.h
@@ -167,8 +167,8 @@ struct statx {
#define STATX_ATTR_APPEND 0x00000020 /* [I] File is append-only */
#define STATX_ATTR_NODUMP 0x00000040 /* [I] File is not to be dumped */
#define STATX_ATTR_ENCRYPTED 0x00000800 /* [I] File requires key to decrypt in fs */
-
#define STATX_ATTR_AUTOMOUNT 0x00001000 /* Dir: Automount trigger */
+#define STATX_ATTR_VERITY 0x00100000 /* [I] Verity protected file */
#endif /* _UAPI_LINUX_STAT_H */
diff --git a/include/uapi/linux/swab.h b/include/uapi/linux/swab.h
index 23cd84868cc3..7272f85d6d6a 100644
--- a/include/uapi/linux/swab.h
+++ b/include/uapi/linux/swab.h
@@ -4,6 +4,7 @@
#include <linux/types.h>
#include <linux/compiler.h>
+#include <asm/bitsperlong.h>
#include <asm/swab.h>
/*
@@ -132,6 +133,15 @@ static inline __attribute_const__ __u32 __fswahb32(__u32 val)
__fswab64(x))
#endif
+static __always_inline unsigned long __swab(const unsigned long y)
+{
+#if __BITS_PER_LONG == 64
+ return __swab64(y);
+#else /* __BITS_PER_LONG == 32 */
+ return __swab32(y);
+#endif
+}
+
/**
* __swahw32 - return a word-swapped 32-bit value
* @x: value to wordswap
diff --git a/include/uapi/linux/switchtec_ioctl.h b/include/uapi/linux/switchtec_ioctl.h
index c912b5a678e4..2c661a3557e5 100644
--- a/include/uapi/linux/switchtec_ioctl.h
+++ b/include/uapi/linux/switchtec_ioctl.h
@@ -32,7 +32,18 @@
#define SWITCHTEC_IOCTL_PART_VENDOR5 10
#define SWITCHTEC_IOCTL_PART_VENDOR6 11
#define SWITCHTEC_IOCTL_PART_VENDOR7 12
-#define SWITCHTEC_IOCTL_NUM_PARTITIONS 13
+#define SWITCHTEC_IOCTL_PART_BL2_0 13
+#define SWITCHTEC_IOCTL_PART_BL2_1 14
+#define SWITCHTEC_IOCTL_PART_MAP_0 15
+#define SWITCHTEC_IOCTL_PART_MAP_1 16
+#define SWITCHTEC_IOCTL_PART_KEY_0 17
+#define SWITCHTEC_IOCTL_PART_KEY_1 18
+
+#define SWITCHTEC_NUM_PARTITIONS_GEN3 13
+#define SWITCHTEC_NUM_PARTITIONS_GEN4 19
+
+/* obsolete: for compatibility with old userspace software */
+#define SWITCHTEC_IOCTL_NUM_PARTITIONS SWITCHTEC_NUM_PARTITIONS_GEN3
struct switchtec_ioctl_flash_info {
__u64 flash_length;
@@ -98,7 +109,9 @@ struct switchtec_ioctl_event_summary {
#define SWITCHTEC_IOCTL_EVENT_CREDIT_TIMEOUT 27
#define SWITCHTEC_IOCTL_EVENT_LINK_STATE 28
#define SWITCHTEC_IOCTL_EVENT_GFMS 29
-#define SWITCHTEC_IOCTL_MAX_EVENTS 30
+#define SWITCHTEC_IOCTL_EVENT_INTERCOMM_REQ_NOTIFY 30
+#define SWITCHTEC_IOCTL_EVENT_UEC 31
+#define SWITCHTEC_IOCTL_MAX_EVENTS 32
#define SWITCHTEC_IOCTL_EVENT_LOCAL_PART_IDX -1
#define SWITCHTEC_IOCTL_EVENT_IDX_ALL -2
diff --git a/include/uapi/linux/sysctl.h b/include/uapi/linux/sysctl.h
index 87aa2a6d9125..27c1ed2822e6 100644
--- a/include/uapi/linux/sysctl.h
+++ b/include/uapi/linux/sysctl.h
@@ -195,7 +195,7 @@ enum
VM_MIN_UNMAPPED=32, /* Set min percent of unmapped pages */
VM_PANIC_ON_OOM=33, /* panic at out-of-memory */
VM_VDSO_ENABLED=34, /* map VDSO into new processes? */
- VM_MIN_SLAB=35, /* Percent pages ignored by zone reclaim */
+ VM_MIN_SLAB=35, /* Percent pages ignored by node reclaim */
};
diff --git a/include/uapi/linux/taskstats.h b/include/uapi/linux/taskstats.h
index 5e8ca16a9079..ccbd08709321 100644
--- a/include/uapi/linux/taskstats.h
+++ b/include/uapi/linux/taskstats.h
@@ -34,7 +34,7 @@
*/
-#define TASKSTATS_VERSION 9
+#define TASKSTATS_VERSION 10
#define TS_COMM_LEN 32 /* should be >= TASK_COMM_LEN
* in linux/sched.h */
@@ -112,6 +112,7 @@ struct taskstats {
__u32 ac_gid; /* Group ID */
__u32 ac_pid; /* Process ID */
__u32 ac_ppid; /* Parent process ID */
+ /* __u32 range means times from 1970 to 2106 */
__u32 ac_btime; /* Begin time [sec since 1970] */
__u64 ac_etime __attribute__((aligned(8)));
/* Elapsed time [usec] */
@@ -168,6 +169,9 @@ struct taskstats {
/* Delay waiting for thrashing page */
__u64 thrashing_count;
__u64 thrashing_delay_total;
+
+ /* v10: 64-bit btime to avoid overflow */
+ __u64 ac_btime64; /* 64-bit begin time */
};
diff --git a/include/uapi/linux/tc_act/tc_tunnel_key.h b/include/uapi/linux/tc_act/tc_tunnel_key.h
index 41c8b462c177..3f10dc4e7a4b 100644
--- a/include/uapi/linux/tc_act/tc_tunnel_key.h
+++ b/include/uapi/linux/tc_act/tc_tunnel_key.h
@@ -50,6 +50,14 @@ enum {
* TCA_TUNNEL_KEY_ENC_OPTS_
* attributes
*/
+ TCA_TUNNEL_KEY_ENC_OPTS_VXLAN, /* Nested
+ * TCA_TUNNEL_KEY_ENC_OPTS_
+ * attributes
+ */
+ TCA_TUNNEL_KEY_ENC_OPTS_ERSPAN, /* Nested
+ * TCA_TUNNEL_KEY_ENC_OPTS_
+ * attributes
+ */
__TCA_TUNNEL_KEY_ENC_OPTS_MAX,
};
@@ -67,4 +75,25 @@ enum {
#define TCA_TUNNEL_KEY_ENC_OPT_GENEVE_MAX \
(__TCA_TUNNEL_KEY_ENC_OPT_GENEVE_MAX - 1)
+enum {
+ TCA_TUNNEL_KEY_ENC_OPT_VXLAN_UNSPEC,
+ TCA_TUNNEL_KEY_ENC_OPT_VXLAN_GBP, /* u32 */
+ __TCA_TUNNEL_KEY_ENC_OPT_VXLAN_MAX,
+};
+
+#define TCA_TUNNEL_KEY_ENC_OPT_VXLAN_MAX \
+ (__TCA_TUNNEL_KEY_ENC_OPT_VXLAN_MAX - 1)
+
+enum {
+ TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_UNSPEC,
+ TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_VER, /* u8 */
+ TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_INDEX, /* be32 */
+ TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_DIR, /* u8 */
+ TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_HWID, /* u8 */
+ __TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_MAX,
+};
+
+#define TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_MAX \
+ (__TCA_TUNNEL_KEY_ENC_OPT_ERSPAN_MAX - 1)
+
#endif
diff --git a/include/uapi/linux/tcp.h b/include/uapi/linux/tcp.h
index 81e697978e8b..fd9eb8f6bcae 100644
--- a/include/uapi/linux/tcp.h
+++ b/include/uapi/linux/tcp.h
@@ -155,6 +155,14 @@ enum {
TCP_QUEUES_NR,
};
+/* why fastopen failed from client perspective */
+enum tcp_fastopen_client_fail {
+ TFO_STATUS_UNSPEC, /* catch-all */
+ TFO_COOKIE_UNAVAILABLE, /* if not in TFO_CLIENT_NO_COOKIE mode */
+ TFO_DATA_NOT_ACKED, /* SYN-ACK did not ack SYN data */
+ TFO_SYN_RETRANSMITTED, /* SYN-ACK did not ack SYN data after timeout */
+};
+
/* for TCP_INFO socket option */
#define TCPI_OPT_TIMESTAMPS 1
#define TCPI_OPT_SACK 2
@@ -211,7 +219,7 @@ struct tcp_info {
__u8 tcpi_backoff;
__u8 tcpi_options;
__u8 tcpi_snd_wscale : 4, tcpi_rcv_wscale : 4;
- __u8 tcpi_delivery_rate_app_limited:1;
+ __u8 tcpi_delivery_rate_app_limited:1, tcpi_fastopen_client_fail:2;
__u32 tcpi_rto;
__u32 tcpi_ato;
@@ -303,20 +311,22 @@ enum {
TCP_NLA_DSACK_DUPS, /* DSACK blocks received */
TCP_NLA_REORD_SEEN, /* reordering events seen */
TCP_NLA_SRTT, /* smoothed RTT in usecs */
+ TCP_NLA_TIMEOUT_REHASH, /* Timeout-triggered rehash attempts */
};
/* for TCP_MD5SIG socket option */
#define TCP_MD5SIG_MAXKEYLEN 80
/* tcp_md5sig extension flags for TCP_MD5SIG_EXT */
-#define TCP_MD5SIG_FLAG_PREFIX 1 /* address prefix length */
+#define TCP_MD5SIG_FLAG_PREFIX 0x1 /* address prefix length */
+#define TCP_MD5SIG_FLAG_IFINDEX 0x2 /* ifindex set */
struct tcp_md5sig {
struct __kernel_sockaddr_storage tcpm_addr; /* address associated */
__u8 tcpm_flags; /* extension flags */
__u8 tcpm_prefixlen; /* address prefix */
__u16 tcpm_keylen; /* key length */
- __u32 __tcpm_pad; /* zero */
+ int tcpm_ifindex; /* device index for scope */
__u8 tcpm_key[TCP_MD5SIG_MAXKEYLEN]; /* key (binary) */
};
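A brief sketch of using the new flag; the ifindex would normally come from if_nametoindex(), and the socket and peer-address setup are elided:

#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <linux/tcp.h>

/* sd: a TCP socket; peer: address the key applies to; keylen <= TCP_MD5SIG_MAXKEYLEN */
static int add_md5_key(int sd, const struct sockaddr_in *peer, int ifindex,
		       const void *key, int keylen)
{
	struct tcp_md5sig md5;

	memset(&md5, 0, sizeof(md5));
	memcpy(&md5.tcpm_addr, peer, sizeof(*peer));
	md5.tcpm_flags   = TCP_MD5SIG_FLAG_IFINDEX;   /* scope the key to one device */
	md5.tcpm_ifindex = ifindex;                   /* e.g. from if_nametoindex() */
	md5.tcpm_keylen  = keylen;
	memcpy(md5.tcpm_key, key, keylen);

	return setsockopt(sd, IPPROTO_TCP, TCP_MD5SIG_EXT, &md5, sizeof(md5));
}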
diff --git a/include/uapi/linux/tee.h b/include/uapi/linux/tee.h
index 4b9eb064d7e7..6596f3a09e54 100644
--- a/include/uapi/linux/tee.h
+++ b/include/uapi/linux/tee.h
@@ -56,6 +56,7 @@
* TEE Implementation ID
*/
#define TEE_IMPL_ID_OPTEE 1
+#define TEE_IMPL_ID_AMDTEE 2
/*
* OP-TEE specific capabilities
diff --git a/include/uapi/linux/time.h b/include/uapi/linux/time.h
index 958932effc5e..4f4b6e48e01c 100644
--- a/include/uapi/linux/time.h
+++ b/include/uapi/linux/time.h
@@ -5,19 +5,31 @@
#include <linux/types.h>
#include <linux/time_types.h>
+#ifndef __KERNEL__
#ifndef _STRUCT_TIMESPEC
#define _STRUCT_TIMESPEC
struct timespec {
- __kernel_time_t tv_sec; /* seconds */
- long tv_nsec; /* nanoseconds */
+ __kernel_old_time_t tv_sec; /* seconds */
+ long tv_nsec; /* nanoseconds */
};
#endif
struct timeval {
- __kernel_time_t tv_sec; /* seconds */
+ __kernel_old_time_t tv_sec; /* seconds */
__kernel_suseconds_t tv_usec; /* microseconds */
};
+struct itimerspec {
+ struct timespec it_interval;/* timer period */
+ struct timespec it_value; /* timer expiration */
+};
+
+struct itimerval {
+ struct timeval it_interval;/* timer interval */
+ struct timeval it_value; /* current value */
+};
+#endif
+
struct timezone {
int tz_minuteswest; /* minutes west of Greenwich */
int tz_dsttime; /* type of dst correction */
@@ -31,16 +43,6 @@ struct timezone {
#define ITIMER_VIRTUAL 1
#define ITIMER_PROF 2
-struct itimerspec {
- struct timespec it_interval; /* timer period */
- struct timespec it_value; /* timer expiration */
-};
-
-struct itimerval {
- struct timeval it_interval; /* timer interval */
- struct timeval it_value; /* current value */
-};
-
/*
* The IDs of the various system clocks (for POSIX.1b interval timers):
*/
diff --git a/include/uapi/linux/time_types.h b/include/uapi/linux/time_types.h
index 27bfc8fc6904..bcc0002115d3 100644
--- a/include/uapi/linux/time_types.h
+++ b/include/uapi/linux/time_types.h
@@ -28,6 +28,16 @@ struct __kernel_old_timeval {
};
#endif
+struct __kernel_old_timespec {
+ __kernel_old_time_t tv_sec; /* seconds */
+ long tv_nsec; /* nanoseconds */
+};
+
+struct __kernel_old_itimerval {
+ struct __kernel_old_timeval it_interval;/* timer interval */
+ struct __kernel_old_timeval it_value; /* current value */
+};
+
struct __kernel_sock_timeval {
__s64 tv_sec;
__s64 tv_usec;
diff --git a/include/uapi/linux/timex.h b/include/uapi/linux/timex.h
index 9f517f9010bb..bd627c368d09 100644
--- a/include/uapi/linux/timex.h
+++ b/include/uapi/linux/timex.h
@@ -57,6 +57,7 @@
#define NTP_API 4 /* NTP API version */
+#ifndef __KERNEL__
/*
* syscall interface - used (mainly by NTP daemon)
* to discipline kernel clock oscillator
@@ -91,6 +92,7 @@ struct timex {
int :32; int :32; int :32; int :32;
int :32; int :32; int :32;
};
+#endif
struct __kernel_timex_timeval {
__kernel_time64_t tv_sec;
diff --git a/include/uapi/linux/tipc.h b/include/uapi/linux/tipc.h
index 7df026ea6aff..add01db1daef 100644
--- a/include/uapi/linux/tipc.h
+++ b/include/uapi/linux/tipc.h
@@ -191,6 +191,7 @@ struct sockaddr_tipc {
#define TIPC_GROUP_JOIN 135 /* Takes struct tipc_group_req* */
#define TIPC_GROUP_LEAVE 136 /* No argument */
#define TIPC_SOCK_RECVQ_USED 137 /* Default: none (read only) */
+#define TIPC_NODELAY 138 /* Default: false */
/*
* Flag values
@@ -232,6 +233,27 @@ struct tipc_sioc_nodeid_req {
char node_id[TIPC_NODEID_LEN];
};
+/*
+ * TIPC Crypto, AEAD
+ */
+#define TIPC_AEAD_ALG_NAME (32)
+
+struct tipc_aead_key {
+ char alg_name[TIPC_AEAD_ALG_NAME];
+ unsigned int keylen; /* in bytes */
+ char key[];
+};
+
+#define TIPC_AEAD_KEYLEN_MIN (16 + 4)
+#define TIPC_AEAD_KEYLEN_MAX (32 + 4)
+#define TIPC_AEAD_KEY_SIZE_MAX (sizeof(struct tipc_aead_key) + \
+ TIPC_AEAD_KEYLEN_MAX)
+
+static inline int tipc_aead_key_size(struct tipc_aead_key *key)
+{
+ return sizeof(*key) + key->keylen;
+}
+
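For illustration, building such a key in userspace might look like the sketch below; the "gcm(aes)" algorithm name and the key-plus-salt layout follow the limits above, and handing the blob to the kernel via the new TIPC_NL_KEY_SET netlink command is not shown:

#include <stdlib.h>
#include <string.h>
#include <linux/tipc.h>

/* Returns a heap-allocated tipc_aead_key holding an AES-GCM key, or NULL. */
static struct tipc_aead_key *make_tipc_key(const void *key, unsigned int keylen)
{
	struct tipc_aead_key *tkey;

	if (keylen < TIPC_AEAD_KEYLEN_MIN || keylen > TIPC_AEAD_KEYLEN_MAX)
		return NULL;

	tkey = calloc(1, sizeof(*tkey) + keylen);
	if (!tkey)
		return NULL;

	strncpy(tkey->alg_name, "gcm(aes)", TIPC_AEAD_ALG_NAME - 1);
	tkey->keylen = keylen;            /* key material plus 4-byte salt */
	memcpy(tkey->key, key, keylen);

	/* tipc_aead_key_size(tkey) gives the length to pass as TIPC_NLA_NODE_KEY. */
	return tkey;
}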
/* The macros and functions below are deprecated:
*/
diff --git a/include/uapi/linux/tipc_config.h b/include/uapi/linux/tipc_config.h
index 4955e1a9f1bc..4dfc05651c98 100644
--- a/include/uapi/linux/tipc_config.h
+++ b/include/uapi/linux/tipc_config.h
@@ -309,7 +309,7 @@ static inline int TLV_SET(void *tlv, __u16 type, void *data, __u16 len)
tlv_ptr->tlv_len = htons(tlv_len);
if (len && data) {
memcpy(TLV_DATA(tlv_ptr), data, len);
- memset(TLV_DATA(tlv_ptr) + len, 0, TLV_SPACE(len) - tlv_len);
+ memset((char *)TLV_DATA(tlv_ptr) + len, 0, TLV_SPACE(len) - tlv_len);
}
return TLV_SPACE(len);
}
@@ -409,7 +409,7 @@ static inline int TCM_SET(void *msg, __u16 cmd, __u16 flags,
tcm_hdr->tcm_flags = htons(flags);
if (data_len && data) {
memcpy(TCM_DATA(msg), data, data_len);
- memset(TCM_DATA(msg) + data_len, 0, TCM_SPACE(data_len) - msg_len);
+ memset((char *)TCM_DATA(msg) + data_len, 0, TCM_SPACE(data_len) - msg_len);
}
return TCM_SPACE(data_len);
}
diff --git a/include/uapi/linux/tipc_netlink.h b/include/uapi/linux/tipc_netlink.h
index efb958fd167d..dc0d23a50e69 100644
--- a/include/uapi/linux/tipc_netlink.h
+++ b/include/uapi/linux/tipc_netlink.h
@@ -63,6 +63,9 @@ enum {
TIPC_NL_PEER_REMOVE,
TIPC_NL_BEARER_ADD,
TIPC_NL_UDP_GET_REMOTEIP,
+ TIPC_NL_KEY_SET,
+ TIPC_NL_KEY_FLUSH,
+ TIPC_NL_ADDR_LEGACY_GET,
__TIPC_NL_CMD_MAX,
TIPC_NL_CMD_MAX = __TIPC_NL_CMD_MAX - 1
@@ -160,6 +163,8 @@ enum {
TIPC_NLA_NODE_UNSPEC,
TIPC_NLA_NODE_ADDR, /* u32 */
TIPC_NLA_NODE_UP, /* flag */
+ TIPC_NLA_NODE_ID, /* data */
+ TIPC_NLA_NODE_KEY, /* data */
__TIPC_NLA_NODE_MAX,
TIPC_NLA_NODE_MAX = __TIPC_NLA_NODE_MAX - 1
@@ -172,6 +177,7 @@ enum {
TIPC_NLA_NET_ADDR, /* u32 */
TIPC_NLA_NET_NODEID, /* u64 */
TIPC_NLA_NET_NODEID_W1, /* u64 */
+ TIPC_NLA_NET_ADDR_LEGACY, /* flag */
__TIPC_NLA_NET_MAX,
TIPC_NLA_NET_MAX = __TIPC_NLA_NET_MAX - 1
diff --git a/include/uapi/linux/udp.h b/include/uapi/linux/udp.h
index 30baccb6c9c4..4828794efcf8 100644
--- a/include/uapi/linux/udp.h
+++ b/include/uapi/linux/udp.h
@@ -42,5 +42,6 @@ struct udphdr {
#define UDP_ENCAP_GTP0 4 /* GSM TS 09.60 */
#define UDP_ENCAP_GTP1U 5 /* 3GPP TS 29.060 */
#define UDP_ENCAP_RXRPC 6
+#define TCP_ENCAP_ESPINTCP 7 /* Yikes, this is really xfrm encap types. */
#endif /* _UAPI_LINUX_UDP_H */
diff --git a/include/uapi/linux/usb/charger.h b/include/uapi/linux/usb/charger.h
index 5f72af35b3ed..ad22079125bf 100644
--- a/include/uapi/linux/usb/charger.h
+++ b/include/uapi/linux/usb/charger.h
@@ -14,18 +14,18 @@
* ACA (Accessory Charger Adapters)
*/
enum usb_charger_type {
- UNKNOWN_TYPE,
- SDP_TYPE,
- DCP_TYPE,
- CDP_TYPE,
- ACA_TYPE,
+ UNKNOWN_TYPE = 0,
+ SDP_TYPE = 1,
+ DCP_TYPE = 2,
+ CDP_TYPE = 3,
+ ACA_TYPE = 4,
};
/* USB charger state */
enum usb_charger_state {
- USB_CHARGER_DEFAULT,
- USB_CHARGER_PRESENT,
- USB_CHARGER_ABSENT,
+ USB_CHARGER_DEFAULT = 0,
+ USB_CHARGER_PRESENT = 1,
+ USB_CHARGER_ABSENT = 2,
};
#endif /* _UAPI__LINUX_USB_CHARGER_H */
diff --git a/include/uapi/linux/usb/raw_gadget.h b/include/uapi/linux/usb/raw_gadget.h
new file mode 100644
index 000000000000..ea375082b3ac
--- /dev/null
+++ b/include/uapi/linux/usb/raw_gadget.h
@@ -0,0 +1,167 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * USB Raw Gadget driver.
+ *
+ * See Documentation/usb/raw-gadget.rst for more details.
+ */
+
+#ifndef _UAPI__LINUX_USB_RAW_GADGET_H
+#define _UAPI__LINUX_USB_RAW_GADGET_H
+
+#include <asm/ioctl.h>
+#include <linux/types.h>
+#include <linux/usb/ch9.h>
+
+/* Maximum length of driver_name/device_name in the usb_raw_init struct. */
+#define UDC_NAME_LENGTH_MAX 128
+
+/*
+ * struct usb_raw_init - argument for USB_RAW_IOCTL_INIT ioctl.
+ * @speed: The speed of the emulated USB device, takes the same values as
+ * the usb_device_speed enum: USB_SPEED_FULL, USB_SPEED_HIGH, etc.
+ * @driver_name: The name of the UDC driver.
+ * @device_name: The name of a UDC instance.
+ *
+ * The last two fields identify a UDC the gadget driver should bind to.
+ * For example, Dummy UDC has "dummy_udc" as its driver_name and "dummy_udc.N"
+ * as its device_name, where N is the index of the Dummy UDC instance.
+ * By contrast, the dwc2 driver used on the Raspberry Pi Zero has
+ * "20980000.usb" as both its driver_name and device_name.
+ */
+struct usb_raw_init {
+ __u8 driver_name[UDC_NAME_LENGTH_MAX];
+ __u8 device_name[UDC_NAME_LENGTH_MAX];
+ __u8 speed;
+};
+
+/* The type of event fetched with the USB_RAW_IOCTL_EVENT_FETCH ioctl. */
+enum usb_raw_event_type {
+ USB_RAW_EVENT_INVALID = 0,
+
+ /* This event is queued when the driver has bound to a UDC. */
+ USB_RAW_EVENT_CONNECT = 1,
+
+ /* This event is queued when a new control request arrives on ep0. */
+ USB_RAW_EVENT_CONTROL = 2,
+
+ /* The list might grow in the future. */
+};
+
+/*
+ * struct usb_raw_event - argument for USB_RAW_IOCTL_EVENT_FETCH ioctl.
+ * @type: The type of the fetched event.
+ * @length: Length of the data buffer. Updated by the driver and set to the
+ * actual length of the fetched event data.
+ * @data: A buffer to store the fetched event data.
+ *
+ * Currently the fetched data buffer is empty for USB_RAW_EVENT_CONNECT,
+ * and contains struct usb_ctrlrequest for USB_RAW_EVENT_CONTROL.
+ */
+struct usb_raw_event {
+ __u32 type;
+ __u32 length;
+ __u8 data[0];
+};
+
+#define USB_RAW_IO_FLAGS_ZERO 0x0001
+#define USB_RAW_IO_FLAGS_MASK 0x0001
+
+static inline int usb_raw_io_flags_valid(__u16 flags)
+{
+ return (flags & ~USB_RAW_IO_FLAGS_MASK) == 0;
+}
+
+static inline int usb_raw_io_flags_zero(__u16 flags)
+{
+ return (flags & USB_RAW_IO_FLAGS_ZERO);
+}
+
+/*
+ * struct usb_raw_ep_io - argument for USB_RAW_IOCTL_EP0/EP_WRITE/READ ioctls.
+ * @ep: Endpoint handle as returned by USB_RAW_IOCTL_EP_ENABLE for
+ * USB_RAW_IOCTL_EP_WRITE/READ. Ignored for USB_RAW_IOCTL_EP0_WRITE/READ.
+ * @flags: When USB_RAW_IO_FLAGS_ZERO is specified, the zero flag is set on
+ * the submitted USB request, see include/linux/usb/gadget.h for details.
+ * @length: Length of data.
+ * @data: Data to send for USB_RAW_IOCTL_EP0/EP_WRITE. Buffer to store received
+ * data for USB_RAW_IOCTL_EP0/EP_READ.
+ */
+struct usb_raw_ep_io {
+ __u16 ep;
+ __u16 flags;
+ __u32 length;
+ __u8 data[0];
+};
+
+/*
+ * Initializes a Raw Gadget instance.
+ * Accepts a pointer to the usb_raw_init struct as an argument.
+ * Returns 0 on success or negative error code on failure.
+ */
+#define USB_RAW_IOCTL_INIT _IOW('U', 0, struct usb_raw_init)
+
+/*
+ * Instructs Raw Gadget to bind to a UDC and start emulating a USB device.
+ * Returns 0 on success or negative error code on failure.
+ */
+#define USB_RAW_IOCTL_RUN _IO('U', 1)
+
+/*
+ * A blocking ioctl that waits for an event and returns fetched event data to
+ * the user.
+ * Accepts a pointer to the usb_raw_event struct.
+ * Returns 0 on success or negative error code on failure.
+ */
+#define USB_RAW_IOCTL_EVENT_FETCH _IOR('U', 2, struct usb_raw_event)
+
+/*
+ * Queues an IN (OUT for READ) urb as a response to the last control request
+ * received on endpoint 0, provided that it was an IN (OUT for READ) request,
+ * and waits until the urb is completed. Copies received data to user for READ.
+ * Accepts a pointer to the usb_raw_ep_io struct as an argument.
+ * Returns the length of transferred data on success or a negative error code on
+ * failure.
+ */
+#define USB_RAW_IOCTL_EP0_WRITE _IOW('U', 3, struct usb_raw_ep_io)
+#define USB_RAW_IOCTL_EP0_READ _IOWR('U', 4, struct usb_raw_ep_io)
+
+/*
+ * Finds an endpoint that supports the transfer type specified in the
+ * descriptor and enables it.
+ * Accepts a pointer to the usb_endpoint_descriptor struct as an argument.
+ * Returns enabled endpoint handle on success or negative error code on failure.
+ */
+#define USB_RAW_IOCTL_EP_ENABLE _IOW('U', 5, struct usb_endpoint_descriptor)
+
+/* Disables specified endpoint.
+ * Accepts endpoint handle as an argument.
+ * Returns 0 on success or negative error code on failure.
+ */
+#define USB_RAW_IOCTL_EP_DISABLE _IOW('U', 6, __u32)
+
+/*
+ * Queues an IN (OUT for READ) urb as a response to the last control request
+ * received on endpoint usb_raw_ep_io.ep, provided that it was an IN (OUT for
+ * READ) request, and waits until the urb is completed. Copies received data to
+ * user for READ.
+ * Accepts a pointer to the usb_raw_ep_io struct as an argument.
+ * Returns the length of transferred data on success or a negative error code on
+ * failure.
+ */
+#define USB_RAW_IOCTL_EP_WRITE _IOW('U', 7, struct usb_raw_ep_io)
+#define USB_RAW_IOCTL_EP_READ _IOWR('U', 8, struct usb_raw_ep_io)
+
+/*
+ * Switches the gadget into the configured state.
+ * Returns 0 on success or negative error code on failure.
+ */
+#define USB_RAW_IOCTL_CONFIGURE _IO('U', 9)
+
+/*
+ * Constrains UDC VBUS power usage.
+ * Accepts current limit in 2 mA units as an argument.
+ * Returns 0 on success or negative error code on failure.
+ */
+#define USB_RAW_IOCTL_VBUS_DRAW _IOW('U', 10, __u32)
+
+#endif /* _UAPI__LINUX_USB_RAW_GADGET_H */
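Tying the ioctls above together, a hedged sketch of the basic event loop; the /dev/raw-gadget node and the dummy_udc names follow Documentation/usb/raw-gadget.rst, and a real gadget must also answer the control requests it fetches:

#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/usb/ch9.h>
#include <linux/usb/raw_gadget.h>

struct raw_event_buf {
	struct usb_raw_event ev;
	char data[4096];                  /* room for the fetched control request */
};

int main(void)
{
	struct usb_raw_init init;
	struct raw_event_buf buf;
	int fd;

	fd = open("/dev/raw-gadget", O_RDWR);
	if (fd < 0)
		return 1;

	memset(&init, 0, sizeof(init));
	strcpy((char *)init.driver_name, "dummy_udc");    /* bind to Dummy UDC ... */
	strcpy((char *)init.device_name, "dummy_udc.0");  /* ... instance 0 */
	init.speed = USB_SPEED_HIGH;

	if (ioctl(fd, USB_RAW_IOCTL_INIT, &init) ||
	    ioctl(fd, USB_RAW_IOCTL_RUN, 0))
		return 1;

	for (;;) {
		memset(&buf, 0, sizeof(buf));
		buf.ev.length = sizeof(buf.data);
		if (ioctl(fd, USB_RAW_IOCTL_EVENT_FETCH, &buf.ev) < 0)
			break;
		if (buf.ev.type == USB_RAW_EVENT_CONTROL) {
			/* buf.data now holds a struct usb_ctrlrequest to act on */
		}
	}
	close(fd);
	return 0;
}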
diff --git a/include/uapi/linux/utime.h b/include/uapi/linux/utime.h
index fd9aa26b6860..bc8f13e81d6e 100644
--- a/include/uapi/linux/utime.h
+++ b/include/uapi/linux/utime.h
@@ -5,8 +5,8 @@
#include <linux/types.h>
struct utimbuf {
- __kernel_time_t actime;
- __kernel_time_t modtime;
+ __kernel_old_time_t actime;
+ __kernel_old_time_t modtime;
};
#endif
diff --git a/include/uapi/linux/v4l2-controls.h b/include/uapi/linux/v4l2-controls.h
index a2669b79b294..1a58d7cc4ccc 100644
--- a/include/uapi/linux/v4l2-controls.h
+++ b/include/uapi/linux/v4l2-controls.h
@@ -192,6 +192,12 @@ enum v4l2_colorfx {
* We reserve 16 controls for this driver. */
#define V4L2_CID_USER_IMX_BASE (V4L2_CID_USER_BASE + 0x10b0)
+/*
+ * The base for the atmel isc driver controls.
+ * We reserve 32 controls for this driver.
+ */
+#define V4L2_CID_USER_ATMEL_ISC_BASE (V4L2_CID_USER_BASE + 0x10c0)
+
/* MPEG-class control IDs */
/* The MPEG controls are applicable to all codec controls
* and the 'MPEG' part of the define is historical */
@@ -1034,6 +1040,7 @@ enum v4l2_jpeg_chroma_subsampling {
#define V4L2_CID_TEST_PATTERN_GREENR (V4L2_CID_IMAGE_SOURCE_CLASS_BASE + 5)
#define V4L2_CID_TEST_PATTERN_BLUE (V4L2_CID_IMAGE_SOURCE_CLASS_BASE + 6)
#define V4L2_CID_TEST_PATTERN_GREENB (V4L2_CID_IMAGE_SOURCE_CLASS_BASE + 7)
+#define V4L2_CID_UNIT_CELL_SIZE (V4L2_CID_IMAGE_SOURCE_CLASS_BASE + 8)
/* Image processing controls */
diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h
index 530638dffd93..9817b7e2c968 100644
--- a/include/uapi/linux/videodev2.h
+++ b/include/uapi/linux/videodev2.h
@@ -422,6 +422,11 @@ struct v4l2_fract {
__u32 denominator;
};
+struct v4l2_area {
+ __u32 width;
+ __u32 height;
+};
+
/**
* struct v4l2_capability - Describes V4L2 device caps returned by VIDIOC_QUERYCAP
*
@@ -560,6 +565,7 @@ struct v4l2_pix_format {
#define V4L2_PIX_FMT_Y6 v4l2_fourcc('Y', '0', '6', ' ') /* 6 Greyscale */
#define V4L2_PIX_FMT_Y10 v4l2_fourcc('Y', '1', '0', ' ') /* 10 Greyscale */
#define V4L2_PIX_FMT_Y12 v4l2_fourcc('Y', '1', '2', ' ') /* 12 Greyscale */
+#define V4L2_PIX_FMT_Y14 v4l2_fourcc('Y', '1', '4', ' ') /* 14 Greyscale */
#define V4L2_PIX_FMT_Y16 v4l2_fourcc('Y', '1', '6', ' ') /* 16 Greyscale */
#define V4L2_PIX_FMT_Y16_BE v4l2_fourcc_be('Y', '1', '6', ' ') /* 16 Greyscale BE */
@@ -657,6 +663,10 @@ struct v4l2_pix_format {
#define V4L2_PIX_FMT_SGBRG12P v4l2_fourcc('p', 'G', 'C', 'C')
#define V4L2_PIX_FMT_SGRBG12P v4l2_fourcc('p', 'g', 'C', 'C')
#define V4L2_PIX_FMT_SRGGB12P v4l2_fourcc('p', 'R', 'C', 'C')
+#define V4L2_PIX_FMT_SBGGR14 v4l2_fourcc('B', 'G', '1', '4') /* 14 BGBG.. GRGR.. */
+#define V4L2_PIX_FMT_SGBRG14 v4l2_fourcc('G', 'B', '1', '4') /* 14 GBGB.. RGRG.. */
+#define V4L2_PIX_FMT_SGRBG14 v4l2_fourcc('G', 'R', '1', '4') /* 14 GRGR.. BGBG.. */
+#define V4L2_PIX_FMT_SRGGB14 v4l2_fourcc('R', 'G', '1', '4') /* 14 RGRG.. GBGB.. */
/* 14bit raw bayer packed, 7 bytes for every 4 pixels */
#define V4L2_PIX_FMT_SBGGR14P v4l2_fourcc('p', 'B', 'E', 'E')
#define V4L2_PIX_FMT_SGBRG14P v4l2_fourcc('p', 'G', 'E', 'E')
@@ -755,6 +765,7 @@ struct v4l2_pix_format {
#define V4L2_META_FMT_VSP1_HGT v4l2_fourcc('V', 'S', 'P', 'T') /* R-Car VSP1 2-D Histogram */
#define V4L2_META_FMT_UVC v4l2_fourcc('U', 'V', 'C', 'H') /* UVC Payload Header metadata */
#define V4L2_META_FMT_D4XX v4l2_fourcc('D', '4', 'X', 'X') /* D4XX Payload Header metadata */
+#define V4L2_META_FMT_VIVID v4l2_fourcc('V', 'I', 'V', 'D') /* Vivid Metadata */
/* priv field value to indicates that subsequent fields are valid. */
#define V4L2_PIX_FMT_PRIV_MAGIC 0xfeedcafe
@@ -906,6 +917,25 @@ struct v4l2_jpegcompression {
/*
* M E M O R Y - M A P P I N G B U F F E R S
*/
+
+#ifdef __KERNEL__
+/*
+ * This corresponds to the user space version of timeval
+ * for 64-bit time_t. sparc64 is different from everyone
+ * else, using the microseconds in the wrong half of the
+ * second 64-bit word.
+ */
+struct __kernel_v4l2_timeval {
+ long long tv_sec;
+#if defined(__sparc__) && defined(__arch64__)
+ int tv_usec;
+ int __pad;
+#else
+ long long tv_usec;
+#endif
+};
+#endif
+
struct v4l2_requestbuffers {
__u32 count;
__u32 type; /* enum v4l2_buf_type */
@@ -915,11 +945,12 @@ struct v4l2_requestbuffers {
};
/* capabilities for struct v4l2_requestbuffers and v4l2_create_buffers */
-#define V4L2_BUF_CAP_SUPPORTS_MMAP (1 << 0)
-#define V4L2_BUF_CAP_SUPPORTS_USERPTR (1 << 1)
-#define V4L2_BUF_CAP_SUPPORTS_DMABUF (1 << 2)
-#define V4L2_BUF_CAP_SUPPORTS_REQUESTS (1 << 3)
-#define V4L2_BUF_CAP_SUPPORTS_ORPHANED_BUFS (1 << 4)
+#define V4L2_BUF_CAP_SUPPORTS_MMAP (1 << 0)
+#define V4L2_BUF_CAP_SUPPORTS_USERPTR (1 << 1)
+#define V4L2_BUF_CAP_SUPPORTS_DMABUF (1 << 2)
+#define V4L2_BUF_CAP_SUPPORTS_REQUESTS (1 << 3)
+#define V4L2_BUF_CAP_SUPPORTS_ORPHANED_BUFS (1 << 4)
+#define V4L2_BUF_CAP_SUPPORTS_M2M_HOLD_CAPTURE_BUF (1 << 5)
/**
* struct v4l2_plane - plane info for multi-planar buffers
@@ -990,7 +1021,11 @@ struct v4l2_buffer {
__u32 bytesused;
__u32 flags;
__u32 field;
+#ifdef __KERNEL__
+ struct __kernel_v4l2_timeval timestamp;
+#else
struct timeval timestamp;
+#endif
struct v4l2_timecode timecode;
__u32 sequence;
@@ -1010,6 +1045,7 @@ struct v4l2_buffer {
};
};
+#ifndef __KERNEL__
/**
* v4l2_timeval_to_ns - Convert timeval to nanoseconds
* @ts: pointer to the timeval variable to be converted
@@ -1021,6 +1057,7 @@ static inline __u64 v4l2_timeval_to_ns(const struct timeval *tv)
{
return (__u64)tv->tv_sec * 1000000000ULL + tv->tv_usec * 1000;
}
+#endif
/* Flags for 'flags' field */
/* Buffer is mapped (flag) */
@@ -1041,6 +1078,8 @@ static inline __u64 v4l2_timeval_to_ns(const struct timeval *tv)
#define V4L2_BUF_FLAG_IN_REQUEST 0x00000080
/* timecode field is valid */
#define V4L2_BUF_FLAG_TIMECODE 0x00000100
+/* Don't return the capture buffer until OUTPUT timestamp changes */
+#define V4L2_BUF_FLAG_M2M_HOLD_CAPTURE_BUF 0x00000200
/* Buffer is prepared for queuing */
#define V4L2_BUF_FLAG_PREPARED 0x00000400
/* Cache handling flags */
@@ -1208,6 +1247,10 @@ struct v4l2_selection {
typedef __u64 v4l2_std_id;
+/*
+ * Attention: Keep the V4L2_STD_* bit definitions in sync with
+ * include/dt-bindings/display/sdtv-standards.h SDTV_STD_* bit definitions.
+ */
/* one bit for each */
#define V4L2_STD_PAL_B ((v4l2_std_id)0x00000001)
#define V4L2_STD_PAL_B1 ((v4l2_std_id)0x00000002)
@@ -1675,6 +1718,7 @@ struct v4l2_ext_control {
__u8 __user *p_u8;
__u16 __user *p_u16;
__u32 __user *p_u32;
+ struct v4l2_area __user *p_area;
void __user *ptr;
};
} __attribute__ ((packed));
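For reference, reading an area control such as the new V4L2_CID_UNIT_CELL_SIZE follows the usual compound-control pattern; a sketch with error handling omitted:

#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

static int get_unit_cell_size(int fd, struct v4l2_area *area)
{
	struct v4l2_ext_control ctrl;
	struct v4l2_ext_controls ctrls;

	memset(&ctrl, 0, sizeof(ctrl));
	ctrl.id = V4L2_CID_UNIT_CELL_SIZE;
	ctrl.size = sizeof(*area);        /* compound payload size */
	ctrl.p_area = area;

	memset(&ctrls, 0, sizeof(ctrls));
	ctrls.which = V4L2_CTRL_WHICH_CUR_VAL;
	ctrls.count = 1;
	ctrls.controls = &ctrl;

	return ioctl(fd, VIDIOC_G_EXT_CTRLS, &ctrls);
}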
@@ -1720,6 +1764,7 @@ enum v4l2_ctrl_type {
V4L2_CTRL_TYPE_U8 = 0x0100,
V4L2_CTRL_TYPE_U16 = 0x0101,
V4L2_CTRL_TYPE_U32 = 0x0102,
+ V4L2_CTRL_TYPE_AREA = 0x0106,
};
/* Used in the VIDIOC_QUERYCTRL ioctl for querying controls */
@@ -1975,6 +2020,7 @@ struct v4l2_encoder_cmd {
#define V4L2_DEC_CMD_STOP (1)
#define V4L2_DEC_CMD_PAUSE (2)
#define V4L2_DEC_CMD_RESUME (3)
+#define V4L2_DEC_CMD_FLUSH (4)
/* Flags for V4L2_DEC_CMD_START */
#define V4L2_DEC_CMD_START_MUTE_AUDIO (1 << 0)
@@ -2327,7 +2373,11 @@ struct v4l2_event {
} u;
__u32 pending;
__u32 sequence;
+#ifdef __KERNEL__
+ struct __kernel_timespec timestamp;
+#else
struct timespec timestamp;
+#endif
__u32 id;
__u32 reserved[8];
};
diff --git a/include/uapi/linux/virtio_ring.h b/include/uapi/linux/virtio_ring.h
index 4c4e24c291a5..559f42e73315 100644
--- a/include/uapi/linux/virtio_ring.h
+++ b/include/uapi/linux/virtio_ring.h
@@ -169,7 +169,7 @@ static inline void vring_init(struct vring *vr, unsigned int num, void *p,
{
vr->num = num;
vr->desc = p;
- vr->avail = p + num*sizeof(struct vring_desc);
+ vr->avail = (struct vring_avail *)((char *)p + num * sizeof(struct vring_desc));
vr->used = (void *)(((uintptr_t)&vr->avail->ring[num] + sizeof(__virtio16)
+ align-1) & ~(align - 1));
}
diff --git a/include/uapi/linux/vm_sockets.h b/include/uapi/linux/vm_sockets.h
index 68d57c5e99bc..fd0ed7221645 100644
--- a/include/uapi/linux/vm_sockets.h
+++ b/include/uapi/linux/vm_sockets.h
@@ -99,11 +99,13 @@
#define VMADDR_CID_HYPERVISOR 0
-/* This CID is specific to VMCI and can be considered reserved (even VMCI
- * doesn't use it anymore, it's a legacy value from an older release).
+/* Use this as the destination CID in an address when referring to
+ * local communication (loopback).
+ * (This was VMADDR_CID_RESERVED, but even VMCI doesn't use it anymore;
+ * it was a legacy value from an older release.)
*/
-#define VMADDR_CID_RESERVED 1
+#define VMADDR_CID_LOCAL 1
/* Use this as the destination CID in an address when referring to the host
* (any process other than the hypervisor). VMCI relies on it being 2, but
diff --git a/include/uapi/linux/wireguard.h b/include/uapi/linux/wireguard.h
new file mode 100644
index 000000000000..ae88be14c947
--- /dev/null
+++ b/include/uapi/linux/wireguard.h
@@ -0,0 +1,196 @@
+/* SPDX-License-Identifier: (GPL-2.0 WITH Linux-syscall-note) OR MIT */
+/*
+ * Copyright (C) 2015-2019 Jason A. Donenfeld <Jason@zx2c4.com>. All Rights Reserved.
+ *
+ * Documentation
+ * =============
+ *
+ * The below enums and macros are for interfacing with WireGuard, using generic
+ * netlink, with family WG_GENL_NAME and version WG_GENL_VERSION. It defines two
+ * methods: get and set. Note that while they share many common attributes,
+ * these two functions actually accept a slightly different set of inputs and
+ * outputs.
+ *
+ * WG_CMD_GET_DEVICE
+ * -----------------
+ *
+ * May only be called via NLM_F_REQUEST | NLM_F_DUMP. The command should contain
+ * one but not both of:
+ *
+ * WGDEVICE_A_IFINDEX: NLA_U32
+ * WGDEVICE_A_IFNAME: NLA_NUL_STRING, maxlen IFNAMSIZ - 1
+ *
+ * The kernel will then return several messages (NLM_F_MULTI) containing the
+ * following tree of nested items:
+ *
+ * WGDEVICE_A_IFINDEX: NLA_U32
+ * WGDEVICE_A_IFNAME: NLA_NUL_STRING, maxlen IFNAMSIZ - 1
+ * WGDEVICE_A_PRIVATE_KEY: NLA_EXACT_LEN, len WG_KEY_LEN
+ * WGDEVICE_A_PUBLIC_KEY: NLA_EXACT_LEN, len WG_KEY_LEN
+ * WGDEVICE_A_LISTEN_PORT: NLA_U16
+ * WGDEVICE_A_FWMARK: NLA_U32
+ * WGDEVICE_A_PEERS: NLA_NESTED
+ * 0: NLA_NESTED
+ * WGPEER_A_PUBLIC_KEY: NLA_EXACT_LEN, len WG_KEY_LEN
+ * WGPEER_A_PRESHARED_KEY: NLA_EXACT_LEN, len WG_KEY_LEN
+ * WGPEER_A_ENDPOINT: NLA_MIN_LEN(struct sockaddr), struct sockaddr_in or struct sockaddr_in6
+ * WGPEER_A_PERSISTENT_KEEPALIVE_INTERVAL: NLA_U16
+ * WGPEER_A_LAST_HANDSHAKE_TIME: NLA_EXACT_LEN, struct __kernel_timespec
+ * WGPEER_A_RX_BYTES: NLA_U64
+ * WGPEER_A_TX_BYTES: NLA_U64
+ * WGPEER_A_ALLOWEDIPS: NLA_NESTED
+ * 0: NLA_NESTED
+ * WGALLOWEDIP_A_FAMILY: NLA_U16
+ * WGALLOWEDIP_A_IPADDR: NLA_MIN_LEN(struct in_addr), struct in_addr or struct in6_addr
+ * WGALLOWEDIP_A_CIDR_MASK: NLA_U8
+ * 0: NLA_NESTED
+ * ...
+ * 0: NLA_NESTED
+ * ...
+ * ...
+ * WGPEER_A_PROTOCOL_VERSION: NLA_U32
+ * 0: NLA_NESTED
+ * ...
+ * ...
+ *
+ * It is possible that all of the allowed IPs of a single peer will not
+ * fit within a single netlink message. In that case, the same peer will
+ * be written in the following message, except it will only contain
+ * WGPEER_A_PUBLIC_KEY and WGPEER_A_ALLOWEDIPS. This may occur several
+ * times in a row for the same peer. It is then up to the receiver to
+ * coalesce adjacent peers. Likewise, it is possible that all peers will
+ * not fit within a single message. So, subsequent peers will be sent
+ * in following messages, except those will only contain WGDEVICE_A_IFNAME
+ * and WGDEVICE_A_PEERS. It is then up to the receiver to coalesce these
+ * messages to form the complete list of peers.
+ *
+ * Since this is an NLM_F_DUMP command, the final message will always be
+ * NLMSG_DONE, even if an error occurs. However, this NLMSG_DONE message
+ * contains an integer error code. It is either zero or a negative error
+ * code corresponding to the errno.
+ *
+ * WG_CMD_SET_DEVICE
+ * -----------------
+ *
+ * May only be called via NLM_F_REQUEST. The command should contain the
+ * following tree of nested items, containing one but not both of
+ * WGDEVICE_A_IFINDEX and WGDEVICE_A_IFNAME:
+ *
+ * WGDEVICE_A_IFINDEX: NLA_U32
+ * WGDEVICE_A_IFNAME: NLA_NUL_STRING, maxlen IFNAMSIZ - 1
+ * WGDEVICE_A_FLAGS: NLA_U32, 0 or WGDEVICE_F_REPLACE_PEERS if all current
+ * peers should be removed prior to adding the list below.
+ * WGDEVICE_A_PRIVATE_KEY: len WG_KEY_LEN, all zeros to remove
+ * WGDEVICE_A_LISTEN_PORT: NLA_U16, 0 to choose randomly
+ * WGDEVICE_A_FWMARK: NLA_U32, 0 to disable
+ * WGDEVICE_A_PEERS: NLA_NESTED
+ * 0: NLA_NESTED
+ * WGPEER_A_PUBLIC_KEY: len WG_KEY_LEN
+ * WGPEER_A_FLAGS: NLA_U32, 0 and/or WGPEER_F_REMOVE_ME if the
+ * specified peer should not exist at the end of the
+ * operation, rather than added/updated and/or
+ * WGPEER_F_REPLACE_ALLOWEDIPS if all current allowed
+ * IPs of this peer should be removed prior to adding
+ * the list below and/or WGPEER_F_UPDATE_ONLY if the
+ * peer should only be set if it already exists.
+ * WGPEER_A_PRESHARED_KEY: len WG_KEY_LEN, all zeros to remove
+ * WGPEER_A_ENDPOINT: struct sockaddr_in or struct sockaddr_in6
+ * WGPEER_A_PERSISTENT_KEEPALIVE_INTERVAL: NLA_U16, 0 to disable
+ * WGPEER_A_ALLOWEDIPS: NLA_NESTED
+ * 0: NLA_NESTED
+ * WGALLOWEDIP_A_FAMILY: NLA_U16
+ * WGALLOWEDIP_A_IPADDR: struct in_addr or struct in6_addr
+ * WGALLOWEDIP_A_CIDR_MASK: NLA_U8
+ * 0: NLA_NESTED
+ * ...
+ * 0: NLA_NESTED
+ * ...
+ * ...
+ * WGPEER_A_PROTOCOL_VERSION: NLA_U32, should not be set or used at
+ * all by most users of this API, as the
+ * most recent protocol will be used when
+ * this is unset. Otherwise, must be set
+ * to 1.
+ * 0: NLA_NESTED
+ * ...
+ * ...
+ *
+ * It is possible that the amount of configuration data exceeds that of
+ * the maximum message length accepted by the kernel. In that case, several
+ * messages should be sent one after another, with each successive one
+ * filling in information not contained in the prior. Note that if
+ * WGDEVICE_F_REPLACE_PEERS is specified in the first message, it probably
+ * should not be specified in fragments that come after, so that the list
+ * of peers is only cleared the first time but appended after. Likewise for
+ * peers, if WGPEER_F_REPLACE_ALLOWEDIPS is specified in the first message
+ * of a peer, it likely should not be specified in subsequent fragments.
+ *
+ * If an error occurs, NLMSG_ERROR will reply containing an errno.
+ */
+
+#ifndef _WG_UAPI_WIREGUARD_H
+#define _WG_UAPI_WIREGUARD_H
+
+#define WG_GENL_NAME "wireguard"
+#define WG_GENL_VERSION 1
+
+#define WG_KEY_LEN 32
+
+enum wg_cmd {
+ WG_CMD_GET_DEVICE,
+ WG_CMD_SET_DEVICE,
+ __WG_CMD_MAX
+};
+#define WG_CMD_MAX (__WG_CMD_MAX - 1)
+
+enum wgdevice_flag {
+ WGDEVICE_F_REPLACE_PEERS = 1U << 0,
+ __WGDEVICE_F_ALL = WGDEVICE_F_REPLACE_PEERS
+};
+enum wgdevice_attribute {
+ WGDEVICE_A_UNSPEC,
+ WGDEVICE_A_IFINDEX,
+ WGDEVICE_A_IFNAME,
+ WGDEVICE_A_PRIVATE_KEY,
+ WGDEVICE_A_PUBLIC_KEY,
+ WGDEVICE_A_FLAGS,
+ WGDEVICE_A_LISTEN_PORT,
+ WGDEVICE_A_FWMARK,
+ WGDEVICE_A_PEERS,
+ __WGDEVICE_A_LAST
+};
+#define WGDEVICE_A_MAX (__WGDEVICE_A_LAST - 1)
+
+enum wgpeer_flag {
+ WGPEER_F_REMOVE_ME = 1U << 0,
+ WGPEER_F_REPLACE_ALLOWEDIPS = 1U << 1,
+ WGPEER_F_UPDATE_ONLY = 1U << 2,
+ __WGPEER_F_ALL = WGPEER_F_REMOVE_ME | WGPEER_F_REPLACE_ALLOWEDIPS |
+ WGPEER_F_UPDATE_ONLY
+};
+enum wgpeer_attribute {
+ WGPEER_A_UNSPEC,
+ WGPEER_A_PUBLIC_KEY,
+ WGPEER_A_PRESHARED_KEY,
+ WGPEER_A_FLAGS,
+ WGPEER_A_ENDPOINT,
+ WGPEER_A_PERSISTENT_KEEPALIVE_INTERVAL,
+ WGPEER_A_LAST_HANDSHAKE_TIME,
+ WGPEER_A_RX_BYTES,
+ WGPEER_A_TX_BYTES,
+ WGPEER_A_ALLOWEDIPS,
+ WGPEER_A_PROTOCOL_VERSION,
+ __WGPEER_A_LAST
+};
+#define WGPEER_A_MAX (__WGPEER_A_LAST - 1)
+
+enum wgallowedip_attribute {
+ WGALLOWEDIP_A_UNSPEC,
+ WGALLOWEDIP_A_FAMILY,
+ WGALLOWEDIP_A_IPADDR,
+ WGALLOWEDIP_A_CIDR_MASK,
+ __WGALLOWEDIP_A_LAST
+};
+#define WGALLOWEDIP_A_MAX (__WGALLOWEDIP_A_LAST - 1)
+
+#endif /* _WG_UAPI_WIREGUARD_H */
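As a rough sketch of the attribute layout described above, a WG_CMD_SET_DEVICE request built with libmnl might look like this; the genetlink family id is assumed to have been resolved beforehand, and only a keepalive update for a single peer is shown:

#include <stdint.h>
#include <string.h>
#include <libmnl/libmnl.h>
#include <linux/genetlink.h>
#include <linux/wireguard.h>

/* nl: bound NETLINK_GENERIC socket; family: resolved "wireguard" genl family id */
static int wg_set_keepalive(struct mnl_socket *nl, uint16_t family,
			    const char *ifname, const uint8_t pubkey[WG_KEY_LEN])
{
	char buf[MNL_SOCKET_BUFFER_SIZE];
	struct nlmsghdr *nlh;
	struct genlmsghdr *genl;
	struct nlattr *peers, *peer;

	nlh = mnl_nlmsg_put_header(buf);
	nlh->nlmsg_type = family;
	nlh->nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;

	genl = mnl_nlmsg_put_extra_header(nlh, sizeof(*genl));
	genl->cmd = WG_CMD_SET_DEVICE;
	genl->version = WG_GENL_VERSION;

	mnl_attr_put_strz(nlh, WGDEVICE_A_IFNAME, ifname);

	peers = mnl_attr_nest_start(nlh, WGDEVICE_A_PEERS);
	peer = mnl_attr_nest_start(nlh, 0);         /* nested peer entry "0" */
	mnl_attr_put(nlh, WGPEER_A_PUBLIC_KEY, WG_KEY_LEN, pubkey);
	mnl_attr_put_u16(nlh, WGPEER_A_PERSISTENT_KEEPALIVE_INTERVAL, 25);
	mnl_attr_nest_end(nlh, peer);
	mnl_attr_nest_end(nlh, peers);

	return mnl_socket_sendto(nl, nlh, nlh->nlmsg_len) < 0 ? -1 : 0;
}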
diff --git a/include/uapi/misc/fastrpc.h b/include/uapi/misc/fastrpc.h
index fb792e882cef..07de2b7aac85 100644
--- a/include/uapi/misc/fastrpc.h
+++ b/include/uapi/misc/fastrpc.h
@@ -10,6 +10,8 @@
#define FASTRPC_IOCTL_INVOKE _IOWR('R', 3, struct fastrpc_invoke)
#define FASTRPC_IOCTL_INIT_ATTACH _IO('R', 4)
#define FASTRPC_IOCTL_INIT_CREATE _IOWR('R', 5, struct fastrpc_init_create)
+#define FASTRPC_IOCTL_MMAP _IOWR('R', 6, struct fastrpc_req_mmap)
+#define FASTRPC_IOCTL_MUNMAP _IOWR('R', 7, struct fastrpc_req_munmap)
struct fastrpc_invoke_args {
__u64 ptr;
@@ -38,4 +40,17 @@ struct fastrpc_alloc_dma_buf {
__u64 size; /* size */
};
+struct fastrpc_req_mmap {
+ __s32 fd;
+ __u32 flags; /* flags for dsp to map with */
+ __u64 vaddrin; /* optional virtual address */
+ __u64 size; /* size */
+ __u64 vaddrout; /* dsp virtual address */
+};
+
+struct fastrpc_req_munmap {
+ __u64 vaddrout; /* address to unmap */
+ __u64 size; /* size */
+};
+
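A short sketch of the mmap/munmap pairing; the flag value is a placeholder for whatever the DSP expects, and the DSP-side address comes back in vaddrout:

#include <string.h>
#include <sys/ioctl.h>
#include <linux/types.h>
#include <misc/fastrpc.h>

/* fd: an open fastrpc channel device; buf_fd: a DMA-BUF to map onto the DSP */
static __u64 map_buf_on_dsp(int fd, int buf_fd, __u64 size)
{
	struct fastrpc_req_mmap req;

	memset(&req, 0, sizeof(req));
	req.fd      = buf_fd;
	req.flags   = 0;        /* placeholder: mapping flags understood by the DSP */
	req.vaddrin = 0;        /* let the driver pick the address */
	req.size    = size;

	if (ioctl(fd, FASTRPC_IOCTL_MMAP, &req) < 0)
		return 0;
	return req.vaddrout;    /* DSP virtual address of the mapping */
}

static int unmap_buf_on_dsp(int fd, __u64 dsp_addr, __u64 size)
{
	struct fastrpc_req_munmap req = { .vaddrout = dsp_addr, .size = size };

	return ioctl(fd, FASTRPC_IOCTL_MUNMAP, &req);
}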
#endif /* __QCOM_FASTRPC_H__ */
diff --git a/include/uapi/misc/habanalabs.h b/include/uapi/misc/habanalabs.h
index 39c4ea51a719..4faa2c9767e5 100644
--- a/include/uapi/misc/habanalabs.h
+++ b/include/uapi/misc/habanalabs.h
@@ -88,13 +88,19 @@ enum hl_device_status {
* internal engine.
* HL_INFO_DEVICE_STATUS - Retrieve the device's status. This opcode doesn't
* require an open context.
- * HL_INFO_DEVICE_UTILIZATION - Retrieve the total utilization of the device
- * over the last period specified by the user.
- * The period can be between 100ms to 1s, in
- * resolution of 100ms. The return value is a
- * percentage of the utilization rate.
+ * HL_INFO_DEVICE_UTILIZATION - Retrieve the total utilization of the device
+ * over the last period specified by the user.
+ * The period can be between 100ms to 1s, in
+ * resolution of 100ms. The return value is a
+ * percentage of the utilization rate.
* HL_INFO_HW_EVENTS_AGGREGATE - Receive an array describing how many times each
* event occurred since the driver was loaded.
+ * HL_INFO_CLK_RATE - Retrieve the current and maximum clock rate
+ * of the device in MHz. The maximum clock rate is
+ * configurable via a sysfs parameter.
+ * HL_INFO_RESET_COUNT - Retrieve the counts of the soft and hard reset
+ * operations performed on the device since the last
+ * time the driver was loaded.
*/
#define HL_INFO_HW_IP_INFO 0
#define HL_INFO_HW_EVENTS 1
@@ -103,8 +109,11 @@ enum hl_device_status {
#define HL_INFO_DEVICE_STATUS 4
#define HL_INFO_DEVICE_UTILIZATION 6
#define HL_INFO_HW_EVENTS_AGGREGATE 7
+#define HL_INFO_CLK_RATE 8
+#define HL_INFO_RESET_COUNT 9
#define HL_INFO_VERSION_MAX_LEN 128
+#define HL_INFO_CARD_NAME_MAX_LEN 16
struct hl_info_hw_ip_info {
__u64 sram_base_address;
@@ -123,6 +132,7 @@ struct hl_info_hw_ip_info {
__u8 dram_enabled;
__u8 pad[2];
__u8 armcp_version[HL_INFO_VERSION_MAX_LEN];
+ __u8 card_name[HL_INFO_CARD_NAME_MAX_LEN];
};
struct hl_info_dram_usage {
@@ -149,6 +159,16 @@ struct hl_info_device_utilization {
__u32 pad;
};
+struct hl_info_clk_rate {
+ __u32 cur_clk_rate_mhz;
+ __u32 max_clk_rate_mhz;
+};
+
+struct hl_info_reset_count {
+ __u32 hard_reset_cnt;
+ __u32 soft_reset_cnt;
+};
+
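A hedged example of querying the new clock-rate opcode, assuming the HL_IOCTL_INFO ioctl and the return_pointer/return_size/op members of struct hl_info_args defined further down in this header:

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <misc/habanalabs.h>

/* fd: open habanalabs device; clk: filled with current/max clock rate in MHz */
static int query_clk_rate(int fd, struct hl_info_clk_rate *clk)
{
	struct hl_info_args args;

	memset(&args, 0, sizeof(args));
	args.op = HL_INFO_CLK_RATE;
	args.return_pointer = (__u64)(uintptr_t)clk;   /* filled by the driver */
	args.return_size = sizeof(*clk);

	return ioctl(fd, HL_IOCTL_INFO, &args);
}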
struct hl_info_args {
/* Location of relevant struct in userspace */
__u64 return_pointer;
@@ -181,13 +201,15 @@ struct hl_info_args {
/* Opcode to destroy previously created command buffer */
#define HL_CB_OP_DESTROY 1
+#define HL_MAX_CB_SIZE 0x200000 /* 2MB */
+
struct hl_cb_in {
/* Handle of CB or 0 if we want to create one */
__u64 cb_handle;
/* HL_CB_OP_* */
__u32 op;
- /* Size of CB. Maximum size is 2MB. The minimum size that will be
- * allocated, regardless of this parameter's value, is PAGE_SIZE
+ /* Size of CB. Maximum size is HL_MAX_CB_SIZE. The minimum size that
+ * will be allocated, regardless of this parameter's value, is PAGE_SIZE
*/
__u32 cb_size;
/* Context ID - Currently not in use */
@@ -233,6 +255,8 @@ struct hl_cs_chunk {
#define HL_CS_STATUS_SUCCESS 0
+#define HL_MAX_JOBS_PER_CS 512
+
struct hl_cs_in {
/* this holds address of array of hl_cs_chunk for restore phase */
__u64 chunks_restore;
@@ -242,9 +266,13 @@ struct hl_cs_in {
* Currently not in use
*/
__u64 chunks_store;
- /* Number of chunks in restore phase array */
+ /* Number of chunks in restore phase array. Maximum number is
+ * HL_MAX_JOBS_PER_CS
+ */
__u32 num_chunks_restore;
- /* Number of chunks in execution array */
+ /* Number of chunks in execution array. Maximum number is
+ * HL_MAX_JOBS_PER_CS
+ */
__u32 num_chunks_execute;
/* Number of chunks in restore phase array - Currently not in use */
__u32 num_chunks_store;
@@ -589,7 +617,7 @@ struct hl_debug_args {
*
* The user can call this IOCTL with a handle it received from the CS IOCTL
* to wait until the handle's CS has finished executing. The user will wait
- * inside the kernel until the CS has finished or until the user-requeusted
+ * inside the kernel until the CS has finished or until the user-requested
* timeout has expired.
*
* The return value of the IOCTL is a standard Linux error code. The possible
diff --git a/include/uapi/misc/pvpanic.h b/include/uapi/misc/pvpanic.h
new file mode 100644
index 000000000000..54b7485390d3
--- /dev/null
+++ b/include/uapi/misc/pvpanic.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+
+#ifndef __PVPANIC_H__
+#define __PVPANIC_H__
+
+#define PVPANIC_PANICKED (1 << 0)
+#define PVPANIC_CRASH_LOADED (1 << 1)
+
+#endif /* __PVPANIC_H__ */
diff --git a/include/uapi/rdma/cxgb3-abi.h b/include/uapi/rdma/cxgb3-abi.h
deleted file mode 100644
index 85aed672f43e..000000000000
--- a/include/uapi/rdma/cxgb3-abi.h
+++ /dev/null
@@ -1,82 +0,0 @@
-/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR Linux-OpenIB) */
-/*
- * Copyright (c) 2006 Chelsio, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-#ifndef CXGB3_ABI_USER_H
-#define CXGB3_ABI_USER_H
-
-#include <linux/types.h>
-
-#define IWCH_UVERBS_ABI_VERSION 1
-
-/*
- * Make sure that all structs defined in this file remain laid out so
- * that they pack the same way on 32-bit and 64-bit architectures (to
- * avoid incompatibility between 32-bit userspace and 64-bit kernels).
- * In particular do not use pointer types -- pass pointers in __aligned_u64
- * instead.
- */
-struct iwch_create_cq_req {
- __aligned_u64 user_rptr_addr;
-};
-
-struct iwch_create_cq_resp_v0 {
- __aligned_u64 key;
- __u32 cqid;
- __u32 size_log2;
-};
-
-struct iwch_create_cq_resp {
- __aligned_u64 key;
- __u32 cqid;
- __u32 size_log2;
- __u32 memsize;
- __u32 reserved;
-};
-
-struct iwch_create_qp_resp {
- __aligned_u64 key;
- __aligned_u64 db_key;
- __u32 qpid;
- __u32 size_log2;
- __u32 sq_size_log2;
- __u32 rq_size_log2;
-};
-
-struct iwch_reg_user_mr_resp {
- __u32 pbl_addr;
-};
-
-struct iwch_alloc_pd_resp {
- __u32 pdid;
-};
-
-#endif /* CXGB3_ABI_USER_H */
diff --git a/include/uapi/rdma/efa-abi.h b/include/uapi/rdma/efa-abi.h
index 9599a2a62be8..53b6e2036a9b 100644
--- a/include/uapi/rdma/efa-abi.h
+++ b/include/uapi/rdma/efa-abi.h
@@ -90,12 +90,18 @@ struct efa_ibv_create_ah_resp {
__u8 reserved_30[2];
};
+enum {
+ EFA_QUERY_DEVICE_CAPS_RDMA_READ = 1 << 0,
+};
+
struct efa_ibv_ex_query_device_resp {
__u32 comp_mask;
__u32 max_sq_wr;
__u32 max_rq_wr;
__u16 max_sq_sge;
__u16 max_rq_sge;
+ __u32 max_rdma_size;
+ __u32 device_caps;
};
#endif /* EFA_ABI_USER_H */
diff --git a/include/uapi/rdma/ib_user_ioctl_cmds.h b/include/uapi/rdma/ib_user_ioctl_cmds.h
index 64f0e3aacd3f..d4ddbe4e696c 100644
--- a/include/uapi/rdma/ib_user_ioctl_cmds.h
+++ b/include/uapi/rdma/ib_user_ioctl_cmds.h
@@ -56,6 +56,7 @@ enum uverbs_default_objects {
UVERBS_OBJECT_FLOW_ACTION,
UVERBS_OBJECT_DM,
UVERBS_OBJECT_COUNTERS,
+ UVERBS_OBJECT_ASYNC_EVENT,
};
enum {
@@ -67,6 +68,7 @@ enum uverbs_methods_device {
UVERBS_METHOD_INVOKE_WRITE,
UVERBS_METHOD_INFO_HANDLES,
UVERBS_METHOD_QUERY_PORT,
+ UVERBS_METHOD_GET_CONTEXT,
};
enum uverbs_attrs_invoke_write_cmd_attr_ids {
@@ -80,6 +82,11 @@ enum uverbs_attrs_query_port_cmd_attr_ids {
UVERBS_ATTR_QUERY_PORT_RESP,
};
+enum uverbs_attrs_get_context_attr_ids {
+ UVERBS_ATTR_GET_CONTEXT_NUM_COMP_VECTORS,
+ UVERBS_ATTR_GET_CONTEXT_CORE_SUPPORT,
+};
+
enum uverbs_attrs_create_cq_cmd_attr_ids {
UVERBS_ATTR_CREATE_CQ_HANDLE,
UVERBS_ATTR_CREATE_CQ_CQE,
@@ -241,4 +248,12 @@ enum uverbs_attrs_flow_destroy_ids {
UVERBS_ATTR_DESTROY_FLOW_HANDLE,
};
+enum uverbs_method_async_event {
+ UVERBS_METHOD_ASYNC_EVENT_ALLOC,
+};
+
+enum uverbs_attrs_async_event_create {
+ UVERBS_ATTR_ASYNC_EVENT_ALLOC_FD_HANDLE,
+};
+
#endif
diff --git a/include/uapi/rdma/ib_user_ioctl_verbs.h b/include/uapi/rdma/ib_user_ioctl_verbs.h
index 72c7fc75f960..a640bb814be0 100644
--- a/include/uapi/rdma/ib_user_ioctl_verbs.h
+++ b/include/uapi/rdma/ib_user_ioctl_verbs.h
@@ -41,6 +41,13 @@
#define RDMA_UAPI_PTR(_type, _name) __aligned_u64 _name
#endif
+#define IB_UVERBS_ACCESS_OPTIONAL_FIRST (1 << 20)
+#define IB_UVERBS_ACCESS_OPTIONAL_LAST (1 << 29)
+
+enum ib_uverbs_core_support {
+ IB_UVERBS_CORE_SUPPORT_OPTIONAL_MR_ACCESS = 1 << 0,
+};
+
enum ib_uverbs_access_flags {
IB_UVERBS_ACCESS_LOCAL_WRITE = 1 << 0,
IB_UVERBS_ACCESS_REMOTE_WRITE = 1 << 1,
@@ -50,6 +57,11 @@ enum ib_uverbs_access_flags {
IB_UVERBS_ACCESS_ZERO_BASED = 1 << 5,
IB_UVERBS_ACCESS_ON_DEMAND = 1 << 6,
IB_UVERBS_ACCESS_HUGETLB = 1 << 7,
+
+ IB_UVERBS_ACCESS_RELAXED_ORDERING = IB_UVERBS_ACCESS_OPTIONAL_FIRST,
+ IB_UVERBS_ACCESS_OPTIONAL_RANGE =
+ ((IB_UVERBS_ACCESS_OPTIONAL_LAST << 1) - 1) &
+ ~(IB_UVERBS_ACCESS_OPTIONAL_FIRST - 1)
};
enum ib_uverbs_query_port_cap_flags {
@@ -173,4 +185,26 @@ struct ib_uverbs_query_port_resp_ex {
__u8 reserved[6];
};
+enum rdma_driver_id {
+ RDMA_DRIVER_UNKNOWN,
+ RDMA_DRIVER_MLX5,
+ RDMA_DRIVER_MLX4,
+ RDMA_DRIVER_CXGB3,
+ RDMA_DRIVER_CXGB4,
+ RDMA_DRIVER_MTHCA,
+ RDMA_DRIVER_BNXT_RE,
+ RDMA_DRIVER_OCRDMA,
+ RDMA_DRIVER_NES,
+ RDMA_DRIVER_I40IW,
+ RDMA_DRIVER_VMW_PVRDMA,
+ RDMA_DRIVER_QEDR,
+ RDMA_DRIVER_HNS,
+ RDMA_DRIVER_USNIC,
+ RDMA_DRIVER_RXE,
+ RDMA_DRIVER_HFI1,
+ RDMA_DRIVER_QIB,
+ RDMA_DRIVER_EFA,
+ RDMA_DRIVER_SIW,
+};
+
#endif
diff --git a/include/uapi/rdma/mlx5_user_ioctl_cmds.h b/include/uapi/rdma/mlx5_user_ioctl_cmds.h
index d0da070cf0ab..afe7da6f2b8e 100644
--- a/include/uapi/rdma/mlx5_user_ioctl_cmds.h
+++ b/include/uapi/rdma/mlx5_user_ioctl_cmds.h
@@ -115,6 +115,22 @@ enum mlx5_ib_devx_obj_methods {
MLX5_IB_METHOD_DEVX_OBJ_ASYNC_QUERY,
};
+enum mlx5_ib_var_alloc_attrs {
+ MLX5_IB_ATTR_VAR_OBJ_ALLOC_HANDLE = (1U << UVERBS_ID_NS_SHIFT),
+ MLX5_IB_ATTR_VAR_OBJ_ALLOC_MMAP_OFFSET,
+ MLX5_IB_ATTR_VAR_OBJ_ALLOC_MMAP_LENGTH,
+ MLX5_IB_ATTR_VAR_OBJ_ALLOC_PAGE_ID,
+};
+
+enum mlx5_ib_var_obj_destroy_attrs {
+ MLX5_IB_ATTR_VAR_OBJ_DESTROY_HANDLE = (1U << UVERBS_ID_NS_SHIFT),
+};
+
+enum mlx5_ib_var_obj_methods {
+ MLX5_IB_METHOD_VAR_OBJ_ALLOC = (1U << UVERBS_ID_NS_SHIFT),
+ MLX5_IB_METHOD_VAR_OBJ_DESTROY,
+};
+
enum mlx5_ib_devx_umem_reg_attrs {
MLX5_IB_ATTR_DEVX_UMEM_REG_HANDLE = (1U << UVERBS_ID_NS_SHIFT),
MLX5_IB_ATTR_DEVX_UMEM_REG_ADDR,
@@ -156,6 +172,7 @@ enum mlx5_ib_objects {
MLX5_IB_OBJECT_FLOW_MATCHER,
MLX5_IB_OBJECT_DEVX_ASYNC_CMD_FD,
MLX5_IB_OBJECT_DEVX_ASYNC_EVENT_FD,
+ MLX5_IB_OBJECT_VAR,
};
enum mlx5_ib_flow_matcher_create_attrs {
@@ -198,6 +215,7 @@ enum mlx5_ib_create_flow_attrs {
MLX5_IB_ATTR_CREATE_FLOW_ARR_FLOW_ACTIONS,
MLX5_IB_ATTR_CREATE_FLOW_TAG,
MLX5_IB_ATTR_CREATE_FLOW_ARR_COUNTERS_DEVX,
+ MLX5_IB_ATTR_CREATE_FLOW_ARR_COUNTERS_DEVX_OFFSET,
};
enum mlx5_ib_destoy_flow_attrs {
diff --git a/include/uapi/rdma/nes-abi.h b/include/uapi/rdma/nes-abi.h
deleted file mode 100644
index f80495baa969..000000000000
--- a/include/uapi/rdma/nes-abi.h
+++ /dev/null
@@ -1,115 +0,0 @@
-/* SPDX-License-Identifier: ((GPL-2.0 WITH Linux-syscall-note) OR Linux-OpenIB) */
-/*
- * Copyright (c) 2006 - 2011 Intel Corporation. All rights reserved.
- * Copyright (c) 2005 Topspin Communications. All rights reserved.
- * Copyright (c) 2005 Cisco Systems. All rights reserved.
- * Copyright (c) 2005 Open Grid Computing, Inc. All rights reserved.
- *
- * This software is available to you under a choice of one of two
- * licenses. You may choose to be licensed under the terms of the GNU
- * General Public License (GPL) Version 2, available from the file
- * COPYING in the main directory of this source tree, or the
- * OpenIB.org BSD license below:
- *
- * Redistribution and use in source and binary forms, with or
- * without modification, are permitted provided that the following
- * conditions are met:
- *
- * - Redistributions of source code must retain the above
- * copyright notice, this list of conditions and the following
- * disclaimer.
- *
- * - Redistributions in binary form must reproduce the above
- * copyright notice, this list of conditions and the following
- * disclaimer in the documentation and/or other materials
- * provided with the distribution.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- *
- */
-
-#ifndef NES_ABI_USER_H
-#define NES_ABI_USER_H
-
-#include <linux/types.h>
-
-#define NES_ABI_USERSPACE_VER 2
-#define NES_ABI_KERNEL_VER 2
-
-/*
- * Make sure that all structs defined in this file remain laid out so
- * that they pack the same way on 32-bit and 64-bit architectures (to
- * avoid incompatibility between 32-bit userspace and 64-bit kernels).
- * In particular do not use pointer types -- pass pointers in __u64
- * instead.
- */
-
-struct nes_alloc_ucontext_req {
- __u32 reserved32;
- __u8 userspace_ver;
- __u8 reserved8[3];
-};
-
-struct nes_alloc_ucontext_resp {
- __u32 max_pds; /* maximum pds allowed for this user process */
- __u32 max_qps; /* maximum qps allowed for this user process */
- __u32 wq_size; /* size of the WQs (sq+rq) allocated to the mmaped area */
- __u8 virtwq; /* flag to indicate if virtual WQ are to be used or not */
- __u8 kernel_ver;
- __u8 reserved[2];
-};
-
-struct nes_alloc_pd_resp {
- __u32 pd_id;
- __u32 mmap_db_index;
-};
-
-struct nes_create_cq_req {
- __aligned_u64 user_cq_buffer;
- __u32 mcrqf;
- __u8 reserved[4];
-};
-
-struct nes_create_qp_req {
- __aligned_u64 user_wqe_buffers;
- __aligned_u64 user_qp_buffer;
-};
-
-enum iwnes_memreg_type {
- IWNES_MEMREG_TYPE_MEM = 0x0000,
- IWNES_MEMREG_TYPE_QP = 0x0001,
- IWNES_MEMREG_TYPE_CQ = 0x0002,
- IWNES_MEMREG_TYPE_MW = 0x0003,
- IWNES_MEMREG_TYPE_FMR = 0x0004,
- IWNES_MEMREG_TYPE_FMEM = 0x0005,
-};
-
-struct nes_mem_reg_req {
- __u32 reg_type; /* indicates if id is memory, QP or CQ */
- __u32 reserved;
-};
-
-struct nes_create_cq_resp {
- __u32 cq_id;
- __u32 cq_size;
- __u32 mmap_db_index;
- __u32 reserved;
-};
-
-struct nes_create_qp_resp {
- __u32 qp_id;
- __u32 actual_sq_size;
- __u32 actual_rq_size;
- __u32 mmap_sq_db_index;
- __u32 mmap_rq_db_index;
- __u32 nes_drv_opt;
-};
-
-#endif /* NES_ABI_USER_H */
diff --git a/include/uapi/rdma/qedr-abi.h b/include/uapi/rdma/qedr-abi.h
index 7a10b3a325fa..a0b83c9d4498 100644
--- a/include/uapi/rdma/qedr-abi.h
+++ b/include/uapi/rdma/qedr-abi.h
@@ -38,6 +38,27 @@
#define QEDR_ABI_VERSION (8)
/* user kernel communication data structures. */
+enum qedr_alloc_ucontext_flags {
+ QEDR_ALLOC_UCTX_RESERVED = 1 << 0,
+ QEDR_ALLOC_UCTX_DB_REC = 1 << 1
+};
+
+struct qedr_alloc_ucontext_req {
+ __u32 context_flags;
+ __u32 reserved;
+};
+
+#define QEDR_LDPM_MAX_SIZE (8192)
+#define QEDR_EDPM_TRANS_SIZE (64)
+
+enum qedr_rdma_dpm_type {
+ QEDR_DPM_TYPE_NONE = 0,
+ QEDR_DPM_TYPE_ROCE_ENHANCED = 1 << 0,
+ QEDR_DPM_TYPE_ROCE_LEGACY = 1 << 1,
+ QEDR_DPM_TYPE_IWARP_LEGACY = 1 << 2,
+ QEDR_DPM_TYPE_RESERVED = 1 << 3,
+ QEDR_DPM_SIZES_SET = 1 << 4,
+};
struct qedr_alloc_ucontext_resp {
__aligned_u64 db_pa;
@@ -50,10 +71,12 @@ struct qedr_alloc_ucontext_resp {
__u32 sges_per_recv_wr;
__u32 sges_per_srq_wr;
__u32 max_cqes;
- __u8 dpm_enabled;
+ __u8 dpm_flags;
__u8 wids_enabled;
__u16 wid_count;
- __u32 reserved;
+ __u16 ldpm_limit_size;
+ __u8 edpm_trans_size;
+ __u8 reserved;
};
struct qedr_alloc_pd_ureq {
@@ -74,6 +97,7 @@ struct qedr_create_cq_uresp {
__u32 db_offset;
__u16 icid;
__u16 reserved;
+ __aligned_u64 db_rec_addr;
};
struct qedr_create_qp_ureq {
@@ -109,6 +133,13 @@ struct qedr_create_qp_uresp {
__u32 rq_db2_offset;
__u32 reserved;
+
+ /* address of SQ doorbell recovery user entry */
+ __aligned_u64 sq_db_rec_addr;
+
+ /* address of RQ doorbell recovery user entry */
+ __aligned_u64 rq_db_rec_addr;
+
};
struct qedr_create_srq_ureq {
@@ -128,4 +159,12 @@ struct qedr_create_srq_uresp {
__u32 reserved1;
};
+/* Doorbell recovery entry: allocated and populated by userspace doorbelling
+ * entities and mapped to the kernel. The kernel uses it to register doorbell
+ * information with the doorbell drop recovery mechanism.
+ */
+struct qedr_user_db_rec {
+ __aligned_u64 db_data; /* doorbell data */
+};
+
#endif /* __QEDR_USER_H__ */
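
As a rough illustration of the new negotiation, a userspace provider would set QEDR_ALLOC_UCTX_DB_REC in the widened alloc_ucontext request and then inspect dpm_flags (which replaces the old dpm_enabled byte) in the response. This is only a sketch built from the structures in the hunk above; the function names are hypothetical.

#include <string.h>
#include <rdma/qedr-abi.h>	/* the structures shown above */

/* Hypothetical provider helpers, illustrative only. */
static void qedr_fill_ucontext_req(struct qedr_alloc_ucontext_req *req)
{
	memset(req, 0, sizeof(*req));
	req->context_flags = QEDR_ALLOC_UCTX_DB_REC;	/* opt in to doorbell recovery */
}

static int qedr_edpm_enabled(const struct qedr_alloc_ucontext_resp *resp)
{
	/* dpm_flags replaces the old dpm_enabled boolean */
	return !!(resp->dpm_flags & QEDR_DPM_TYPE_ROCE_ENHANCED);
}
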
diff --git a/include/uapi/rdma/rdma_user_ioctl_cmds.h b/include/uapi/rdma/rdma_user_ioctl_cmds.h
index b8bb285f6b2a..7b1ec806f8f9 100644
--- a/include/uapi/rdma/rdma_user_ioctl_cmds.h
+++ b/include/uapi/rdma/rdma_user_ioctl_cmds.h
@@ -84,26 +84,4 @@ struct ib_uverbs_ioctl_hdr {
struct ib_uverbs_attr attrs[0];
};
-enum rdma_driver_id {
- RDMA_DRIVER_UNKNOWN,
- RDMA_DRIVER_MLX5,
- RDMA_DRIVER_MLX4,
- RDMA_DRIVER_CXGB3,
- RDMA_DRIVER_CXGB4,
- RDMA_DRIVER_MTHCA,
- RDMA_DRIVER_BNXT_RE,
- RDMA_DRIVER_OCRDMA,
- RDMA_DRIVER_NES,
- RDMA_DRIVER_I40IW,
- RDMA_DRIVER_VMW_PVRDMA,
- RDMA_DRIVER_QEDR,
- RDMA_DRIVER_HNS,
- RDMA_DRIVER_USNIC,
- RDMA_DRIVER_RXE,
- RDMA_DRIVER_HFI1,
- RDMA_DRIVER_QIB,
- RDMA_DRIVER_EFA,
- RDMA_DRIVER_SIW,
-};
-
#endif
diff --git a/include/uapi/rdma/vmw_pvrdma-abi.h b/include/uapi/rdma/vmw_pvrdma-abi.h
index 6e73f0274e41..f8b638c73371 100644
--- a/include/uapi/rdma/vmw_pvrdma-abi.h
+++ b/include/uapi/rdma/vmw_pvrdma-abi.h
@@ -179,6 +179,11 @@ struct pvrdma_create_qp {
__aligned_u64 qp_addr;
};
+struct pvrdma_create_qp_resp {
+ __u32 qpn;
+ __u32 qp_handle;
+};
+
/* PVRDMA masked atomic compare and swap */
struct pvrdma_ex_cmp_swap {
__aligned_u64 swap_val;
diff --git a/include/uapi/scsi/scsi_bsg_ufs.h b/include/uapi/scsi/scsi_bsg_ufs.h
index 9988db6ad244..d55f2176dfd4 100644
--- a/include/uapi/scsi/scsi_bsg_ufs.h
+++ b/include/uapi/scsi/scsi_bsg_ufs.h
@@ -68,14 +68,13 @@ struct utp_upiu_cmd {
* @header:UPIU header structure DW-0 to DW-2
* @sc: fields structure for scsi command DW-3 to DW-7
* @qr: fields structure for query request DW-3 to DW-7
+ * @uc: use utp_upiu_query to host the 4 dwords of uic command
*/
struct utp_upiu_req {
struct utp_upiu_header header;
union {
struct utp_upiu_cmd sc;
struct utp_upiu_query qr;
- struct utp_upiu_query tr;
- /* use utp_upiu_query to host the 4 dwords of uic command */
struct utp_upiu_query uc;
};
};
diff --git a/include/uapi/sound/asoc.h b/include/uapi/sound/asoc.h
index a74ca232f1fc..6048553c119d 100644
--- a/include/uapi/sound/asoc.h
+++ b/include/uapi/sound/asoc.h
@@ -17,7 +17,6 @@
#define __LINUX_UAPI_SND_ASOC_H
#include <linux/types.h>
-#include <sound/asound.h>
/*
* Maximum number of channels topology kcontrol can represent.
diff --git a/include/uapi/sound/asound.h b/include/uapi/sound/asound.h
index df1153cea0b7..535a7229e1d9 100644
--- a/include/uapi/sound/asound.h
+++ b/include/uapi/sound/asound.h
@@ -26,7 +26,9 @@
#if defined(__KERNEL__) || defined(__linux__)
#include <linux/types.h>
+#include <asm/byteorder.h>
#else
+#include <endian.h>
#include <sys/ioctl.h>
#endif
@@ -154,7 +156,7 @@ struct snd_hwdep_dsp_image {
* *
*****************************************************************************/
-#define SNDRV_PCM_VERSION SNDRV_PROTOCOL_VERSION(2, 0, 14)
+#define SNDRV_PCM_VERSION SNDRV_PROTOCOL_VERSION(2, 0, 15)
typedef unsigned long snd_pcm_uframes_t;
typedef signed long snd_pcm_sframes_t;
@@ -301,7 +303,9 @@ typedef int __bitwise snd_pcm_subformat_t;
#define SNDRV_PCM_INFO_DRAIN_TRIGGER 0x40000000 /* internal kernel flag - trigger in drain */
#define SNDRV_PCM_INFO_FIFO_IN_FRAMES 0x80000000 /* internal kernel flag - FIFO size is in frames */
-
+#if (__BITS_PER_LONG == 32 && defined(__USE_TIME_BITS64)) || defined __KERNEL__
+#define __SND_STRUCT_TIME64
+#endif
typedef int __bitwise snd_pcm_state_t;
#define SNDRV_PCM_STATE_OPEN ((__force snd_pcm_state_t) 0) /* stream is open */
@@ -317,8 +321,17 @@ typedef int __bitwise snd_pcm_state_t;
enum {
SNDRV_PCM_MMAP_OFFSET_DATA = 0x00000000,
- SNDRV_PCM_MMAP_OFFSET_STATUS = 0x80000000,
- SNDRV_PCM_MMAP_OFFSET_CONTROL = 0x81000000,
+ SNDRV_PCM_MMAP_OFFSET_STATUS_OLD = 0x80000000,
+ SNDRV_PCM_MMAP_OFFSET_CONTROL_OLD = 0x81000000,
+ SNDRV_PCM_MMAP_OFFSET_STATUS_NEW = 0x82000000,
+ SNDRV_PCM_MMAP_OFFSET_CONTROL_NEW = 0x83000000,
+#ifdef __SND_STRUCT_TIME64
+ SNDRV_PCM_MMAP_OFFSET_STATUS = SNDRV_PCM_MMAP_OFFSET_STATUS_NEW,
+ SNDRV_PCM_MMAP_OFFSET_CONTROL = SNDRV_PCM_MMAP_OFFSET_CONTROL_NEW,
+#else
+ SNDRV_PCM_MMAP_OFFSET_STATUS = SNDRV_PCM_MMAP_OFFSET_STATUS_OLD,
+ SNDRV_PCM_MMAP_OFFSET_CONTROL = SNDRV_PCM_MMAP_OFFSET_CONTROL_OLD,
+#endif
};
union snd_pcm_sync_id {
@@ -456,8 +469,13 @@ enum {
SNDRV_PCM_AUDIO_TSTAMP_TYPE_LAST = SNDRV_PCM_AUDIO_TSTAMP_TYPE_LINK_SYNCHRONIZED
};
+#ifndef __KERNEL__
+/* explicit padding avoids incompatibility between i386 and x86-64 */
+typedef struct { unsigned char pad[sizeof(time_t) - sizeof(int)]; } __time_pad;
+
struct snd_pcm_status {
snd_pcm_state_t state; /* stream state */
+ __time_pad pad1; /* align to timespec */
struct timespec trigger_tstamp; /* time when stream was started/stopped/paused */
struct timespec tstamp; /* reference timestamp */
snd_pcm_uframes_t appl_ptr; /* appl ptr */
@@ -473,17 +491,48 @@ struct snd_pcm_status {
__u32 audio_tstamp_accuracy; /* in ns units, only valid if indicated in audio_tstamp_data */
unsigned char reserved[52-2*sizeof(struct timespec)]; /* must be filled with zero */
};
+#endif
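
The pad size computed above is worth spelling out: sizeof(time_t) - sizeof(int) is 0 bytes with a classic 32-bit time_t and 4 bytes with a 64-bit time_t, so a 32-bit build using 64-bit time_t lays out the following struct timespec at the same offset a 64-bit build does. A small userspace check, using a local copy of the typedef (GCC and Clang accept the zero-length array case, as the header itself relies on):

#include <stdio.h>
#include <time.h>

/* Local copy of the __time_pad construction above, illustrative only. */
typedef struct { unsigned char pad[sizeof(time_t) - sizeof(int)]; } time_pad_demo;

int main(void)
{
	/* Prints 4 whenever time_t is 64-bit, 0 for a 32-bit time_t. */
	printf("pad bytes: %zu\n", sizeof(time_pad_demo));
	return 0;
}
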
+
+/*
+ * For mmap operations, we need the 64-bit layout, both for compat mode,
+ * and for y2038 compatibility. For 64-bit applications, the two definitions
+ * are identical, so we keep the traditional version.
+ */
+#ifdef __SND_STRUCT_TIME64
+#define __snd_pcm_mmap_status64 snd_pcm_mmap_status
+#define __snd_pcm_mmap_control64 snd_pcm_mmap_control
+#define __snd_pcm_sync_ptr64 snd_pcm_sync_ptr
+#ifdef __KERNEL__
+#define __snd_timespec64 __kernel_timespec
+#else
+#define __snd_timespec64 timespec
+#endif
+struct __snd_timespec {
+ __s32 tv_sec;
+ __s32 tv_nsec;
+};
+#else
+#define __snd_pcm_mmap_status snd_pcm_mmap_status
+#define __snd_pcm_mmap_control snd_pcm_mmap_control
+#define __snd_pcm_sync_ptr snd_pcm_sync_ptr
+#define __snd_timespec timespec
+struct __snd_timespec64 {
+ __s64 tv_sec;
+ __s64 tv_nsec;
+};
-struct snd_pcm_mmap_status {
+#endif
+
+struct __snd_pcm_mmap_status {
snd_pcm_state_t state; /* RO: state - SNDRV_PCM_STATE_XXXX */
int pad1; /* Needed for 64 bit alignment */
snd_pcm_uframes_t hw_ptr; /* RO: hw ptr (0...boundary-1) */
- struct timespec tstamp; /* Timestamp */
+ struct __snd_timespec tstamp; /* Timestamp */
snd_pcm_state_t suspended_state; /* RO: suspended stream state */
- struct timespec audio_tstamp; /* from sample counter or wall clock */
+ struct __snd_timespec audio_tstamp; /* from sample counter or wall clock */
};
-struct snd_pcm_mmap_control {
+struct __snd_pcm_mmap_control {
snd_pcm_uframes_t appl_ptr; /* RW: appl ptr (0...boundary-1) */
snd_pcm_uframes_t avail_min; /* RW: min available frames for wakeup */
};
@@ -492,14 +541,59 @@ struct snd_pcm_mmap_control {
#define SNDRV_PCM_SYNC_PTR_APPL (1<<1) /* get appl_ptr from driver (r/w op) */
#define SNDRV_PCM_SYNC_PTR_AVAIL_MIN (1<<2) /* get avail_min from driver */
-struct snd_pcm_sync_ptr {
+struct __snd_pcm_sync_ptr {
unsigned int flags;
union {
- struct snd_pcm_mmap_status status;
+ struct __snd_pcm_mmap_status status;
+ unsigned char reserved[64];
+ } s;
+ union {
+ struct __snd_pcm_mmap_control control;
+ unsigned char reserved[64];
+ } c;
+};
+
+#if defined(__BYTE_ORDER) ? __BYTE_ORDER == __BIG_ENDIAN : defined(__BIG_ENDIAN)
+typedef char __pad_before_uframe[sizeof(__u64) - sizeof(snd_pcm_uframes_t)];
+typedef char __pad_after_uframe[0];
+#endif
+
+#if defined(__BYTE_ORDER) ? __BYTE_ORDER == __LITTLE_ENDIAN : defined(__LITTLE_ENDIAN)
+typedef char __pad_before_uframe[0];
+typedef char __pad_after_uframe[sizeof(__u64) - sizeof(snd_pcm_uframes_t)];
+#endif
+
+struct __snd_pcm_mmap_status64 {
+ snd_pcm_state_t state; /* RO: state - SNDRV_PCM_STATE_XXXX */
+ __u32 pad1; /* Needed for 64 bit alignment */
+ __pad_before_uframe __pad1;
+ snd_pcm_uframes_t hw_ptr; /* RO: hw ptr (0...boundary-1) */
+ __pad_after_uframe __pad2;
+ struct __snd_timespec64 tstamp; /* Timestamp */
+ snd_pcm_state_t suspended_state;/* RO: suspended stream state */
+ __u32 pad3; /* Needed for 64 bit alignment */
+ struct __snd_timespec64 audio_tstamp; /* sample counter or wall clock */
+};
+
+struct __snd_pcm_mmap_control64 {
+ __pad_before_uframe __pad1;
+ snd_pcm_uframes_t appl_ptr; /* RW: appl ptr (0...boundary-1) */
+ __pad_before_uframe __pad2;
+
+ __pad_before_uframe __pad3;
+ snd_pcm_uframes_t avail_min; /* RW: min available frames for wakeup */
+ __pad_after_uframe __pad4;
+};
+
+struct __snd_pcm_sync_ptr64 {
+ __u32 flags;
+ __u32 pad1;
+ union {
+ struct __snd_pcm_mmap_status64 status;
unsigned char reserved[64];
} s;
union {
- struct snd_pcm_mmap_control control;
+ struct __snd_pcm_mmap_control64 control;
unsigned char reserved[64];
} c;
};
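
The endian-dependent padding above widens every snd_pcm_uframes_t (an unsigned long, so 4 bytes on 32-bit userspace) to a full 8-byte slot, with the significant half placed where a 64-bit kernel reads it. A compile-time sanity check of that idea, using local stand-in names rather than the real structures (zero-length arrays are a GCC/Clang extension the header already depends on):

typedef unsigned long uframes_demo_t;	/* stands in for snd_pcm_uframes_t */

#if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
typedef char pad_before_demo[sizeof(unsigned long long) - sizeof(uframes_demo_t)];
typedef char pad_after_demo[0];
#else
typedef char pad_before_demo[0];
typedef char pad_after_demo[sizeof(unsigned long long) - sizeof(uframes_demo_t)];
#endif

struct mmap_control_demo {
	pad_before_demo __pad1;
	uframes_demo_t appl_ptr;
	pad_after_demo __pad2;
};

/* The appl_ptr slot spans 8 bytes on 32-bit and 64-bit builds alike. */
_Static_assert(sizeof(struct mmap_control_demo) >= sizeof(unsigned long long),
	       "appl_ptr slot must span 8 bytes");
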
@@ -584,6 +678,8 @@ enum {
#define SNDRV_PCM_IOCTL_STATUS _IOR('A', 0x20, struct snd_pcm_status)
#define SNDRV_PCM_IOCTL_DELAY _IOR('A', 0x21, snd_pcm_sframes_t)
#define SNDRV_PCM_IOCTL_HWSYNC _IO('A', 0x22)
+#define __SNDRV_PCM_IOCTL_SYNC_PTR _IOWR('A', 0x23, struct __snd_pcm_sync_ptr)
+#define __SNDRV_PCM_IOCTL_SYNC_PTR64 _IOWR('A', 0x23, struct __snd_pcm_sync_ptr64)
#define SNDRV_PCM_IOCTL_SYNC_PTR _IOWR('A', 0x23, struct snd_pcm_sync_ptr)
#define SNDRV_PCM_IOCTL_STATUS_EXT _IOWR('A', 0x24, struct snd_pcm_status)
#define SNDRV_PCM_IOCTL_CHANNEL_INFO _IOR('A', 0x32, struct snd_pcm_channel_info)
@@ -614,7 +710,7 @@ enum {
* Raw MIDI section - /dev/snd/midi??
*/
-#define SNDRV_RAWMIDI_VERSION SNDRV_PROTOCOL_VERSION(2, 0, 0)
+#define SNDRV_RAWMIDI_VERSION SNDRV_PROTOCOL_VERSION(2, 0, 1)
enum {
SNDRV_RAWMIDI_STREAM_OUTPUT = 0,
@@ -648,13 +744,16 @@ struct snd_rawmidi_params {
unsigned char reserved[16]; /* reserved for future use */
};
+#ifndef __KERNEL__
struct snd_rawmidi_status {
int stream;
+ __time_pad pad1;
struct timespec tstamp; /* Timestamp */
size_t avail; /* available bytes */
size_t xruns; /* count of overruns since last status (in bytes) */
unsigned char reserved[16]; /* reserved for future use */
};
+#endif
#define SNDRV_RAWMIDI_IOCTL_PVERSION _IOR('W', 0x00, int)
#define SNDRV_RAWMIDI_IOCTL_INFO _IOR('W', 0x01, struct snd_rawmidi_info)
@@ -667,7 +766,7 @@ struct snd_rawmidi_status {
* Timer section - /dev/snd/timer
*/
-#define SNDRV_TIMER_VERSION SNDRV_PROTOCOL_VERSION(2, 0, 6)
+#define SNDRV_TIMER_VERSION SNDRV_PROTOCOL_VERSION(2, 0, 7)
enum {
SNDRV_TIMER_CLASS_NONE = -1,
@@ -761,6 +860,7 @@ struct snd_timer_params {
unsigned char reserved[60]; /* reserved */
};
+#ifndef __KERNEL__
struct snd_timer_status {
struct timespec tstamp; /* Timestamp - last update */
unsigned int resolution; /* current period resolution in ns */
@@ -769,10 +869,11 @@ struct snd_timer_status {
unsigned int queue; /* used queue size */
unsigned char reserved[64]; /* reserved */
};
+#endif
#define SNDRV_TIMER_IOCTL_PVERSION _IOR('T', 0x00, int)
#define SNDRV_TIMER_IOCTL_NEXT_DEVICE _IOWR('T', 0x01, struct snd_timer_id)
-#define SNDRV_TIMER_IOCTL_TREAD _IOW('T', 0x02, int)
+#define SNDRV_TIMER_IOCTL_TREAD_OLD _IOW('T', 0x02, int)
#define SNDRV_TIMER_IOCTL_GINFO _IOWR('T', 0x03, struct snd_timer_ginfo)
#define SNDRV_TIMER_IOCTL_GPARAMS _IOW('T', 0x04, struct snd_timer_gparams)
#define SNDRV_TIMER_IOCTL_GSTATUS _IOWR('T', 0x05, struct snd_timer_gstatus)
@@ -785,6 +886,15 @@ struct snd_timer_status {
#define SNDRV_TIMER_IOCTL_STOP _IO('T', 0xa1)
#define SNDRV_TIMER_IOCTL_CONTINUE _IO('T', 0xa2)
#define SNDRV_TIMER_IOCTL_PAUSE _IO('T', 0xa3)
+#define SNDRV_TIMER_IOCTL_TREAD64 _IOW('T', 0xa4, int)
+
+#if __BITS_PER_LONG == 64
+#define SNDRV_TIMER_IOCTL_TREAD SNDRV_TIMER_IOCTL_TREAD_OLD
+#else
+#define SNDRV_TIMER_IOCTL_TREAD ((sizeof(__kernel_long_t) >= sizeof(time_t)) ? \
+ SNDRV_TIMER_IOCTL_TREAD_OLD : \
+ SNDRV_TIMER_IOCTL_TREAD64)
+#endif
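
The selection above boils down to comparing the kernel's native long against userspace time_t: when __kernel_long_t can hold a time_t the legacy tread layout still matches, otherwise the new TREAD64 command must be used. A tiny userspace check of that condition, illustrative only:

#include <stdio.h>
#include <time.h>
#include <linux/posix_types.h>	/* __kernel_long_t */

int main(void)
{
	if (sizeof(__kernel_long_t) >= sizeof(time_t))
		puts("legacy layout: SNDRV_TIMER_IOCTL_TREAD_OLD is sufficient");
	else
		puts("64-bit time_t on a 32-bit ABI: SNDRV_TIMER_IOCTL_TREAD64 is used");
	return 0;
}
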
struct snd_timer_read {
unsigned int resolution;
@@ -810,11 +920,15 @@ enum {
SNDRV_TIMER_EVENT_MRESUME = SNDRV_TIMER_EVENT_RESUME + 10,
};
+#ifndef __KERNEL__
struct snd_timer_tread {
int event;
+ __time_pad pad1;
struct timespec tstamp;
unsigned int val;
+ __time_pad pad2;
};
+#endif
/****************************************************************************
* *
@@ -822,7 +936,7 @@ struct snd_timer_tread {
* *
****************************************************************************/
-#define SNDRV_CTL_VERSION SNDRV_PROTOCOL_VERSION(2, 0, 7)
+#define SNDRV_CTL_VERSION SNDRV_PROTOCOL_VERSION(2, 0, 8)
struct snd_ctl_card_info {
int card; /* card number */
@@ -860,7 +974,7 @@ typedef int __bitwise snd_ctl_elem_iface_t;
#define SNDRV_CTL_ELEM_ACCESS_WRITE (1<<1)
#define SNDRV_CTL_ELEM_ACCESS_READWRITE (SNDRV_CTL_ELEM_ACCESS_READ|SNDRV_CTL_ELEM_ACCESS_WRITE)
#define SNDRV_CTL_ELEM_ACCESS_VOLATILE (1<<2) /* control value may be changed without a notification */
-#define SNDRV_CTL_ELEM_ACCESS_TIMESTAMP (1<<3) /* when was control changed */
+// (1 << 3) is unused.
#define SNDRV_CTL_ELEM_ACCESS_TLV_READ (1<<4) /* TLV read is possible */
#define SNDRV_CTL_ELEM_ACCESS_TLV_WRITE (1<<5) /* TLV write is possible */
#define SNDRV_CTL_ELEM_ACCESS_TLV_READWRITE (SNDRV_CTL_ELEM_ACCESS_TLV_READ|SNDRV_CTL_ELEM_ACCESS_TLV_WRITE)
@@ -926,11 +1040,7 @@ struct snd_ctl_elem_info {
} enumerated;
unsigned char reserved[128];
} value;
- union {
- unsigned short d[4]; /* dimensions */
- unsigned short *d_ptr; /* indirect - obsoleted */
- } dimen;
- unsigned char reserved[64-4*sizeof(unsigned short)];
+ unsigned char reserved[64];
};
struct snd_ctl_elem_value {
@@ -955,8 +1065,7 @@ struct snd_ctl_elem_value {
} bytes;
struct snd_aes_iec958 iec958;
} value; /* RO */
- struct timespec tstamp;
- unsigned char reserved[128-sizeof(struct timespec)];
+ unsigned char reserved[128];
};
struct snd_ctl_tlv {
diff --git a/include/uapi/sound/compress_params.h b/include/uapi/sound/compress_params.h
index 3d4d6de66a17..9c96fb0e4d90 100644
--- a/include/uapi/sound/compress_params.h
+++ b/include/uapi/sound/compress_params.h
@@ -317,12 +317,22 @@ struct snd_enc_generic {
__s32 reserved[15]; /* Can be used for SND_AUDIOCODEC_BESPOKE */
} __attribute__((packed, aligned(4)));
+struct snd_dec_flac {
+ __u16 sample_size;
+ __u16 min_blk_size;
+ __u16 max_blk_size;
+ __u16 min_frame_size;
+ __u16 max_frame_size;
+ __u16 reserved;
+} __attribute__((packed, aligned(4)));
+
union snd_codec_options {
struct snd_enc_wma wma;
struct snd_enc_vorbis vorbis;
struct snd_enc_real real;
struct snd_enc_flac flac;
struct snd_enc_generic generic;
+ struct snd_dec_flac flac_d;
} __attribute__((packed, aligned(4)));
/** struct snd_codec_desc - description of codec capabilities
diff --git a/include/uapi/sound/emu10k1.h b/include/uapi/sound/emu10k1.h
index 042c5a6f16ee..88609cc0524c 100644
--- a/include/uapi/sound/emu10k1.h
+++ b/include/uapi/sound/emu10k1.h
@@ -23,8 +23,9 @@
#ifndef _UAPI__SOUND_EMU10K1_H
#define _UAPI__SOUND_EMU10K1_H
+#ifdef __linux__
#include <linux/types.h>
-#include <sound/asound.h>
+#endif
/*
* ---- FX8010 ----
@@ -282,8 +283,22 @@ struct snd_emu10k1_fx8010_info {
#define EMU10K1_GPR_TRANSLATION_TREBLE 3
#define EMU10K1_GPR_TRANSLATION_ONOFF 4
+enum emu10k1_ctl_elem_iface {
+ EMU10K1_CTL_ELEM_IFACE_MIXER = 2, /* virtual mixer device */
+ EMU10K1_CTL_ELEM_IFACE_PCM = 3, /* PCM device */
+};
+
+struct emu10k1_ctl_elem_id {
+ unsigned int pad; /* don't use */
+ int iface; /* interface identifier */
+ unsigned int device; /* device/client number */
+ unsigned int subdevice; /* subdevice (substream) number */
+ unsigned char name[44]; /* ASCII name of item */
+ unsigned int index; /* index of item */
+};
+
struct snd_emu10k1_fx8010_control_gpr {
- struct snd_ctl_elem_id id; /* full control ID definition */
+ struct emu10k1_ctl_elem_id id; /* full control ID definition */
unsigned int vcount; /* visible count */
unsigned int count; /* count of GPR (1..16) */
unsigned short gpr[32]; /* GPR number(s) */
@@ -296,7 +311,7 @@ struct snd_emu10k1_fx8010_control_gpr {
/* old ABI without TLV support */
struct snd_emu10k1_fx8010_control_old_gpr {
- struct snd_ctl_elem_id id;
+ struct emu10k1_ctl_elem_id id;
unsigned int vcount;
unsigned int count;
unsigned short gpr[32];
@@ -310,24 +325,24 @@ struct snd_emu10k1_fx8010_code {
char name[128];
__EMU10K1_DECLARE_BITMAP(gpr_valid, 0x200); /* bitmask of valid initializers */
- __u32 __user *gpr_map; /* initializers */
+ __u32 *gpr_map; /* initializers */
unsigned int gpr_add_control_count; /* count of GPR controls to add/replace */
- struct snd_emu10k1_fx8010_control_gpr __user *gpr_add_controls; /* GPR controls to add/replace */
+ struct snd_emu10k1_fx8010_control_gpr *gpr_add_controls; /* GPR controls to add/replace */
unsigned int gpr_del_control_count; /* count of GPR controls to remove */
- struct snd_ctl_elem_id __user *gpr_del_controls; /* IDs of GPR controls to remove */
+ struct emu10k1_ctl_elem_id *gpr_del_controls; /* IDs of GPR controls to remove */
unsigned int gpr_list_control_count; /* count of GPR controls to list */
unsigned int gpr_list_control_total; /* total count of GPR controls */
- struct snd_emu10k1_fx8010_control_gpr __user *gpr_list_controls; /* listed GPR controls */
+ struct snd_emu10k1_fx8010_control_gpr *gpr_list_controls; /* listed GPR controls */
__EMU10K1_DECLARE_BITMAP(tram_valid, 0x100); /* bitmask of valid initializers */
- __u32 __user *tram_data_map; /* data initializers */
- __u32 __user *tram_addr_map; /* map initializers */
+ __u32 *tram_data_map; /* data initializers */
+ __u32 *tram_addr_map; /* map initializers */
__EMU10K1_DECLARE_BITMAP(code_valid, 1024); /* bitmask of valid instructions */
- __u32 __user *code; /* one instruction - 64 bits */
+ __u32 *code; /* one instruction - 64 bits */
};
struct snd_emu10k1_fx8010_tram {
@@ -371,11 +386,4 @@ struct snd_emu10k1_fx8010_pcm_rec {
#define SNDRV_EMU10K1_IOCTL_SINGLE_STEP _IOW ('H', 0x83, int)
#define SNDRV_EMU10K1_IOCTL_DBG_READ _IOR ('H', 0x84, int)
-/* typedefs for compatibility to user-space */
-typedef struct snd_emu10k1_fx8010_info emu10k1_fx8010_info_t;
-typedef struct snd_emu10k1_fx8010_control_gpr emu10k1_fx8010_control_gpr_t;
-typedef struct snd_emu10k1_fx8010_code emu10k1_fx8010_code_t;
-typedef struct snd_emu10k1_fx8010_tram emu10k1_fx8010_tram_t;
-typedef struct snd_emu10k1_fx8010_pcm_rec emu10k1_fx8010_pcm_t;
-
#endif /* _UAPI__SOUND_EMU10K1_H */
diff --git a/include/uapi/sound/hdsp.h b/include/uapi/sound/hdsp.h
index 5dc0c3db0a4c..b8df62b60f4d 100644
--- a/include/uapi/sound/hdsp.h
+++ b/include/uapi/sound/hdsp.h
@@ -20,7 +20,9 @@
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+#ifdef __linux__
#include <linux/types.h>
+#endif
#define HDSP_MATRIX_MIXER_SIZE 2048
@@ -74,7 +76,7 @@ struct hdsp_config_info {
#define SNDRV_HDSP_IOCTL_GET_CONFIG_INFO _IOR('H', 0x41, struct hdsp_config_info)
struct hdsp_firmware {
- void __user *firmware_data; /* 24413 x 4 bytes */
+ void *firmware_data; /* 24413 x 4 bytes */
};
#define SNDRV_HDSP_IOCTL_UPLOAD_FIRMWARE _IOW('H', 0x42, struct hdsp_firmware)
@@ -99,13 +101,4 @@ struct hdsp_9632_aeb {
#define SNDRV_HDSP_IOCTL_GET_9632_AEB _IOR('H', 0x45, struct hdsp_9632_aeb)
-/* typedefs for compatibility to user-space */
-typedef enum HDSP_IO_Type HDSP_IO_Type;
-typedef struct hdsp_peak_rms hdsp_peak_rms_t;
-typedef struct hdsp_config_info hdsp_config_info_t;
-typedef struct hdsp_firmware hdsp_firmware_t;
-typedef struct hdsp_version hdsp_version_t;
-typedef struct hdsp_mixer hdsp_mixer_t;
-typedef struct hdsp_9632_aeb hdsp_9632_aeb_t;
-
#endif /* __SOUND_HDSP_H */
diff --git a/include/uapi/sound/hdspm.h b/include/uapi/sound/hdspm.h
index a38f3f79beb7..14af3d00ea3f 100644
--- a/include/uapi/sound/hdspm.h
+++ b/include/uapi/sound/hdspm.h
@@ -21,7 +21,9 @@
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
+#ifdef __linux__
#include <linux/types.h>
+#endif
/* Maximum channels is 64 even on 56Mode you have 64playbacks to matrix */
#define HDSPM_MAX_CHANNELS 64
@@ -221,12 +223,4 @@ struct hdspm_mixer_ioctl {
/* use indirect access due to the limit of ioctl bit size */
#define SNDRV_HDSPM_IOCTL_GET_MIXER _IOR('H', 0x44, struct hdspm_mixer_ioctl)
-/* typedefs for compatibility to user-space */
-typedef struct hdspm_peak_rms hdspm_peak_rms_t;
-typedef struct hdspm_config_info hdspm_config_info_t;
-typedef struct hdspm_version hdspm_version_t;
-typedef struct hdspm_channelfader snd_hdspm_channelfader_t;
-typedef struct hdspm_mixer hdspm_mixer_t;
-
-
#endif
diff --git a/include/uapi/sound/sof/abi.h b/include/uapi/sound/sof/abi.h
index a0fe0d4c4b66..c0ef1643c753 100644
--- a/include/uapi/sound/sof/abi.h
+++ b/include/uapi/sound/sof/abi.h
@@ -26,7 +26,7 @@
/* SOF ABI version major, minor and patch numbers */
#define SOF_ABI_MAJOR 3
-#define SOF_ABI_MINOR 10
+#define SOF_ABI_MINOR 12
#define SOF_ABI_PATCH 0
/* SOF ABI version number. Format within 32bit word is MMmmmppp */
diff --git a/include/uapi/sound/sof/tokens.h b/include/uapi/sound/sof/tokens.h
index 8f996857fb24..2a25cd8da503 100644
--- a/include/uapi/sound/sof/tokens.h
+++ b/include/uapi/sound/sof/tokens.h
@@ -57,6 +57,12 @@
#define SOF_TKN_SRC_RATE_IN 300
#define SOF_TKN_SRC_RATE_OUT 301
+/* ASRC */
+#define SOF_TKN_ASRC_RATE_IN 320
+#define SOF_TKN_ASRC_RATE_OUT 321
+#define SOF_TKN_ASRC_ASYNCHRONOUS_MODE 322
+#define SOF_TKN_ASRC_OPERATION_MODE 323
+
/* PCM */
#define SOF_TKN_PCM_DMAC_CONFIG 353
@@ -107,11 +113,17 @@
#define SOF_TKN_EFFECT_TYPE SOF_TKN_PROCESS_TYPE
/* SAI */
-#define SOF_TKN_IMX_SAI_FIRST_TOKEN 1000
-/* TODO: Add SAI tokens */
+#define SOF_TKN_IMX_SAI_MCLK_ID 1000
/* ESAI */
-#define SOF_TKN_IMX_ESAI_FIRST_TOKEN 1100
-/* TODO: Add ESAI tokens */
+#define SOF_TKN_IMX_ESAI_MCLK_ID 1100
+
+/* Stream */
+#define SOF_TKN_STREAM_PLAYBACK_COMPATIBLE_D0I3 1200
+#define SOF_TKN_STREAM_CAPTURE_COMPATIBLE_D0I3 1201
+
+/* Led control for mute switches */
+#define SOF_TKN_MUTE_LED_USE 1300
+#define SOF_TKN_MUTE_LED_DIRECTION 1301
#endif
diff --git a/include/vdso/bits.h b/include/vdso/bits.h
new file mode 100644
index 000000000000..6d005a1f5d94
--- /dev/null
+++ b/include/vdso/bits.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __VDSO_BITS_H
+#define __VDSO_BITS_H
+
+#include <vdso/const.h>
+
+#define BIT(nr) (UL(1) << (nr))
+
+#endif /* __VDSO_BITS_H */
diff --git a/include/vdso/clocksource.h b/include/vdso/clocksource.h
new file mode 100644
index 000000000000..c682e7c60273
--- /dev/null
+++ b/include/vdso/clocksource.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __VDSO_CLOCKSOURCE_H
+#define __VDSO_CLOCKSOURCE_H
+
+#include <vdso/limits.h>
+
+#ifdef CONFIG_GENERIC_GETTIMEOFDAY
+#include <asm/vdso/clocksource.h>
+#endif /* CONFIG_GENERIC_GETTIMEOFDAY */
+
+enum vdso_clock_mode {
+ VDSO_CLOCKMODE_NONE,
+#ifdef CONFIG_GENERIC_GETTIMEOFDAY
+ VDSO_ARCH_CLOCKMODES,
+#endif
+ VDSO_CLOCKMODE_MAX,
+
+ /* Indicator for time namespace VDSO */
+ VDSO_CLOCKMODE_TIMENS = INT_MAX
+};
+
+#endif /* __VDSO_CLOCKSOURCE_H */
diff --git a/include/vdso/const.h b/include/vdso/const.h
new file mode 100644
index 000000000000..94b385ad438d
--- /dev/null
+++ b/include/vdso/const.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __VDSO_CONST_H
+#define __VDSO_CONST_H
+
+#include <uapi/linux/const.h>
+
+#define UL(x) (_UL(x))
+#define ULL(x) (_ULL(x))
+
+#endif /* __VDSO_CONST_H */
diff --git a/include/vdso/datapage.h b/include/vdso/datapage.h
index 2e302c0f41f7..5cbc9fcbfd45 100644
--- a/include/vdso/datapage.h
+++ b/include/vdso/datapage.h
@@ -4,9 +4,20 @@
#ifndef __ASSEMBLY__
-#include <linux/bits.h>
-#include <linux/time.h>
-#include <linux/types.h>
+#include <linux/compiler.h>
+#include <uapi/linux/time.h>
+#include <uapi/linux/types.h>
+#include <uapi/asm-generic/errno-base.h>
+
+#include <vdso/bits.h>
+#include <vdso/clocksource.h>
+#include <vdso/ktime.h>
+#include <vdso/limits.h>
+#include <vdso/math64.h>
+#include <vdso/processor.h>
+#include <vdso/time.h>
+#include <vdso/time32.h>
+#include <vdso/time64.h>
#define VDSO_BASES (CLOCK_TAI + 1)
#define VDSO_HRES (BIT(CLOCK_REALTIME) | \
@@ -48,6 +59,7 @@ struct vdso_timestamp {
* @mult: clocksource multiplier
* @shift: clocksource shift
* @basetime[clock_id]: basetime per clock_id
+ * @offset[clock_id]: time namespace offset per clock_id
* @tz_minuteswest: minutes west of Greenwich
* @tz_dsttime: type of DST correction
* @hrtimer_res: hrtimer resolution
@@ -55,6 +67,17 @@ struct vdso_timestamp {
*
* vdso_data will be accessed by 64 bit and compat code at the same time
* so we should be careful before modifying this structure.
+ *
+ * @basetime is used to store the base time for the system wide time getter
+ * VVAR page.
+ *
+ * @offset is used by the special time namespace VVAR pages which are
+ * installed instead of the real VVAR page. These namespace pages must set
+ * @seq to 1 and @clock_mode to VDSO_CLOCKMODE_TIMENS to force the code into the
+ * time namespace slow path. The namespace aware functions retrieve the
+ * real system wide VVAR page, read host time and add the per clock offset.
+ * For clocks which are not affected by time namespace adjustment the
+ * offset must be zero.
*/
struct vdso_data {
u32 seq;
@@ -65,7 +88,10 @@ struct vdso_data {
u32 mult;
u32 shift;
- struct vdso_timestamp basetime[VDSO_BASES];
+ union {
+ struct vdso_timestamp basetime[VDSO_BASES];
+ struct timens_offset offset[VDSO_BASES];
+ };
s32 tz_minuteswest;
s32 tz_dsttime;
@@ -84,6 +110,22 @@ struct vdso_data {
*/
extern struct vdso_data _vdso_data[CS_BASES] __attribute__((visibility("hidden")));
+/*
+ * The generic vDSO implementation requires that gettimeofday.h
+ * provides:
+ * - __arch_get_vdso_data(): to get the vdso datapage.
+ * - __arch_get_hw_counter(): to get the hw counter based on the
+ * clock_mode.
+ * - gettimeofday_fallback(): fallback for gettimeofday.
+ * - clock_gettime_fallback(): fallback for clock_gettime.
+ * - clock_getres_fallback(): fallback for clock_getres.
+ */
+#ifdef ENABLE_COMPAT_VDSO
+#include <asm/vdso/compat_gettimeofday.h>
+#else
+#include <asm/vdso/gettimeofday.h>
+#endif /* ENABLE_COMPAT_VDSO */
+
#endif /* !__ASSEMBLY__ */
#endif /* __VDSO_DATAPAGE_H */
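
A rough sketch of the namespace slow path described in the comment above: after reading host time from the real VVAR page, the per-clock timens_offset is added and the result renormalized. The helper below is hypothetical and ignores the sequence counting; only struct timens_offset and NSEC_PER_SEC come from these headers.

static __always_inline void timens_add_offset(const struct timens_offset *off,
					      u64 *sec, u64 *nsec)
{
	/* add the per-clock namespace offset to host time */
	*sec += off->sec;
	*nsec += off->nsec;

	/* renormalize; NSEC_PER_SEC comes from vdso/time64.h */
	while (*nsec >= NSEC_PER_SEC) {
		*nsec -= NSEC_PER_SEC;
		(*sec)++;
	}
}
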
diff --git a/include/vdso/helpers.h b/include/vdso/helpers.h
index 01641dbb68ef..9a2af9fca45e 100644
--- a/include/vdso/helpers.h
+++ b/include/vdso/helpers.h
@@ -10,7 +10,7 @@ static __always_inline u32 vdso_read_begin(const struct vdso_data *vd)
{
u32 seq;
- while ((seq = READ_ONCE(vd->seq)) & 1)
+ while (unlikely((seq = READ_ONCE(vd->seq)) & 1))
cpu_relax();
smp_rmb();
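
For context, vdso_read_begin() is one half of the seqcount read protocol: a reader snapshots the datapage between begin and the matching vdso_read_retry() (also defined in this header, just not shown in the hunk) and loops if the writer bumped vd->seq in between. A hedged fragment of that pattern; snapshot_base() is a hypothetical helper:

static __always_inline void snapshot_base(const struct vdso_data *vd,
					  const struct vdso_timestamp *vt,
					  u64 *sec, u64 *nsec)
{
	u32 seq;

	do {
		seq = vdso_read_begin(vd);	/* waits out odd (in-progress) counts */
		*sec = vt->sec;
		*nsec = vt->nsec;
	} while (unlikely(vdso_read_retry(vd, seq)));
}
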
diff --git a/include/vdso/jiffies.h b/include/vdso/jiffies.h
new file mode 100644
index 000000000000..2f9d596c8b29
--- /dev/null
+++ b/include/vdso/jiffies.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __VDSO_JIFFIES_H
+#define __VDSO_JIFFIES_H
+
+#include <asm/param.h> /* for HZ */
+#include <vdso/time64.h>
+
+/* TICK_NSEC is the time between ticks in nsec assuming SHIFTED_HZ */
+#define TICK_NSEC ((NSEC_PER_SEC+HZ/2)/HZ)
+
+#endif /* __VDSO_JIFFIES_H */
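
As a worked example of the definition above: with HZ = 250, TICK_NSEC evaluates to (1000000000 + 125) / 250 = 4000000 ns, i.e. a 4 ms tick; the + HZ/2 term rounds to the nearest nanosecond for HZ values that do not divide NSEC_PER_SEC evenly.
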
diff --git a/include/vdso/ktime.h b/include/vdso/ktime.h
new file mode 100644
index 000000000000..a0fd07239e0e
--- /dev/null
+++ b/include/vdso/ktime.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __VDSO_KTIME_H
+#define __VDSO_KTIME_H
+
+#include <vdso/jiffies.h>
+
+/*
+ * The resolution of the clocks. The resolution value is returned in
+ * the clock_getres() system call to give application programmers an
+ * idea of the (in)accuracy of timers. Timer values are rounded up to
+ * this resolution value.
+ */
+#define LOW_RES_NSEC TICK_NSEC
+#define KTIME_LOW_RES (LOW_RES_NSEC)
+
+#endif /* __VDSO_KTIME_H */
diff --git a/include/vdso/limits.h b/include/vdso/limits.h
new file mode 100644
index 000000000000..0197888ad0e0
--- /dev/null
+++ b/include/vdso/limits.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __VDSO_LIMITS_H
+#define __VDSO_LIMITS_H
+
+#define USHRT_MAX ((unsigned short)~0U)
+#define SHRT_MAX ((short)(USHRT_MAX >> 1))
+#define SHRT_MIN ((short)(-SHRT_MAX - 1))
+#define INT_MAX ((int)(~0U >> 1))
+#define INT_MIN (-INT_MAX - 1)
+#define UINT_MAX (~0U)
+#define LONG_MAX ((long)(~0UL >> 1))
+#define LONG_MIN (-LONG_MAX - 1)
+#define ULONG_MAX (~0UL)
+#define LLONG_MAX ((long long)(~0ULL >> 1))
+#define LLONG_MIN (-LLONG_MAX - 1)
+#define ULLONG_MAX (~0ULL)
+#define UINTPTR_MAX ULONG_MAX
+
+#endif /* __VDSO_LIMITS_H */
diff --git a/include/vdso/math64.h b/include/vdso/math64.h
new file mode 100644
index 000000000000..7da703ee5561
--- /dev/null
+++ b/include/vdso/math64.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __VDSO_MATH64_H
+#define __VDSO_MATH64_H
+
+static __always_inline u32
+__iter_div_u64_rem(u64 dividend, u32 divisor, u64 *remainder)
+{
+ u32 ret = 0;
+
+ while (dividend >= divisor) {
+ /* The following asm() prevents the compiler from
+ optimising this loop into a modulo operation. */
+ asm("" : "+rm"(dividend));
+
+ dividend -= divisor;
+ ret++;
+ }
+
+ *remainder = dividend;
+
+ return ret;
+}
+
+#endif /* __VDSO_MATH64_H */
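
A typical use of the helper above is splitting a nanosecond count into whole seconds plus a sub-second remainder without emitting a full 64-bit division, which the vDSO cannot assume is cheap (or available without libgcc helpers) on 32-bit architectures. An illustrative fragment; NSEC_PER_SEC comes from vdso/time64.h:

	u64 ns = 2700000000ULL;		/* 2.7 s, illustrative value */
	u64 rem;
	u32 sec = __iter_div_u64_rem(ns, NSEC_PER_SEC, &rem);
	/* sec == 2, rem == 700000000 */
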
diff --git a/include/vdso/processor.h b/include/vdso/processor.h
new file mode 100644
index 000000000000..fbe8265ea3c4
--- /dev/null
+++ b/include/vdso/processor.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020 ARM Ltd.
+ */
+#ifndef __VDSO_PROCESSOR_H
+#define __VDSO_PROCESSOR_H
+
+#ifndef __ASSEMBLY__
+
+#include <asm/vdso/processor.h>
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __VDSO_PROCESSOR_H */
diff --git a/include/vdso/time.h b/include/vdso/time.h
new file mode 100644
index 000000000000..739f53cd2949
--- /dev/null
+++ b/include/vdso/time.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __VDSO_TIME_H
+#define __VDSO_TIME_H
+
+#include <uapi/linux/types.h>
+
+struct timens_offset {
+ s64 sec;
+ u64 nsec;
+};
+
+#endif /* __VDSO_TIME_H */
diff --git a/include/vdso/time32.h b/include/vdso/time32.h
new file mode 100644
index 000000000000..fdf56f932f67
--- /dev/null
+++ b/include/vdso/time32.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __VDSO_TIME32_H
+#define __VDSO_TIME32_H
+
+typedef s32 old_time32_t;
+
+struct old_timespec32 {
+ old_time32_t tv_sec;
+ s32 tv_nsec;
+};
+
+struct old_timeval32 {
+ old_time32_t tv_sec;
+ s32 tv_usec;
+};
+
+#endif /* __VDSO_TIME32_H */
diff --git a/include/vdso/time64.h b/include/vdso/time64.h
new file mode 100644
index 000000000000..9d43c3f5e89d
--- /dev/null
+++ b/include/vdso/time64.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __VDSO_TIME64_H
+#define __VDSO_TIME64_H
+
+/* Parameters used to convert the timespec values: */
+#define MSEC_PER_SEC 1000L
+#define USEC_PER_MSEC 1000L
+#define NSEC_PER_USEC 1000L
+#define NSEC_PER_MSEC 1000000L
+#define USEC_PER_SEC 1000000L
+#define NSEC_PER_SEC 1000000000L
+#define FSEC_PER_SEC 1000000000000000LL
+
+#endif /* __VDSO_TIME64_H */
diff --git a/include/video/mipi_display.h b/include/video/mipi_display.h
index cba57a678daf..b6d8b874233f 100644
--- a/include/video/mipi_display.h
+++ b/include/video/mipi_display.h
@@ -17,6 +17,9 @@ enum {
MIPI_DSI_H_SYNC_START = 0x21,
MIPI_DSI_H_SYNC_END = 0x31,
+ MIPI_DSI_COMPRESSION_MODE = 0x07,
+ MIPI_DSI_END_OF_TRANSMISSION = 0x08,
+
MIPI_DSI_COLOR_MODE_OFF = 0x02,
MIPI_DSI_COLOR_MODE_ON = 0x12,
MIPI_DSI_SHUTDOWN_PERIPHERAL = 0x22,
@@ -34,19 +37,18 @@ enum {
MIPI_DSI_DCS_SHORT_WRITE_PARAM = 0x15,
MIPI_DSI_DCS_READ = 0x06,
-
- MIPI_DSI_DCS_COMPRESSION_MODE = 0x07,
- MIPI_DSI_PPS_LONG_WRITE = 0x0A,
+ MIPI_DSI_EXECUTE_QUEUE = 0x16,
MIPI_DSI_SET_MAXIMUM_RETURN_PACKET_SIZE = 0x37,
- MIPI_DSI_END_OF_TRANSMISSION = 0x08,
-
MIPI_DSI_NULL_PACKET = 0x09,
MIPI_DSI_BLANKING_PACKET = 0x19,
MIPI_DSI_GENERIC_LONG_WRITE = 0x29,
MIPI_DSI_DCS_LONG_WRITE = 0x39,
+ MIPI_DSI_PICTURE_PARAMETER_SET = 0x0a,
+ MIPI_DSI_COMPRESSED_PIXEL_STREAM = 0x0b,
+
MIPI_DSI_LOOSELY_PACKED_PIXEL_STREAM_YCBCR20 = 0x0c,
MIPI_DSI_PACKED_PIXEL_STREAM_YCBCR24 = 0x1c,
MIPI_DSI_PACKED_PIXEL_STREAM_YCBCR16 = 0x2c,
@@ -77,7 +79,9 @@ enum {
enum {
MIPI_DCS_NOP = 0x00,
MIPI_DCS_SOFT_RESET = 0x01,
+ MIPI_DCS_GET_COMPRESSION_MODE = 0x03,
MIPI_DCS_GET_DISPLAY_ID = 0x04,
+ MIPI_DCS_GET_ERROR_COUNT_ON_DSI = 0x05,
MIPI_DCS_GET_RED_CHANNEL = 0x06,
MIPI_DCS_GET_GREEN_CHANNEL = 0x07,
MIPI_DCS_GET_BLUE_CHANNEL = 0x08,
@@ -92,6 +96,8 @@ enum {
MIPI_DCS_EXIT_SLEEP_MODE = 0x11,
MIPI_DCS_ENTER_PARTIAL_MODE = 0x12,
MIPI_DCS_ENTER_NORMAL_MODE = 0x13,
+ MIPI_DCS_GET_IMAGE_CHECKSUM_RGB = 0x14,
+ MIPI_DCS_GET_IMAGE_CHECKSUM_CT = 0x15,
MIPI_DCS_EXIT_INVERT_MODE = 0x20,
MIPI_DCS_ENTER_INVERT_MODE = 0x21,
MIPI_DCS_SET_GAMMA_CURVE = 0x26,
@@ -102,7 +108,8 @@ enum {
MIPI_DCS_WRITE_MEMORY_START = 0x2C,
MIPI_DCS_WRITE_LUT = 0x2D,
MIPI_DCS_READ_MEMORY_START = 0x2E,
- MIPI_DCS_SET_PARTIAL_AREA = 0x30,
+ MIPI_DCS_SET_PARTIAL_ROWS = 0x30, /* MIPI DCS 1.02 - MIPI_DCS_SET_PARTIAL_AREA before that */
+ MIPI_DCS_SET_PARTIAL_COLUMNS = 0x31,
MIPI_DCS_SET_SCROLL_AREA = 0x33,
MIPI_DCS_SET_TEAR_OFF = 0x34,
MIPI_DCS_SET_TEAR_ON = 0x35,
@@ -112,7 +119,10 @@ enum {
MIPI_DCS_ENTER_IDLE_MODE = 0x39,
MIPI_DCS_SET_PIXEL_FORMAT = 0x3A,
MIPI_DCS_WRITE_MEMORY_CONTINUE = 0x3C,
+ MIPI_DCS_SET_3D_CONTROL = 0x3D,
MIPI_DCS_READ_MEMORY_CONTINUE = 0x3E,
+ MIPI_DCS_GET_3D_CONTROL = 0x3F,
+ MIPI_DCS_SET_VSYNC_TIMING = 0x40,
MIPI_DCS_SET_TEAR_SCANLINE = 0x44,
MIPI_DCS_GET_SCANLINE = 0x45,
MIPI_DCS_SET_DISPLAY_BRIGHTNESS = 0x51, /* MIPI DCS 1.3 */
@@ -124,7 +134,9 @@ enum {
MIPI_DCS_SET_CABC_MIN_BRIGHTNESS = 0x5E, /* MIPI DCS 1.3 */
MIPI_DCS_GET_CABC_MIN_BRIGHTNESS = 0x5F, /* MIPI DCS 1.3 */
MIPI_DCS_READ_DDB_START = 0xA1,
+ MIPI_DCS_READ_PPS_START = 0xA2,
MIPI_DCS_READ_DDB_CONTINUE = 0xA8,
+ MIPI_DCS_READ_PPS_CONTINUE = 0xA9,
};
/* MIPI DCS pixel formats */
diff --git a/include/xen/interface/io/ring.h b/include/xen/interface/io/ring.h
index 3f40501fc60b..2af7a1cd6658 100644
--- a/include/xen/interface/io/ring.h
+++ b/include/xen/interface/io/ring.h
@@ -125,35 +125,24 @@ struct __name##_back_ring { \
memset((_s)->pad, 0, sizeof((_s)->pad)); \
} while(0)
-#define FRONT_RING_INIT(_r, _s, __size) do { \
- (_r)->req_prod_pvt = 0; \
- (_r)->rsp_cons = 0; \
+#define FRONT_RING_ATTACH(_r, _s, _i, __size) do { \
+ (_r)->req_prod_pvt = (_i); \
+ (_r)->rsp_cons = (_i); \
(_r)->nr_ents = __RING_SIZE(_s, __size); \
(_r)->sring = (_s); \
} while (0)
-#define BACK_RING_INIT(_r, _s, __size) do { \
- (_r)->rsp_prod_pvt = 0; \
- (_r)->req_cons = 0; \
- (_r)->nr_ents = __RING_SIZE(_s, __size); \
- (_r)->sring = (_s); \
-} while (0)
+#define FRONT_RING_INIT(_r, _s, __size) FRONT_RING_ATTACH(_r, _s, 0, __size)
-/* Initialize to existing shared indexes -- for recovery */
-#define FRONT_RING_ATTACH(_r, _s, __size) do { \
- (_r)->sring = (_s); \
- (_r)->req_prod_pvt = (_s)->req_prod; \
- (_r)->rsp_cons = (_s)->rsp_prod; \
+#define BACK_RING_ATTACH(_r, _s, _i, __size) do { \
+ (_r)->rsp_prod_pvt = (_i); \
+ (_r)->req_cons = (_i); \
(_r)->nr_ents = __RING_SIZE(_s, __size); \
-} while (0)
-
-#define BACK_RING_ATTACH(_r, _s, __size) do { \
(_r)->sring = (_s); \
- (_r)->rsp_prod_pvt = (_s)->rsp_prod; \
- (_r)->req_cons = (_s)->req_prod; \
- (_r)->nr_ents = __RING_SIZE(_s, __size); \
} while (0)
+#define BACK_RING_INIT(_r, _s, __size) BACK_RING_ATTACH(_r, _s, 0, __size)
+
/* How big is this ring? */
#define RING_SIZE(_r) \
((_r)->nr_ents)
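
The net effect of the rework above: *_RING_INIT is now just *_RING_ATTACH at index 0, and the attach form takes an explicit index so a frontend or backend reconnecting after suspend can resume its private indexes from a caller-chosen point instead of from the shared producer fields. A hedged sketch of the two call sites; the blkif names and the shared-page mapping are illustrative, generated by DEFINE_RING_TYPES():

	/* blkif_sring / blkif_front_ring come from DEFINE_RING_TYPES(blkif, ...) */
	struct blkif_front_ring front;
	struct blkif_sring *sring = shared_ring_page;	/* hypothetical mapping */

	/* fresh connection: both private indexes start at zero */
	FRONT_RING_INIT(&front, sring, XEN_PAGE_SIZE);

	/* reconnect: continue from an index preserved across the suspend */
	FRONT_RING_ATTACH(&front, sring, front.rsp_cons, XEN_PAGE_SIZE);
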
diff --git a/include/xen/interface/io/tpmif.h b/include/xen/interface/io/tpmif.h
index 28e7dcd75e82..f8aa8bac5196 100644
--- a/include/xen/interface/io/tpmif.h
+++ b/include/xen/interface/io/tpmif.h
@@ -46,7 +46,7 @@ struct vtpm_shared_page {
uint8_t pad;
uint8_t nr_extra_pages; /* extra pages for long packets; may be zero */
- uint32_t extra_pages[0]; /* grant IDs; length in nr_extra_pages */
+ uint32_t extra_pages[]; /* grant IDs; length in nr_extra_pages */
};
#endif
diff --git a/include/xen/interface/xen-mca.h b/include/xen/interface/xen-mca.h
index 73a4ea714d93..7483a78d2425 100644
--- a/include/xen/interface/xen-mca.h
+++ b/include/xen/interface/xen-mca.h
@@ -183,7 +183,6 @@ struct mc_info {
DEFINE_GUEST_HANDLE_STRUCT(mc_info);
#define __MC_MSR_ARRAYSIZE 8
-#define __MC_MSR_MCGCAP 0
#define __MC_NMSRS 1
#define MC_NCAPS 7
struct mcinfo_logical_cpu {
@@ -332,7 +331,11 @@ struct xen_mc {
};
DEFINE_GUEST_HANDLE_STRUCT(xen_mc);
-/* Fields are zero when not available */
+/*
+ * Fields are zero when not available. Also, this struct is shared with
+ * userspace mcelog and thus must keep existing fields at current offsets.
+ * Only add new fields to the end of the structure
+ */
struct xen_mce {
__u64 status;
__u64 misc;
@@ -353,6 +356,9 @@ struct xen_mce {
__u32 socketid; /* CPU socket ID */
__u32 apicid; /* CPU initial apic ID */
__u64 mcgcap; /* MCGCAP MSR: machine check capabilities of CPU */
+ __u64 synd; /* MCA_SYND MSR: only valid on SMCA systems */
+ __u64 ipid; /* MCA_IPID MSR: only valid on SMCA systems */
+ __u64 ppin; /* Protected Processor Inventory Number */
};
/*
diff --git a/include/xen/swiotlb-xen.h b/include/xen/swiotlb-xen.h
index d71380f6ed0b..ffc0d3902b71 100644
--- a/include/xen/swiotlb-xen.h
+++ b/include/xen/swiotlb-xen.h
@@ -4,10 +4,10 @@
#include <linux/swiotlb.h>
-void xen_dma_sync_for_cpu(struct device *dev, dma_addr_t handle,
- phys_addr_t paddr, size_t size, enum dma_data_direction dir);
-void xen_dma_sync_for_device(struct device *dev, dma_addr_t handle,
- phys_addr_t paddr, size_t size, enum dma_data_direction dir);
+void xen_dma_sync_for_cpu(dma_addr_t handle, phys_addr_t paddr, size_t size,
+ enum dma_data_direction dir);
+void xen_dma_sync_for_device(dma_addr_t handle, phys_addr_t paddr, size_t size,
+ enum dma_data_direction dir);
extern int xen_swiotlb_init(int verbose, bool early);
extern const struct dma_map_ops xen_swiotlb_dma_ops;
diff --git a/include/xen/xen-ops.h b/include/xen/xen-ops.h
index d89969aa9942..095be1d66f31 100644
--- a/include/xen/xen-ops.h
+++ b/include/xen/xen-ops.h
@@ -215,7 +215,7 @@ bool xen_running_on_version_or_later(unsigned int major, unsigned int minor);
void xen_efi_runtime_setup(void);
-#ifdef CONFIG_PREEMPT
+#ifdef CONFIG_PREEMPTION
static inline void xen_preemptible_hcall_begin(void)
{
@@ -239,6 +239,6 @@ static inline void xen_preemptible_hcall_end(void)
__this_cpu_write(xen_in_preemptible_hcall, false);
}
-#endif /* CONFIG_PREEMPT */
+#endif /* CONFIG_PREEMPTION */
#endif /* INCLUDE_XEN_OPS_H */
diff --git a/include/xen/xenbus.h b/include/xen/xenbus.h
index 869c816d5f8c..850a43bd69d3 100644
--- a/include/xen/xenbus.h
+++ b/include/xen/xenbus.h
@@ -42,6 +42,7 @@
#include <linux/completion.h>
#include <linux/init.h>
#include <linux/slab.h>
+#include <linux/semaphore.h>
#include <xen/interface/xen.h>
#include <xen/interface/grant_table.h>
#include <xen/interface/io/xenbus.h>
@@ -76,6 +77,7 @@ struct xenbus_device {
enum xenbus_state state;
struct completion down;
struct work_struct work;
+ struct semaphore reclaim_sem;
};
static inline struct xenbus_device *to_xenbus_device(struct device *dev)
@@ -93,6 +95,7 @@ struct xenbus_device_id
struct xenbus_driver {
const char *name; /* defaults to ids[0].devicetype */
const struct xenbus_device_id *ids;
+ bool allow_rebind; /* avoid setting xenstore closed during remove */
int (*probe)(struct xenbus_device *dev,
const struct xenbus_device_id *id);
void (*otherend_changed)(struct xenbus_device *dev,
@@ -104,6 +107,7 @@ struct xenbus_driver {
struct device_driver driver;
int (*read_otherend_details)(struct xenbus_device *dev);
int (*is_ready)(struct xenbus_device *dev);
+ void (*reclaim_memory)(struct xenbus_device *dev);
};
static inline struct xenbus_driver *to_xenbus_driver(struct device_driver *drv)